Merge
author duke
Wed, 05 Jul 2017 19:58:32 +0200
changeset 26229 b1d885c12096
parent 26228 317aad3480c8 (current diff)
parent 26187 b50907e0a329 (diff)
child 26233 d391c2eadc8c
Merge
make/common/SetupJava.gmk
--- a/.hgtags-top-repo	Fri Aug 29 11:58:43 2014 -0700
+++ b/.hgtags-top-repo	Wed Jul 05 19:58:32 2017 +0200
@@ -270,3 +270,4 @@
 aefd8899a8d6615fb34ba99b2e38996a7145baa8 jdk9-b25
 d3ec8d048e6c3c46b6e0ee011cc551ad386dfba5 jdk9-b26
 ba5645f2735b41ed085d07ba20fa7b322afff318 jdk9-b27
+ea2f7981236f3812436958748ab3d26e80a35130 jdk9-b28
--- a/Makefile	Fri Aug 29 11:58:43 2014 -0700
+++ b/Makefile	Wed Jul 05 19:58:32 2017 +0200
@@ -136,10 +136,12 @@
 	$(info .  make docs              # Create all docs)
 	$(info .  make docs-javadoc      # Create just javadocs, depends on less than full docs)
 	$(info .  make profiles          # Create complete j2re compact profile images)
-	$(info .  make bootcycle-images  # Build images twice, second time with newly build JDK)
+	$(info .  make bootcycle-images  # Build images twice, second time with newly built JDK)
 	$(info .  make install           # Install the generated images locally)
 	$(info .  make clean             # Remove all files generated by make, but not those)
-	$(info .                         # generated by configure)
+	$(info .                         # generated by configure. Do not run clean and other)
+	$(info .                         # targets together as that might behave in an)
+	$(info .                         # unexpected way.)
 	$(info .  make dist-clean        # Remove all files, including configuration)
 	$(info .  make help              # Give some help on using make)
 	$(info .  make test              # Run tests, default is all tests (see TEST below))
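Note: the amended help text warns against combining clean with other targets in a single invocation. A hypothetical session showing the unsafe and safe patterns (target names taken from the help text above):

    # risky: clean may interleave with other goals in unexpected ways
    make clean images

    # safe: run clean on its own, then build
    make clean
    make images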
--- a/common/autoconf/basics.m4	Fri Aug 29 11:58:43 2014 -0700
+++ b/common/autoconf/basics.m4	Wed Jul 05 19:58:32 2017 +0200
@@ -849,7 +849,12 @@
   if test -f $DELETEDIR/TestIfFindSupportsDelete; then
     # No, it does not.
     rm $DELETEDIR/TestIfFindSupportsDelete
-    FIND_DELETE="-exec rm \{\} \+"
+    if test "x$OPENJDK_TARGET_OS" = "xaix"; then
+      # AIX 'find' is buggy if called with '-exec {} \+' and an empty file list
+      FIND_DELETE="-print | xargs rm"
+    else
+      FIND_DELETE="-exec rm \{\} \+"
+    fi
     AC_MSG_RESULT([no])
   else
     AC_MSG_RESULT([yes])
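Note: the AIX branch above avoids find's -exec form because AIX find misbehaves when '-exec {} \+' is given an empty match list; piping to xargs sidesteps that. A sketch of the two resulting delete commands (directory and pattern invented for illustration):

    # default FIND_DELETE: batched deletion via -exec
    find build/tmp -name '*.tmp' -exec rm {} +

    # AIX FIND_DELETE: print matches and let xargs feed rm
    find build/tmp -name '*.tmp' -print | xargs rm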
--- a/common/autoconf/boot-jdk.m4	Fri Aug 29 11:58:43 2014 -0700
+++ b/common/autoconf/boot-jdk.m4	Wed Jul 05 19:58:32 2017 +0200
@@ -370,18 +370,27 @@
 
   # Maximum amount of heap memory.
   # Maximum stack size.
+  JVM_MAX_HEAP=`expr $MEMORY_SIZE / 2`
   if test "x$BUILD_NUM_BITS" = x32; then
-    JVM_MAX_HEAP=1100M
+    if test "$JVM_MAX_HEAP" -gt "1100"; then
+      JVM_MAX_HEAP=1100
+    elif test "$JVM_MAX_HEAP" -lt "512"; then
+      JVM_MAX_HEAP=512
+    fi
     STACK_SIZE=768
   else
     # Running Javac on a JVM on a 64-bit machine, takes more space since 64-bit
     # pointers are used. Apparently, we need to increase the heap and stack
     # space for the jvm. More specifically, when running javac to build huge
     # jdk batch
-    JVM_MAX_HEAP=1600M
+    if test "$JVM_MAX_HEAP" -gt "1600"; then
+      JVM_MAX_HEAP=1600
+    elif test "$JVM_MAX_HEAP" -lt "512"; then
+      JVM_MAX_HEAP=512
+    fi
     STACK_SIZE=1536
   fi
-  ADD_JVM_ARG_IF_OK([-Xmx$JVM_MAX_HEAP],boot_jdk_jvmargs_big,[$JAVA])
+  ADD_JVM_ARG_IF_OK([-Xmx${JVM_MAX_HEAP}M],boot_jdk_jvmargs_big,[$JAVA])
   ADD_JVM_ARG_IF_OK([-XX:ThreadStackSize=$STACK_SIZE],boot_jdk_jvmargs_big,[$JAVA])
 
   AC_MSG_RESULT([$boot_jdk_jvmargs_big])
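Note: the boot JDK heap is now sized from the machine instead of a constant: half of MEMORY_SIZE (megabytes), clamped to 512-1100 MB for 32-bit builds and 512-1600 MB for 64-bit builds. A worked sketch with an invented memory size:

    MEMORY_SIZE=8192                      # hypothetical 8 GB machine, in MB
    JVM_MAX_HEAP=`expr $MEMORY_SIZE / 2`  # 4096
    # 64-bit: 4096 > 1600, so the cap applies and -Xmx1600M is probed
    # 32-bit: the cap would be 1100, giving -Xmx1100M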
--- a/common/autoconf/build-performance.m4	Fri Aug 29 11:58:43 2014 -0700
+++ b/common/autoconf/build-performance.m4	Wed Jul 05 19:58:32 2017 +0200
@@ -131,8 +131,8 @@
   if test "x$with_jobs" = x; then
     # Number of jobs was not specified, calculate.
     AC_MSG_CHECKING([for appropriate number of jobs to run in parallel])
-    # Approximate memory in GB, rounding up a bit.
-    memory_gb=`expr $MEMORY_SIZE / 1100`
+    # Approximate memory in GB.
+    memory_gb=`expr $MEMORY_SIZE / 1024`
     # Pick the lowest of memory in gb and number of cores.
     if test "$memory_gb" -lt "$NUM_CORES"; then
       JOBS="$memory_gb"
@@ -291,16 +291,11 @@
       AC_MSG_ERROR([Could not execute server java: $SJAVAC_SERVER_JAVA])
     fi
   else
-    SJAVAC_SERVER_JAVA=""
-    # Hotspot specific options.
-    ADD_JVM_ARG_IF_OK([-verbosegc],SJAVAC_SERVER_JAVA,[$JAVA])
-    # JRockit specific options.
-    ADD_JVM_ARG_IF_OK([-Xverbose:gc],SJAVAC_SERVER_JAVA,[$JAVA])
-    SJAVAC_SERVER_JAVA="$JAVA $SJAVAC_SERVER_JAVA"
+    SJAVAC_SERVER_JAVA="$JAVA"
   fi
   AC_SUBST(SJAVAC_SERVER_JAVA)
 
-  if test "$MEMORY_SIZE" -gt "2500"; then
+  if test "$MEMORY_SIZE" -gt "3000"; then
     ADD_JVM_ARG_IF_OK([-d64],SJAVAC_SERVER_JAVA,[$SJAVAC_SERVER_JAVA])
     if test "$JVM_ARG_OK" = true; then
       JVM_64BIT=true
@@ -308,34 +303,33 @@
     fi
   fi
 
+  MX_VALUE=`expr $MEMORY_SIZE / 2`
   if test "$JVM_64BIT" = true; then
-    if test "$MEMORY_SIZE" -gt "17000"; then
-      ADD_JVM_ARG_IF_OK([-Xms10G -Xmx10G],SJAVAC_SERVER_JAVA,[$SJAVAC_SERVER_JAVA])
+    # Set ms lower than mx since more than one instance of the server might
+    # get launched at the same time before they figure out which instance won.
+    MS_VALUE=512
+    if test "$MX_VALUE" -gt "2048"; then
+      MX_VALUE=2048
     fi
-    if test "$MEMORY_SIZE" -gt "10000" && test "$JVM_ARG_OK" = false; then
-      ADD_JVM_ARG_IF_OK([-Xms6G -Xmx6G],SJAVAC_SERVER_JAVA,[$SJAVAC_SERVER_JAVA])
-    fi
-    if test "$MEMORY_SIZE" -gt "5000" && test "$JVM_ARG_OK" = false; then
-      ADD_JVM_ARG_IF_OK([-Xms1G -Xmx3G],SJAVAC_SERVER_JAVA,[$SJAVAC_SERVER_JAVA])
-    fi
-    if test "$MEMORY_SIZE" -gt "3800" && test "$JVM_ARG_OK" = false; then
-      ADD_JVM_ARG_IF_OK([-Xms1G -Xmx2500M],SJAVAC_SERVER_JAVA,[$SJAVAC_SERVER_JAVA])
+  else
+    MS_VALUE=256
+    if test "$MX_VALUE" -gt "1500"; then
+      MX_VALUE=1500
     fi
   fi
-  if test "$MEMORY_SIZE" -gt "2500" && test "$JVM_ARG_OK" = false; then
-    ADD_JVM_ARG_IF_OK([-Xms1000M -Xmx1500M],SJAVAC_SERVER_JAVA,[$SJAVAC_SERVER_JAVA])
-  fi
-  if test "$MEMORY_SIZE" -gt "1000" && test "$JVM_ARG_OK" = false; then
-    ADD_JVM_ARG_IF_OK([-Xms400M -Xmx1100M],SJAVAC_SERVER_JAVA,[$SJAVAC_SERVER_JAVA])
+  if test "$MX_VALUE" -lt "512"; then
+    MX_VALUE=512
   fi
-  if test "$JVM_ARG_OK" = false; then
-    ADD_JVM_ARG_IF_OK([-Xms256M -Xmx512M],SJAVAC_SERVER_JAVA,[$SJAVAC_SERVER_JAVA])
-  fi
+  ADD_JVM_ARG_IF_OK([-Xms${MS_VALUE}M -Xmx${MX_VALUE}M],SJAVAC_SERVER_JAVA,[$SJAVAC_SERVER_JAVA])
 
-  AC_MSG_CHECKING([whether to use sjavac])
   AC_ARG_ENABLE([sjavac], [AS_HELP_STRING([--enable-sjavac],
       [use sjavac to do fast incremental compiles @<:@disabled@:>@])],
       [ENABLE_SJAVAC="${enableval}"], [ENABLE_SJAVAC='no'])
+  if test "x$JVM_ARG_OK" = "xfalse"; then
+    AC_MSG_WARN([Could not set -Xms${MS_VALUE}M -Xmx${MX_VALUE}M, disabling sjavac])
+    ENABLE_SJAVAC=no;
+  fi
+  AC_MSG_CHECKING([whether to use sjavac])
   AC_MSG_RESULT([$ENABLE_SJAVAC])
   AC_SUBST(ENABLE_SJAVAC)
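Note: the sjavac server sizing follows the same scheme: -Xmx is half of MEMORY_SIZE, capped at 2048 MB (64-bit) or 1500 MB (32-bit) and floored at 512 MB, while -Xms is kept low (512/256 MB) since several server instances may launch before one wins. A worked sketch with an invented memory size:

    MEMORY_SIZE=3500                   # hypothetical, in MB
    MX_VALUE=`expr $MEMORY_SIZE / 2`   # 1750
    # 64-bit: 1750 <= 2048, so -Xms512M -Xmx1750M is probed
    # 32-bit: 1750 > 1500, capped, so -Xms256M -Xmx1500M is probed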
 
--- a/common/autoconf/configure.ac	Fri Aug 29 11:58:43 2014 -0700
+++ b/common/autoconf/configure.ac	Wed Jul 05 19:58:32 2017 +0200
@@ -142,7 +142,6 @@
 ###############################################################################
 
 BOOTJDK_SETUP_BOOT_JDK
-BOOTJDK_SETUP_BOOT_JDK_ARGUMENTS
 
 ###############################################################################
 #
@@ -233,6 +232,9 @@
 BPERF_SETUP_BUILD_MEMORY
 BPERF_SETUP_BUILD_JOBS
 
+# Setup arguments for the boot jdk (after cores and memory have been setup)
+BOOTJDK_SETUP_BOOT_JDK_ARGUMENTS
+
 # Setup smart javac (after cores and memory have been setup)
 BPERF_SETUP_SMART_JAVAC
 
--- a/common/autoconf/flags.m4	Fri Aug 29 11:58:43 2014 -0700
+++ b/common/autoconf/flags.m4	Wed Jul 05 19:58:32 2017 +0200
@@ -342,17 +342,15 @@
       # no adjustment
       ;;
     fastdebug )
-      # Add compile time bounds checks.
-      CFLAGS_DEBUG_OPTIONS="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=1"
-      CXXFLAGS_DEBUG_OPTIONS="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=1"
+      # no adjustment
       ;;
     slowdebug )
-      # Add runtime bounds checks and symbol info.
-      CFLAGS_DEBUG_OPTIONS="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fstack-protector-all --param ssp-buffer-size=1"
-      CXXFLAGS_DEBUG_OPTIONS="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fstack-protector-all --param ssp-buffer-size=1"
+      # Add runtime stack smashing and undefined behavior checks
+      CFLAGS_DEBUG_OPTIONS="-fstack-protector-all --param ssp-buffer-size=1"
+      CXXFLAGS_DEBUG_OPTIONS="-fstack-protector-all --param ssp-buffer-size=1"
       if test "x$HAS_CFLAG_DETECT_UNDEFINED_BEHAVIOR" = "xtrue"; then
         CFLAGS_DEBUG_OPTIONS="$CFLAGS_DEBUG_OPTIONS  $CFLAG_DETECT_UNDEFINED_BEHAVIOR_FLAG"
-        CXXFLAGS_DEBUG_OPTIONS="$CXXFLAGS_DEBUG_OPTIONS $CFLAG_DETECT_UNDEFINED_BEHAVIOR_FLAG"
+        CXXFLAGS_DEBUG_OPTIONS="$CXXFLAGS_DEBUG_OPTIONS $CFLAG_DETECT_UNDEFINED_BEHAVIOR_FLAG"
       fi
       ;;
     esac
--- a/common/autoconf/generated-configure.sh	Fri Aug 29 11:58:43 2014 -0700
+++ b/common/autoconf/generated-configure.sh	Wed Jul 05 19:58:32 2017 +0200
@@ -634,6 +634,10 @@
 SJAVAC_SERVER_DIR
 ENABLE_SJAVAC
 SJAVAC_SERVER_JAVA
+JAVA_TOOL_FLAGS_SMALL
+JAVA_FLAGS_SMALL
+JAVA_FLAGS_BIG
+JAVA_FLAGS
 JOBS
 MEMORY_SIZE
 NUM_CORES
@@ -805,10 +809,6 @@
 JAXP_TOPDIR
 CORBA_TOPDIR
 LANGTOOLS_TOPDIR
-JAVA_TOOL_FLAGS_SMALL
-JAVA_FLAGS_SMALL
-JAVA_FLAGS_BIG
-JAVA_FLAGS
 JAVAC_FLAGS
 BOOT_JDK_SOURCETARGET
 JARSIGNER
@@ -1064,7 +1064,6 @@
 with_user_release_suffix
 with_build_number
 with_boot_jdk
-with_boot_jdk_jvmargs
 with_add_source_root
 with_override_source_root
 with_adds_and_overrides
@@ -1106,6 +1105,7 @@
 with_num_cores
 with_memory_size
 with_jobs
+with_boot_jdk_jvmargs
 with_sjavac_server_java
 enable_sjavac
 enable_precompiled_headers
@@ -1904,10 +1904,6 @@
                           number is not set.[username_builddateb00]
   --with-build-number     Set build number value for build [b00]
   --with-boot-jdk         path to Boot JDK (used to bootstrap build) [probed]
-  --with-boot-jdk-jvmargs specify JVM arguments to be passed to all java
-                          invocations of boot JDK, overriding the default
-                          values, e.g --with-boot-jdk-jvmargs="-Xmx8G
-                          -enableassertions"
   --with-add-source-root  for each and every source directory, look in this
                           additional source root for the same directory; if it
                           exists and have files in it, include it in the build
@@ -1979,6 +1975,10 @@
                           --with-memory-size=1024 [probed]
   --with-jobs             number of parallel jobs to let make run [calculated
                           based on cores and memory]
+  --with-boot-jdk-jvmargs specify JVM arguments to be passed to all java
+                          invocations of boot JDK, overriding the default
+                          values, e.g --with-boot-jdk-jvmargs="-Xmx8G
+                          -enableassertions"
   --with-sjavac-server-java
                           use this java binary for running the sjavac
                           background server [Boot JDK java]
@@ -4321,7 +4321,7 @@
 #CUSTOM_AUTOCONF_INCLUDE
 
 # Do not change or remove the following line, it is needed for consistency checks:
-DATE_WHEN_GENERATED=1408448519
+DATE_WHEN_GENERATED=1409311712
 
 ###############################################################################
 #
@@ -17284,7 +17284,12 @@
   if test -f $DELETEDIR/TestIfFindSupportsDelete; then
     # No, it does not.
     rm $DELETEDIR/TestIfFindSupportsDelete
-    FIND_DELETE="-exec rm \{\} \+"
+    if test "x$OPENJDK_TARGET_OS" = "xaix"; then
+      # AIX 'find' is buggy if called with '-exec {} \+' and an empty file list
+      FIND_DELETE="-print | xargs rm"
+    else
+      FIND_DELETE="-exec rm \{\} \+"
+    fi
     { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
 $as_echo "no" >&6; }
   else
@@ -26315,197 +26320,6 @@
 
 
 
-  ##############################################################################
-  #
-  # Specify jvm options for anything that is run with the Boot JDK.
-  # Not all JVM:s accept the same arguments on the command line.
-  #
-
-# Check whether --with-boot-jdk-jvmargs was given.
-if test "${with_boot_jdk_jvmargs+set}" = set; then :
-  withval=$with_boot_jdk_jvmargs;
-fi
-
-
-  { $as_echo "$as_me:${as_lineno-$LINENO}: checking flags for boot jdk java command " >&5
-$as_echo_n "checking flags for boot jdk java command ... " >&6; }
-
-  # Disable special log output when a debug build is used as Boot JDK...
-
-  $ECHO "Check if jvm arg is ok: -XX:-PrintVMOptions -XX:-UnlockDiagnosticVMOptions -XX:-LogVMOutput" >&5
-  $ECHO "Command: $JAVA -XX:-PrintVMOptions -XX:-UnlockDiagnosticVMOptions -XX:-LogVMOutput -version" >&5
-  OUTPUT=`$JAVA -XX:-PrintVMOptions -XX:-UnlockDiagnosticVMOptions -XX:-LogVMOutput -version 2>&1`
-  FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
-  FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
-  if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
-    boot_jdk_jvmargs="$boot_jdk_jvmargs -XX:-PrintVMOptions -XX:-UnlockDiagnosticVMOptions -XX:-LogVMOutput"
-    JVM_ARG_OK=true
-  else
-    $ECHO "Arg failed:" >&5
-    $ECHO "$OUTPUT" >&5
-    JVM_ARG_OK=false
-  fi
-
-
-  # Apply user provided options.
-
-  $ECHO "Check if jvm arg is ok: $with_boot_jdk_jvmargs" >&5
-  $ECHO "Command: $JAVA $with_boot_jdk_jvmargs -version" >&5
-  OUTPUT=`$JAVA $with_boot_jdk_jvmargs -version 2>&1`
-  FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
-  FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
-  if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
-    boot_jdk_jvmargs="$boot_jdk_jvmargs $with_boot_jdk_jvmargs"
-    JVM_ARG_OK=true
-  else
-    $ECHO "Arg failed:" >&5
-    $ECHO "$OUTPUT" >&5
-    JVM_ARG_OK=false
-  fi
-
-
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $boot_jdk_jvmargs" >&5
-$as_echo "$boot_jdk_jvmargs" >&6; }
-
-  # For now, general JAVA_FLAGS are the same as the boot jdk jvmargs
-  JAVA_FLAGS=$boot_jdk_jvmargs
-
-
-
-  { $as_echo "$as_me:${as_lineno-$LINENO}: checking flags for boot jdk java command for big workloads" >&5
-$as_echo_n "checking flags for boot jdk java command for big workloads... " >&6; }
-
-  # Starting amount of heap memory.
-
-  $ECHO "Check if jvm arg is ok: -Xms64M" >&5
-  $ECHO "Command: $JAVA -Xms64M -version" >&5
-  OUTPUT=`$JAVA -Xms64M -version 2>&1`
-  FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
-  FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
-  if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
-    boot_jdk_jvmargs_big="$boot_jdk_jvmargs_big -Xms64M"
-    JVM_ARG_OK=true
-  else
-    $ECHO "Arg failed:" >&5
-    $ECHO "$OUTPUT" >&5
-    JVM_ARG_OK=false
-  fi
-
-
-  # Maximum amount of heap memory.
-  # Maximum stack size.
-  if test "x$BUILD_NUM_BITS" = x32; then
-    JVM_MAX_HEAP=1100M
-    STACK_SIZE=768
-  else
-    # Running Javac on a JVM on a 64-bit machine, takes more space since 64-bit
-    # pointers are used. Apparently, we need to increase the heap and stack
-    # space for the jvm. More specifically, when running javac to build huge
-    # jdk batch
-    JVM_MAX_HEAP=1600M
-    STACK_SIZE=1536
-  fi
-
-  $ECHO "Check if jvm arg is ok: -Xmx$JVM_MAX_HEAP" >&5
-  $ECHO "Command: $JAVA -Xmx$JVM_MAX_HEAP -version" >&5
-  OUTPUT=`$JAVA -Xmx$JVM_MAX_HEAP -version 2>&1`
-  FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
-  FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
-  if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
-    boot_jdk_jvmargs_big="$boot_jdk_jvmargs_big -Xmx$JVM_MAX_HEAP"
-    JVM_ARG_OK=true
-  else
-    $ECHO "Arg failed:" >&5
-    $ECHO "$OUTPUT" >&5
-    JVM_ARG_OK=false
-  fi
-
-
-  $ECHO "Check if jvm arg is ok: -XX:ThreadStackSize=$STACK_SIZE" >&5
-  $ECHO "Command: $JAVA -XX:ThreadStackSize=$STACK_SIZE -version" >&5
-  OUTPUT=`$JAVA -XX:ThreadStackSize=$STACK_SIZE -version 2>&1`
-  FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
-  FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
-  if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
-    boot_jdk_jvmargs_big="$boot_jdk_jvmargs_big -XX:ThreadStackSize=$STACK_SIZE"
-    JVM_ARG_OK=true
-  else
-    $ECHO "Arg failed:" >&5
-    $ECHO "$OUTPUT" >&5
-    JVM_ARG_OK=false
-  fi
-
-
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $boot_jdk_jvmargs_big" >&5
-$as_echo "$boot_jdk_jvmargs_big" >&6; }
-
-  JAVA_FLAGS_BIG=$boot_jdk_jvmargs_big
-
-
-
-  { $as_echo "$as_me:${as_lineno-$LINENO}: checking flags for boot jdk java command for small workloads" >&5
-$as_echo_n "checking flags for boot jdk java command for small workloads... " >&6; }
-
-  # Use serial gc for small short lived tools if possible
-
-  $ECHO "Check if jvm arg is ok: -XX:+UseSerialGC" >&5
-  $ECHO "Command: $JAVA -XX:+UseSerialGC -version" >&5
-  OUTPUT=`$JAVA -XX:+UseSerialGC -version 2>&1`
-  FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
-  FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
-  if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
-    boot_jdk_jvmargs_small="$boot_jdk_jvmargs_small -XX:+UseSerialGC"
-    JVM_ARG_OK=true
-  else
-    $ECHO "Arg failed:" >&5
-    $ECHO "$OUTPUT" >&5
-    JVM_ARG_OK=false
-  fi
-
-
-  $ECHO "Check if jvm arg is ok: -Xms32M" >&5
-  $ECHO "Command: $JAVA -Xms32M -version" >&5
-  OUTPUT=`$JAVA -Xms32M -version 2>&1`
-  FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
-  FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
-  if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
-    boot_jdk_jvmargs_small="$boot_jdk_jvmargs_small -Xms32M"
-    JVM_ARG_OK=true
-  else
-    $ECHO "Arg failed:" >&5
-    $ECHO "$OUTPUT" >&5
-    JVM_ARG_OK=false
-  fi
-
-
-  $ECHO "Check if jvm arg is ok: -Xmx512M" >&5
-  $ECHO "Command: $JAVA -Xmx512M -version" >&5
-  OUTPUT=`$JAVA -Xmx512M -version 2>&1`
-  FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
-  FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
-  if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
-    boot_jdk_jvmargs_small="$boot_jdk_jvmargs_small -Xmx512M"
-    JVM_ARG_OK=true
-  else
-    $ECHO "Arg failed:" >&5
-    $ECHO "$OUTPUT" >&5
-    JVM_ARG_OK=false
-  fi
-
-
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $boot_jdk_jvmargs_small" >&5
-$as_echo "$boot_jdk_jvmargs_small" >&6; }
-
-  JAVA_FLAGS_SMALL=$boot_jdk_jvmargs_small
-
-
-  JAVA_TOOL_FLAGS_SMALL=""
-  for f in $JAVA_FLAGS_SMALL; do
-    JAVA_TOOL_FLAGS_SMALL="$JAVA_TOOL_FLAGS_SMALL -J$f"
-  done
-
-
-
 ###############################################################################
 #
 # Configure the sources to use. We can add or override individual directories.
@@ -42515,17 +42329,15 @@
       # no adjustment
       ;;
     fastdebug )
-      # Add compile time bounds checks.
-      CFLAGS_DEBUG_OPTIONS="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=1"
-      CXXFLAGS_DEBUG_OPTIONS="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=1"
+      # no adjustment
       ;;
     slowdebug )
-      # Add runtime bounds checks and symbol info.
-      CFLAGS_DEBUG_OPTIONS="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fstack-protector-all --param ssp-buffer-size=1"
-      CXXFLAGS_DEBUG_OPTIONS="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fstack-protector-all --param ssp-buffer-size=1"
+      # Add runtime stack smashing and undefined behavior checks
+      CFLAGS_DEBUG_OPTIONS="-fstack-protector-all --param ssp-buffer-size=1"
+      CXXFLAGS_DEBUG_OPTIONS="-fstack-protector-all --param ssp-buffer-size=1"
       if test "x$HAS_CFLAG_DETECT_UNDEFINED_BEHAVIOR" = "xtrue"; then
         CFLAGS_DEBUG_OPTIONS="$CFLAGS_DEBUG_OPTIONS  $CFLAG_DETECT_UNDEFINED_BEHAVIOR_FLAG"
-        CXXFLAGS_DEBUG_OPTIONS="$CXXFLAGS_DEBUG_OPTIONS $CFLAG_DETECT_UNDEFINED_BEHAVIOR_FLAG"
+        CXXFLAGS_DEBUG_OPTIONS="$CXXFLAGS_DEBUG_OPTIONS $CFLAG_DETECT_UNDEFINED_BEHAVIOR_FLAG"
       fi
       ;;
     esac
@@ -49884,8 +49696,8 @@
     # Number of jobs was not specified, calculate.
     { $as_echo "$as_me:${as_lineno-$LINENO}: checking for appropriate number of jobs to run in parallel" >&5
 $as_echo_n "checking for appropriate number of jobs to run in parallel... " >&6; }
-    # Approximate memory in GB, rounding up a bit.
-    memory_gb=`expr $MEMORY_SIZE / 1100`
+    # Approximate memory in GB.
+    memory_gb=`expr $MEMORY_SIZE / 1024`
     # Pick the lowest of memory in gb and number of cores.
     if test "$memory_gb" -lt "$NUM_CORES"; then
       JOBS="$memory_gb"
@@ -49911,6 +49723,208 @@
 
 
 
+# Setup arguments for the boot jdk (after cores and memory have been setup)
+
+  ##############################################################################
+  #
+  # Specify jvm options for anything that is run with the Boot JDK.
+  # Not all JVM:s accept the same arguments on the command line.
+  #
+
+# Check whether --with-boot-jdk-jvmargs was given.
+if test "${with_boot_jdk_jvmargs+set}" = set; then :
+  withval=$with_boot_jdk_jvmargs;
+fi
+
+
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking flags for boot jdk java command " >&5
+$as_echo_n "checking flags for boot jdk java command ... " >&6; }
+
+  # Disable special log output when a debug build is used as Boot JDK...
+
+  $ECHO "Check if jvm arg is ok: -XX:-PrintVMOptions -XX:-UnlockDiagnosticVMOptions -XX:-LogVMOutput" >&5
+  $ECHO "Command: $JAVA -XX:-PrintVMOptions -XX:-UnlockDiagnosticVMOptions -XX:-LogVMOutput -version" >&5
+  OUTPUT=`$JAVA -XX:-PrintVMOptions -XX:-UnlockDiagnosticVMOptions -XX:-LogVMOutput -version 2>&1`
+  FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
+  FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
+  if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
+    boot_jdk_jvmargs="$boot_jdk_jvmargs -XX:-PrintVMOptions -XX:-UnlockDiagnosticVMOptions -XX:-LogVMOutput"
+    JVM_ARG_OK=true
+  else
+    $ECHO "Arg failed:" >&5
+    $ECHO "$OUTPUT" >&5
+    JVM_ARG_OK=false
+  fi
+
+
+  # Apply user provided options.
+
+  $ECHO "Check if jvm arg is ok: $with_boot_jdk_jvmargs" >&5
+  $ECHO "Command: $JAVA $with_boot_jdk_jvmargs -version" >&5
+  OUTPUT=`$JAVA $with_boot_jdk_jvmargs -version 2>&1`
+  FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
+  FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
+  if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
+    boot_jdk_jvmargs="$boot_jdk_jvmargs $with_boot_jdk_jvmargs"
+    JVM_ARG_OK=true
+  else
+    $ECHO "Arg failed:" >&5
+    $ECHO "$OUTPUT" >&5
+    JVM_ARG_OK=false
+  fi
+
+
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $boot_jdk_jvmargs" >&5
+$as_echo "$boot_jdk_jvmargs" >&6; }
+
+  # For now, general JAVA_FLAGS are the same as the boot jdk jvmargs
+  JAVA_FLAGS=$boot_jdk_jvmargs
+
+
+
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking flags for boot jdk java command for big workloads" >&5
+$as_echo_n "checking flags for boot jdk java command for big workloads... " >&6; }
+
+  # Starting amount of heap memory.
+
+  $ECHO "Check if jvm arg is ok: -Xms64M" >&5
+  $ECHO "Command: $JAVA -Xms64M -version" >&5
+  OUTPUT=`$JAVA -Xms64M -version 2>&1`
+  FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
+  FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
+  if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
+    boot_jdk_jvmargs_big="$boot_jdk_jvmargs_big -Xms64M"
+    JVM_ARG_OK=true
+  else
+    $ECHO "Arg failed:" >&5
+    $ECHO "$OUTPUT" >&5
+    JVM_ARG_OK=false
+  fi
+
+
+  # Maximum amount of heap memory.
+  # Maximum stack size.
+  JVM_MAX_HEAP=`expr $MEMORY_SIZE / 2`
+  if test "x$BUILD_NUM_BITS" = x32; then
+    if test "$JVM_MAX_HEAP" -gt "1100"; then
+      JVM_MAX_HEAP=1100
+    elif test "$JVM_MAX_HEAP" -lt "512"; then
+      JVM_MAX_HEAP=512
+    fi
+    STACK_SIZE=768
+  else
+    # Running Javac on a JVM on a 64-bit machine, takes more space since 64-bit
+    # pointers are used. Apparently, we need to increase the heap and stack
+    # space for the jvm. More specifically, when running javac to build huge
+    # jdk batch
+    if test "$JVM_MAX_HEAP" -gt "1600"; then
+      JVM_MAX_HEAP=1600
+    elif test "$JVM_MAX_HEAP" -lt "512"; then
+      JVM_MAX_HEAP=512
+    fi
+    STACK_SIZE=1536
+  fi
+
+  $ECHO "Check if jvm arg is ok: -Xmx${JVM_MAX_HEAP}M" >&5
+  $ECHO "Command: $JAVA -Xmx${JVM_MAX_HEAP}M -version" >&5
+  OUTPUT=`$JAVA -Xmx${JVM_MAX_HEAP}M -version 2>&1`
+  FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
+  FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
+  if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
+    boot_jdk_jvmargs_big="$boot_jdk_jvmargs_big -Xmx${JVM_MAX_HEAP}M"
+    JVM_ARG_OK=true
+  else
+    $ECHO "Arg failed:" >&5
+    $ECHO "$OUTPUT" >&5
+    JVM_ARG_OK=false
+  fi
+
+
+  $ECHO "Check if jvm arg is ok: -XX:ThreadStackSize=$STACK_SIZE" >&5
+  $ECHO "Command: $JAVA -XX:ThreadStackSize=$STACK_SIZE -version" >&5
+  OUTPUT=`$JAVA -XX:ThreadStackSize=$STACK_SIZE -version 2>&1`
+  FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
+  FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
+  if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
+    boot_jdk_jvmargs_big="$boot_jdk_jvmargs_big -XX:ThreadStackSize=$STACK_SIZE"
+    JVM_ARG_OK=true
+  else
+    $ECHO "Arg failed:" >&5
+    $ECHO "$OUTPUT" >&5
+    JVM_ARG_OK=false
+  fi
+
+
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $boot_jdk_jvmargs_big" >&5
+$as_echo "$boot_jdk_jvmargs_big" >&6; }
+
+  JAVA_FLAGS_BIG=$boot_jdk_jvmargs_big
+
+
+
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking flags for boot jdk java command for small workloads" >&5
+$as_echo_n "checking flags for boot jdk java command for small workloads... " >&6; }
+
+  # Use serial gc for small short lived tools if possible
+
+  $ECHO "Check if jvm arg is ok: -XX:+UseSerialGC" >&5
+  $ECHO "Command: $JAVA -XX:+UseSerialGC -version" >&5
+  OUTPUT=`$JAVA -XX:+UseSerialGC -version 2>&1`
+  FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
+  FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
+  if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
+    boot_jdk_jvmargs_small="$boot_jdk_jvmargs_small -XX:+UseSerialGC"
+    JVM_ARG_OK=true
+  else
+    $ECHO "Arg failed:" >&5
+    $ECHO "$OUTPUT" >&5
+    JVM_ARG_OK=false
+  fi
+
+
+  $ECHO "Check if jvm arg is ok: -Xms32M" >&5
+  $ECHO "Command: $JAVA -Xms32M -version" >&5
+  OUTPUT=`$JAVA -Xms32M -version 2>&1`
+  FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
+  FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
+  if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
+    boot_jdk_jvmargs_small="$boot_jdk_jvmargs_small -Xms32M"
+    JVM_ARG_OK=true
+  else
+    $ECHO "Arg failed:" >&5
+    $ECHO "$OUTPUT" >&5
+    JVM_ARG_OK=false
+  fi
+
+
+  $ECHO "Check if jvm arg is ok: -Xmx512M" >&5
+  $ECHO "Command: $JAVA -Xmx512M -version" >&5
+  OUTPUT=`$JAVA -Xmx512M -version 2>&1`
+  FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
+  FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
+  if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
+    boot_jdk_jvmargs_small="$boot_jdk_jvmargs_small -Xmx512M"
+    JVM_ARG_OK=true
+  else
+    $ECHO "Arg failed:" >&5
+    $ECHO "$OUTPUT" >&5
+    JVM_ARG_OK=false
+  fi
+
+
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $boot_jdk_jvmargs_small" >&5
+$as_echo "$boot_jdk_jvmargs_small" >&6; }
+
+  JAVA_FLAGS_SMALL=$boot_jdk_jvmargs_small
+
+
+  JAVA_TOOL_FLAGS_SMALL=""
+  for f in $JAVA_FLAGS_SMALL; do
+    JAVA_TOOL_FLAGS_SMALL="$JAVA_TOOL_FLAGS_SMALL -J$f"
+  done
+
+
+
 # Setup smart javac (after cores and memory have been setup)
 
 
@@ -49927,44 +49941,11 @@
       as_fn_error $? "Could not execute server java: $SJAVAC_SERVER_JAVA" "$LINENO" 5
     fi
   else
-    SJAVAC_SERVER_JAVA=""
-    # Hotspot specific options.
-
-  $ECHO "Check if jvm arg is ok: -verbosegc" >&5
-  $ECHO "Command: $JAVA -verbosegc -version" >&5
-  OUTPUT=`$JAVA -verbosegc -version 2>&1`
-  FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
-  FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
-  if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
-    SJAVAC_SERVER_JAVA="$SJAVAC_SERVER_JAVA -verbosegc"
-    JVM_ARG_OK=true
-  else
-    $ECHO "Arg failed:" >&5
-    $ECHO "$OUTPUT" >&5
-    JVM_ARG_OK=false
-  fi
-
-    # JRockit specific options.
-
-  $ECHO "Check if jvm arg is ok: -Xverbose:gc" >&5
-  $ECHO "Command: $JAVA -Xverbose:gc -version" >&5
-  OUTPUT=`$JAVA -Xverbose:gc -version 2>&1`
-  FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
-  FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
-  if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
-    SJAVAC_SERVER_JAVA="$SJAVAC_SERVER_JAVA -Xverbose:gc"
-    JVM_ARG_OK=true
-  else
-    $ECHO "Arg failed:" >&5
-    $ECHO "$OUTPUT" >&5
-    JVM_ARG_OK=false
-  fi
-
-    SJAVAC_SERVER_JAVA="$JAVA $SJAVAC_SERVER_JAVA"
-  fi
-
-
-  if test "$MEMORY_SIZE" -gt "2500"; then
+    SJAVAC_SERVER_JAVA="$JAVA"
+  fi
+
+
+  if test "$MEMORY_SIZE" -gt "3000"; then
 
   $ECHO "Check if jvm arg is ok: -d64" >&5
   $ECHO "Command: $SJAVAC_SERVER_JAVA -d64 -version" >&5
@@ -49986,67 +49967,31 @@
     fi
   fi
 
+  MX_VALUE=`expr $MEMORY_SIZE / 2`
   if test "$JVM_64BIT" = true; then
-    if test "$MEMORY_SIZE" -gt "17000"; then
-
-  $ECHO "Check if jvm arg is ok: -Xms10G -Xmx10G" >&5
-  $ECHO "Command: $SJAVAC_SERVER_JAVA -Xms10G -Xmx10G -version" >&5
-  OUTPUT=`$SJAVAC_SERVER_JAVA -Xms10G -Xmx10G -version 2>&1`
-  FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
-  FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
-  if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
-    SJAVAC_SERVER_JAVA="$SJAVAC_SERVER_JAVA -Xms10G -Xmx10G"
-    JVM_ARG_OK=true
-  else
-    $ECHO "Arg failed:" >&5
-    $ECHO "$OUTPUT" >&5
-    JVM_ARG_OK=false
-  fi
-
-    fi
-    if test "$MEMORY_SIZE" -gt "10000" && test "$JVM_ARG_OK" = false; then
-
-  $ECHO "Check if jvm arg is ok: -Xms6G -Xmx6G" >&5
-  $ECHO "Command: $SJAVAC_SERVER_JAVA -Xms6G -Xmx6G -version" >&5
-  OUTPUT=`$SJAVAC_SERVER_JAVA -Xms6G -Xmx6G -version 2>&1`
+    # Set ms lower than mx since more than one instance of the server might
+    # get launched at the same time before they figure out which instance won.
+    MS_VALUE=512
+    if test "$MX_VALUE" -gt "2048"; then
+      MX_VALUE=2048
+    fi
+  else
+    MS_VALUE=256
+    if test "$MX_VALUE" -gt "1500"; then
+      MX_VALUE=1500
+    fi
+  fi
+  if test "$MX_VALUE" -lt "512"; then
+    MX_VALUE=512
+  fi
+
+  $ECHO "Check if jvm arg is ok: -Xms${MS_VALUE}M -Xmx${MX_VALUE}M" >&5
+  $ECHO "Command: $SJAVAC_SERVER_JAVA -Xms${MS_VALUE}M -Xmx${MX_VALUE}M -version" >&5
+  OUTPUT=`$SJAVAC_SERVER_JAVA -Xms${MS_VALUE}M -Xmx${MX_VALUE}M -version 2>&1`
   FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
   FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
   if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
-    SJAVAC_SERVER_JAVA="$SJAVAC_SERVER_JAVA -Xms6G -Xmx6G"
-    JVM_ARG_OK=true
-  else
-    $ECHO "Arg failed:" >&5
-    $ECHO "$OUTPUT" >&5
-    JVM_ARG_OK=false
-  fi
-
-    fi
-    if test "$MEMORY_SIZE" -gt "5000" && test "$JVM_ARG_OK" = false; then
-
-  $ECHO "Check if jvm arg is ok: -Xms1G -Xmx3G" >&5
-  $ECHO "Command: $SJAVAC_SERVER_JAVA -Xms1G -Xmx3G -version" >&5
-  OUTPUT=`$SJAVAC_SERVER_JAVA -Xms1G -Xmx3G -version 2>&1`
-  FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
-  FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
-  if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
-    SJAVAC_SERVER_JAVA="$SJAVAC_SERVER_JAVA -Xms1G -Xmx3G"
-    JVM_ARG_OK=true
-  else
-    $ECHO "Arg failed:" >&5
-    $ECHO "$OUTPUT" >&5
-    JVM_ARG_OK=false
-  fi
-
-    fi
-    if test "$MEMORY_SIZE" -gt "3800" && test "$JVM_ARG_OK" = false; then
-
-  $ECHO "Check if jvm arg is ok: -Xms1G -Xmx2500M" >&5
-  $ECHO "Command: $SJAVAC_SERVER_JAVA -Xms1G -Xmx2500M -version" >&5
-  OUTPUT=`$SJAVAC_SERVER_JAVA -Xms1G -Xmx2500M -version 2>&1`
-  FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
-  FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
-  if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
-    SJAVAC_SERVER_JAVA="$SJAVAC_SERVER_JAVA -Xms1G -Xmx2500M"
+    SJAVAC_SERVER_JAVA="$SJAVAC_SERVER_JAVA -Xms${MS_VALUE}M -Xmx${MX_VALUE}M"
     JVM_ARG_OK=true
   else
     $ECHO "Arg failed:" >&5
@@ -50054,62 +49999,7 @@
     JVM_ARG_OK=false
   fi
 
-    fi
-  fi
-  if test "$MEMORY_SIZE" -gt "2500" && test "$JVM_ARG_OK" = false; then
-
-  $ECHO "Check if jvm arg is ok: -Xms1000M -Xmx1500M" >&5
-  $ECHO "Command: $SJAVAC_SERVER_JAVA -Xms1000M -Xmx1500M -version" >&5
-  OUTPUT=`$SJAVAC_SERVER_JAVA -Xms1000M -Xmx1500M -version 2>&1`
-  FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
-  FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
-  if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
-    SJAVAC_SERVER_JAVA="$SJAVAC_SERVER_JAVA -Xms1000M -Xmx1500M"
-    JVM_ARG_OK=true
-  else
-    $ECHO "Arg failed:" >&5
-    $ECHO "$OUTPUT" >&5
-    JVM_ARG_OK=false
-  fi
-
-  fi
-  if test "$MEMORY_SIZE" -gt "1000" && test "$JVM_ARG_OK" = false; then
-
-  $ECHO "Check if jvm arg is ok: -Xms400M -Xmx1100M" >&5
-  $ECHO "Command: $SJAVAC_SERVER_JAVA -Xms400M -Xmx1100M -version" >&5
-  OUTPUT=`$SJAVAC_SERVER_JAVA -Xms400M -Xmx1100M -version 2>&1`
-  FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
-  FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
-  if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
-    SJAVAC_SERVER_JAVA="$SJAVAC_SERVER_JAVA -Xms400M -Xmx1100M"
-    JVM_ARG_OK=true
-  else
-    $ECHO "Arg failed:" >&5
-    $ECHO "$OUTPUT" >&5
-    JVM_ARG_OK=false
-  fi
-
-  fi
-  if test "$JVM_ARG_OK" = false; then
-
-  $ECHO "Check if jvm arg is ok: -Xms256M -Xmx512M" >&5
-  $ECHO "Command: $SJAVAC_SERVER_JAVA -Xms256M -Xmx512M -version" >&5
-  OUTPUT=`$SJAVAC_SERVER_JAVA -Xms256M -Xmx512M -version 2>&1`
-  FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
-  FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
-  if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
-    SJAVAC_SERVER_JAVA="$SJAVAC_SERVER_JAVA -Xms256M -Xmx512M"
-    JVM_ARG_OK=true
-  else
-    $ECHO "Arg failed:" >&5
-    $ECHO "$OUTPUT" >&5
-    JVM_ARG_OK=false
-  fi
-
-  fi
-
-  { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to use sjavac" >&5
-$as_echo_n "checking whether to use sjavac... " >&6; }
+
   # Check whether --enable-sjavac was given.
 if test "${enable_sjavac+set}" = set; then :
   enableval=$enable_sjavac; ENABLE_SJAVAC="${enableval}"
@@ -50117,6 +50007,13 @@
   ENABLE_SJAVAC='no'
 fi
 
+  if test "x$JVM_ARG_OK" = "xfalse"; then
+    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Could not set -Xms${MS_VALUE}M -Xmx${MX_VALUE}M, disabling sjavac" >&5
+$as_echo "$as_me: WARNING: Could not set -Xms${MS_VALUE}M -Xmx${MX_VALUE}M, disabling sjavac" >&2;}
+    ENABLE_SJAVAC=no;
+  fi
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to use sjavac" >&5
+$as_echo_n "checking whether to use sjavac... " >&6; }
   { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ENABLE_SJAVAC" >&5
 $as_echo "$ENABLE_SJAVAC" >&6; }
 
--- a/common/bin/hgforest.sh	Fri Aug 29 11:58:43 2014 -0700
+++ b/common/bin/hgforest.sh	Wed Jul 05 19:58:32 2017 +0200
@@ -77,6 +77,11 @@
   shift
 done
 
+# debug mode
+if [ "${HGFOREST_DEBUG:-false}" = "true" ] ; then
+  global_opts="${global_opts} --debug"
+fi
+
 # silence standard output?
 if [ ${qflag} = "true" ] ; then
   global_opts="${global_opts} -q"
@@ -89,14 +94,26 @@
 fi
 
 # Make sure we have a command.
-if [ $# -lt 1 -o -z "${1:-}" ] ; then
-  echo "ERROR: No command to hg supplied!"
-  usage
+if [ ${#} -lt 1 -o -z "${1:-}" ] ; then
+  echo "ERROR: No command to hg supplied!" > ${status_output}
+  usage > ${status_output}
 fi
 
-command="$1"; shift
+# grab command
+command="${1}"; shift
+
+if [ ${vflag} = "true" ] ; then
+  echo "# Mercurial command: ${command}" > ${status_output}
+fi
+
+
+# capture command options and arguments (if any)
 command_args="${@:-}"
 
+if [ ${vflag} = "true" ] ; then
+  echo "# Mercurial command arguments: ${command_args}" > ${status_output}
+fi
+
 # Clean out the temporary directory that stores the pid files.
 tmp=/tmp/forest.$$
 rm -f -r ${tmp}
@@ -104,7 +121,8 @@
 
 
 if [ "${HGFOREST_DEBUG:-false}" = "true" ] ; then
-  echo "DEBUG: temp files are in: ${tmp}"
+  # ignores redirection.
+  echo "DEBUG: temp files are in: ${tmp}" >&2
 fi
 
 # Check if we can use fifos for monitoring sub-process completion.
@@ -377,21 +395,33 @@
       fi
     fi
   done
+
+  if [ ${have_fifos} = "true" ]; then
+    # done with the fifo
+    exec 3>&-
+  fi
 fi
 
 # Wait for all subprocesses to complete
 wait
 
 # Terminate with exit 0 only if all subprocesses were successful
+# Terminate with highest exit code of subprocesses
 ec=0
 if [ -d ${tmp} ]; then
   rcfiles="`(ls -a ${tmp}/*.pid.rc 2> /dev/null) || echo ''`"
   for rc in ${rcfiles} ; do
     exit_code=`cat ${rc} | tr -d ' \n\r'`
     if [ "${exit_code}" != "0" ] ; then
+      if [ ${exit_code} -gt 1 ]; then
+        # mercurial exit codes greater than "1" signal errors.
       repo="`echo ${rc} | sed -e 's@^'${tmp}'@@' -e 's@/*\([^/]*\)\.pid\.rc$@\1@' -e 's@_@/@g'`"
       echo "WARNING: ${repo} exited abnormally (${exit_code})" > ${status_output}
-      ec=1
+      fi
+      if [ ${exit_code} -gt ${ec} ]; then
+        # assume that larger exit codes are more significant
+        ec=${exit_code}
+      fi
     fi
   done
 fi
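Note: hgforest.sh now reports the highest exit code among the per-repository subprocesses rather than collapsing every failure to 1, and only codes above 1 are flagged as abnormal, since per the comment above larger Mercurial exit codes signal real errors. A condensed sketch of the aggregation loop (guards from the script omitted):

    ec=0
    for rc in ${tmp}/*.pid.rc ; do
      exit_code=`cat ${rc} | tr -d ' \n\r'`
      if [ ${exit_code} -gt ${ec} ] ; then
        ec=${exit_code}   # keep the most significant failure
      fi
    done
    exit ${ec}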
--- a/get_source.sh	Fri Aug 29 11:58:43 2014 -0700
+++ b/get_source.sh	Wed Jul 05 19:58:32 2017 +0200
@@ -67,7 +67,7 @@
   error "Could not locate Mercurial command"
 fi
 
-hgversion="`hg --version 2> /dev/null | sed -n -e 's@^Mercurial Distributed SCM (version \([^+]*\).*)\$@\1@p'`"
+hgversion="`LANGUAGE=en hg --version 2> /dev/null | sed -n -e 's@^Mercurial Distributed SCM (version \([^+]*\).*)\$@\1@p'`"
 if [ "x${hgversion}" = "x" ] ; then
   error "Could not determine Mercurial version of $hgwhere"
 fi
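Note: prefixing LANGUAGE=en pins Mercurial's version banner to English so the anchored sed pattern can match on localized systems. A minimal sketch of the failure being avoided (the localized banner text is hypothetical):

    banner='Mercurial Distributed SCM (Version 3.1)'   # hypothetical localized output
    echo "$banner" | sed -n -e 's@^Mercurial Distributed SCM (version \([^+]*\).*)$@\1@p'
    # prints nothing: the English-only pattern fails, leaving hgversion empty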
--- a/hotspot/.hgtags	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/.hgtags	Wed Jul 05 19:58:32 2017 +0200
@@ -430,3 +430,4 @@
 6de94e8693240cec8aae11f6b42f43433456a733 jdk9-b25
 48b95a073d752d6891cc0d1d2836b321ecf3ce0c jdk9-b26
 f95347244306affc32ce3056f27ceff7b2100810 jdk9-b27
+657294869d7ff063e055f5492cab7ce5612ca851 jdk9-b28
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1CollectedHeap.java	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1CollectedHeap.java	Wed Jul 05 19:58:32 2017 +0200
@@ -45,8 +45,8 @@
 public class G1CollectedHeap extends SharedHeap {
     // HeapRegionSeq _seq;
     static private long hrsFieldOffset;
-    // MemRegion _g1_committed;
-    static private long g1CommittedFieldOffset;
+    // MemRegion _g1_reserved;
+    static private long g1ReservedFieldOffset;
     // size_t _summary_bytes_used;
     static private CIntegerField summaryBytesUsedField;
     // G1MonitoringSupport* _g1mm;
@@ -68,7 +68,6 @@
         Type type = db.lookupType("G1CollectedHeap");
 
         hrsFieldOffset = type.getField("_hrs").getOffset();
-        g1CommittedFieldOffset = type.getField("_g1_committed").getOffset();
         summaryBytesUsedField = type.getCIntegerField("_summary_bytes_used");
         g1mmField = type.getAddressField("_g1mm");
         oldSetFieldOffset = type.getField("_old_set").getOffset();
@@ -76,9 +75,7 @@
     }
 
     public long capacity() {
-        Address g1CommittedAddr = addr.addOffsetTo(g1CommittedFieldOffset);
-        MemRegion g1Committed = new MemRegion(g1CommittedAddr);
-        return g1Committed.byteSize();
+        return hrs().capacity();
     }
 
     public long used() {
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1HeapRegionTable.java	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1HeapRegionTable.java	Wed Jul 05 19:58:32 2017 +0200
@@ -93,19 +93,35 @@
     private class HeapRegionIterator implements Iterator<HeapRegion> {
         private long index;
         private long length;
+        private HeapRegion next;
 
-        @Override
-        public boolean hasNext() { return index < length; }
+        public HeapRegion positionToNext() {
+          HeapRegion result = next;
+          while (index < length && at(index) == null) {
+            index++;
+          }
+          if (index < length) {
+            next = at(index);
+            index++; // restart search at next element
+          } else {
+            next = null;
+          }
+          return result;
+        }
 
         @Override
-        public HeapRegion next() { return at(index++);    }
+        public boolean hasNext() { return next != null;     }
+
+        @Override
+        public HeapRegion next() { return positionToNext(); }
 
         @Override
-        public void remove()     { /* not supported */    }
+        public void remove()     { /* not supported */      }
 
-        HeapRegionIterator(long committedLength) {
+        HeapRegionIterator(long totalLength) {
             index = 0;
-            length = committedLength;
+            length = totalLength;
+            positionToNext();
         }
     }
 
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegionSeq.java	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegionSeq.java	Wed Jul 05 19:58:32 2017 +0200
@@ -43,7 +43,7 @@
     // G1HeapRegionTable _regions
     static private long regionsFieldOffset;
     // uint _committed_length
-    static private CIntegerField committedLengthField;
+    static private CIntegerField numCommittedField;
 
     static {
         VM.registerVMInitializedObserver(new Observer() {
@@ -57,7 +57,7 @@
         Type type = db.lookupType("HeapRegionSeq");
 
         regionsFieldOffset = type.getField("_regions").getOffset();
-        committedLengthField = type.getCIntegerField("_committed_length");
+        numCommittedField = type.getCIntegerField("_num_committed");
     }
 
     private G1HeapRegionTable regions() {
@@ -66,16 +66,20 @@
                                                              regionsAddr);
     }
 
+    public long capacity() {
+        return length() * HeapRegion.grainBytes();
+    }
+
     public long length() {
         return regions().length();
     }
 
     public long committedLength() {
-        return committedLengthField.getValue(addr);
+        return numCommittedField.getValue(addr);
     }
 
     public Iterator<HeapRegion> heapRegionIterator() {
-        return regions().heapRegionIterator(committedLength());
+        return regions().heapRegionIterator(length());
     }
 
     public HeapRegionSeq(Address addr) {
--- a/hotspot/make/bsd/makefiles/gcc.make	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/make/bsd/makefiles/gcc.make	Wed Jul 05 19:58:32 2017 +0200
@@ -508,13 +508,9 @@
 
 ifeq ($(USE_CLANG),)
   # Enable bounds checking.
-  # _FORTIFY_SOURCE appears in GCC 4.0+
   ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 3 \) )" "1"
-    # compile time size bounds checks
-    FASTDEBUG_CFLAGS += -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=1
-
-    # and runtime size bounds checks and paranoid stack smashing checks.
-    DEBUG_CFLAGS += -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fstack-protector-all --param ssp-buffer-size=1
+    # stack smashing checks.
+    DEBUG_CFLAGS += -fstack-protector-all --param ssp-buffer-size=1
   endif
 endif
 
--- a/hotspot/make/excludeSrc.make	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/make/excludeSrc.make	Wed Jul 05 19:58:32 2017 +0200
@@ -70,7 +70,8 @@
       CXXFLAGS += -DINCLUDE_CDS=0
       CFLAGS += -DINCLUDE_CDS=0
 
-      Src_Files_EXCLUDE += filemap.cpp metaspaceShared.cpp
+      Src_Files_EXCLUDE += filemap.cpp metaspaceShared*.cpp sharedPathsMiscInfo.cpp \
+        systemDictionaryShared.cpp classLoaderExt.cpp sharedClassUtil.cpp
 endif
 
 ifeq ($(INCLUDE_ALL_GCS), false)
--- a/hotspot/make/jprt.properties	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/make/jprt.properties	Wed Jul 05 19:58:32 2017 +0200
@@ -374,6 +374,7 @@
   ${jprt.make.rule.test.targets.standard.reg.group:GROUP=hotspot_compiler}, \
   ${jprt.make.rule.test.targets.standard.reg.group:GROUP=hotspot_gc}, \
   ${jprt.make.rule.test.targets.standard.reg.group:GROUP=hotspot_runtime}, \
+  ${jprt.make.rule.test.targets.standard.reg.group:GROUP=hotspot_runtime_closed}, \
   ${jprt.make.rule.test.targets.standard.reg.group:GROUP=hotspot_serviceability}
 
 jprt.make.rule.test.targets.embedded = \
--- a/hotspot/make/linux/makefiles/gcc.make	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/make/linux/makefiles/gcc.make	Wed Jul 05 19:58:32 2017 +0200
@@ -365,16 +365,13 @@
 
 ifeq ($(USE_CLANG),)
   # Enable bounds checking.
-  # _FORTIFY_SOURCE appears in GCC 4.0+
   ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 3 \) )" "1"
-    # compile time size bounds checks
-    FASTDEBUG_CFLAGS += -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=1
-
-    # and runtime size bounds checks and paranoid stack smashing checks.
-    DEBUG_CFLAGS += -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fstack-protector-all --param ssp-buffer-size=1
+    # stack smashing checks.
+    DEBUG_CFLAGS += -fstack-protector-all --param ssp-buffer-size=1
   endif
 endif
 
+
 # If we are building HEADLESS, pass on to VM
 # so it can set the java.awt.headless property
 ifdef HEADLESS
--- a/hotspot/make/solaris/makefiles/gcc.make	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/make/solaris/makefiles/gcc.make	Wed Jul 05 19:58:32 2017 +0200
@@ -240,11 +240,7 @@
 endif
 
 # Enable bounds checking.
-# _FORTIFY_SOURCE appears in GCC 4.0+
 ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 3 \) )" "1"
-  # compile time size bounds checks
-  FASTDEBUG_CFLAGS += -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=1
-
-  # and runtime size bounds checks and paranoid stack smashing checks.
-  DEBUG_CFLAGS += -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fstack-protector-all --param ssp-buffer-size=1
+  # stack smashing checks.
+  DEBUG_CFLAGS += -fstack-protector-all --param ssp-buffer-size=1
 endif
--- a/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -731,7 +731,7 @@
     if (method->is_static())
       object = method->constants()->pool_holder()->java_mirror();
     else
-      object = (oop) locals[0];
+      object = (oop) (void*)locals[0];
     monitor->set_obj(object);
   }
 
--- a/hotspot/src/cpu/zero/vm/frame_zero.inline.hpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/cpu/zero/vm/frame_zero.inline.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -26,6 +26,8 @@
 #ifndef CPU_ZERO_VM_FRAME_ZERO_INLINE_HPP
 #define CPU_ZERO_VM_FRAME_ZERO_INLINE_HPP
 
+#include "code/codeCache.hpp"
+
 // Constructors
 
 inline frame::frame() {
--- a/hotspot/src/os/linux/vm/os_linux.cpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/os/linux/vm/os_linux.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -2246,7 +2246,7 @@
   const siginfo_t* si = (const siginfo_t*)siginfo;
 
   os::Posix::print_siginfo_brief(st, si);
-
+#if INCLUDE_CDS
   if (si && (si->si_signo == SIGBUS || si->si_signo == SIGSEGV) &&
       UseSharedSpaces) {
     FileMapInfo* mapinfo = FileMapInfo::current_info();
@@ -2256,6 +2256,7 @@
                 " possible disk/network problem.");
     }
   }
+#endif
   st->cr();
 }
 
--- a/hotspot/src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -66,129 +66,92 @@
 }
 
 int VM_Version::platform_features(int features) {
-  // getisax(2), SI_ARCHITECTURE_32, and SI_ARCHITECTURE_64 are
-  // supported on Solaris 10 and later.
-  if (os::Solaris::supports_getisax()) {
+  assert(os::Solaris::supports_getisax(), "getisax() must be available");
 
-    // Check 32-bit architecture.
-    do_sysinfo(SI_ARCHITECTURE_32, "sparc", &features, v8_instructions_m);
+  // Check 32-bit architecture.
+  do_sysinfo(SI_ARCHITECTURE_32, "sparc", &features, v8_instructions_m);
 
-    // Check 64-bit architecture.
-    do_sysinfo(SI_ARCHITECTURE_64, "sparcv9", &features, generic_v9_m);
+  // Check 64-bit architecture.
+  do_sysinfo(SI_ARCHITECTURE_64, "sparcv9", &features, generic_v9_m);
 
-    // Extract valid instruction set extensions.
-    uint_t avs[2];
-    uint_t avn = os::Solaris::getisax(avs, 2);
-    assert(avn <= 2, "should return two or less av's");
-    uint_t av = avs[0];
+  // Extract valid instruction set extensions.
+  uint_t avs[2];
+  uint_t avn = os::Solaris::getisax(avs, 2);
+  assert(avn <= 2, "should return two or less av's");
+  uint_t av = avs[0];
 
 #ifndef PRODUCT
-    if (PrintMiscellaneous && Verbose) {
-      tty->print("getisax(2) returned: " PTR32_FORMAT, av);
-      if (avn > 1) {
-        tty->print(", " PTR32_FORMAT, avs[1]);
-      }
-      tty->cr();
+  if (PrintMiscellaneous && Verbose) {
+    tty->print("getisax(2) returned: " PTR32_FORMAT, av);
+    if (avn > 1) {
+      tty->print(", " PTR32_FORMAT, avs[1]);
     }
+    tty->cr();
+  }
 #endif
 
-    if (av & AV_SPARC_MUL32)  features |= hardware_mul32_m;
-    if (av & AV_SPARC_DIV32)  features |= hardware_div32_m;
-    if (av & AV_SPARC_FSMULD) features |= hardware_fsmuld_m;
-    if (av & AV_SPARC_V8PLUS) features |= v9_instructions_m;
-    if (av & AV_SPARC_POPC)   features |= hardware_popc_m;
-    if (av & AV_SPARC_VIS)    features |= vis1_instructions_m;
-    if (av & AV_SPARC_VIS2)   features |= vis2_instructions_m;
-    if (avn > 1) {
-      uint_t av2 = avs[1];
+  if (av & AV_SPARC_MUL32)  features |= hardware_mul32_m;
+  if (av & AV_SPARC_DIV32)  features |= hardware_div32_m;
+  if (av & AV_SPARC_FSMULD) features |= hardware_fsmuld_m;
+  if (av & AV_SPARC_V8PLUS) features |= v9_instructions_m;
+  if (av & AV_SPARC_POPC)   features |= hardware_popc_m;
+  if (av & AV_SPARC_VIS)    features |= vis1_instructions_m;
+  if (av & AV_SPARC_VIS2)   features |= vis2_instructions_m;
+  if (avn > 1) {
+    uint_t av2 = avs[1];
 #ifndef AV2_SPARC_SPARC5
 #define AV2_SPARC_SPARC5 0x00000008 /* The 29 new fp and sub instructions */
 #endif
-      if (av2 & AV2_SPARC_SPARC5)       features |= sparc5_instructions_m;
-    }
+    if (av2 & AV2_SPARC_SPARC5)       features |= sparc5_instructions_m;
+  }
 
-    // Next values are not defined before Solaris 10
-    // but Solaris 8 is used for jdk6 update builds.
+  // We only build on Solaris 10 and up, but some of the values below
+  // are not defined on all versions of Solaris 10, so we define them,
+  // if necessary.
 #ifndef AV_SPARC_ASI_BLK_INIT
 #define AV_SPARC_ASI_BLK_INIT 0x0080  /* ASI_BLK_INIT_xxx ASI */
 #endif
-    if (av & AV_SPARC_ASI_BLK_INIT) features |= blk_init_instructions_m;
+  if (av & AV_SPARC_ASI_BLK_INIT) features |= blk_init_instructions_m;
 
 #ifndef AV_SPARC_FMAF
 #define AV_SPARC_FMAF 0x0100        /* Fused Multiply-Add */
 #endif
-    if (av & AV_SPARC_FMAF)         features |= fmaf_instructions_m;
+  if (av & AV_SPARC_FMAF)         features |= fmaf_instructions_m;
 
 #ifndef AV_SPARC_FMAU
-#define    AV_SPARC_FMAU    0x0200  /* Unfused Multiply-Add */
+#define AV_SPARC_FMAU    0x0200  /* Unfused Multiply-Add */
 #endif
-    if (av & AV_SPARC_FMAU)         features |= fmau_instructions_m;
+  if (av & AV_SPARC_FMAU)         features |= fmau_instructions_m;
 
 #ifndef AV_SPARC_VIS3
-#define    AV_SPARC_VIS3    0x0400  /* VIS3 instruction set extensions */
+#define AV_SPARC_VIS3    0x0400  /* VIS3 instruction set extensions */
 #endif
-    if (av & AV_SPARC_VIS3)         features |= vis3_instructions_m;
+  if (av & AV_SPARC_VIS3)         features |= vis3_instructions_m;
 
 #ifndef AV_SPARC_CBCOND
 #define AV_SPARC_CBCOND 0x10000000  /* compare and branch instrs supported */
 #endif
-    if (av & AV_SPARC_CBCOND)       features |= cbcond_instructions_m;
+  if (av & AV_SPARC_CBCOND)       features |= cbcond_instructions_m;
 
 #ifndef AV_SPARC_AES
 #define AV_SPARC_AES 0x00020000  /* aes instrs supported */
 #endif
-    if (av & AV_SPARC_AES)       features |= aes_instructions_m;
+  if (av & AV_SPARC_AES)       features |= aes_instructions_m;
 
 #ifndef AV_SPARC_SHA1
 #define AV_SPARC_SHA1   0x00400000  /* sha1 instruction supported */
 #endif
-    if (av & AV_SPARC_SHA1)         features |= sha1_instruction_m;
+  if (av & AV_SPARC_SHA1)         features |= sha1_instruction_m;
 
 #ifndef AV_SPARC_SHA256
 #define AV_SPARC_SHA256 0x00800000  /* sha256 instruction supported */
 #endif
-    if (av & AV_SPARC_SHA256)       features |= sha256_instruction_m;
+  if (av & AV_SPARC_SHA256)       features |= sha256_instruction_m;
 
 #ifndef AV_SPARC_SHA512
 #define AV_SPARC_SHA512 0x01000000  /* sha512 instruction supported */
 #endif
-    if (av & AV_SPARC_SHA512)       features |= sha512_instruction_m;
-
-  } else {
-    // getisax(2) failed, use the old legacy code.
-#ifndef PRODUCT
-    if (PrintMiscellaneous && Verbose)
-      tty->print_cr("getisax(2) is not supported.");
-#endif
-
-    char   tmp;
-    size_t bufsize = sysinfo(SI_ISALIST, &tmp, 1);
-    char*  buf     = (char*) os::malloc(bufsize, mtInternal);
-
-    if (buf != NULL) {
-      if (sysinfo(SI_ISALIST, buf, bufsize) == bufsize) {
-        // Figure out what kind of sparc we have
-        char *sparc_string = strstr(buf, "sparc");
-        if (sparc_string != NULL) {              features |= v8_instructions_m;
-          if (sparc_string[5] == 'v') {
-            if (sparc_string[6] == '8') {
-              if (sparc_string[7] == '-') {      features |= hardware_mul32_m;
-                                                 features |= hardware_div32_m;
-              } else if (sparc_string[7] == 'p') features |= generic_v9_m;
-              else                               features |= generic_v8_m;
-            } else if (sparc_string[6] == '9')   features |= generic_v9_m;
-          }
-        }
-
-        // Check for visualization instructions
-        char *vis = strstr(buf, "vis");
-        if (vis != NULL) {                       features |= vis1_instructions_m;
-          if (vis[3] == '2')                     features |= vis2_instructions_m;
-        }
-      }
-      os::free(buf);
-    }
-  }
+  if (av & AV_SPARC_SHA512)       features |= sha512_instruction_m;
 
   // Determine the machine type.
   do_sysinfo(SI_MACHINE, "sun4v", &features, sun4v_m);
@@ -203,27 +166,7 @@
         kstat_named_t* knm = (kstat_named_t *)ksp->ks_data;
         for (int i = 0; i < ksp->ks_ndata; i++) {
           if (strcmp((const char*)&(knm[i].name),"implementation") == 0) {
-#ifndef KSTAT_DATA_STRING
-#define KSTAT_DATA_STRING   9
-#endif
-            if (knm[i].data_type == KSTAT_DATA_CHAR) {
-              // VM is running on Solaris 8 which does not have value.str.
-              implementation = &(knm[i].value.c[0]);
-            } else if (knm[i].data_type == KSTAT_DATA_STRING) {
-              // VM is running on Solaris 10.
-#ifndef KSTAT_NAMED_STR_PTR
-              // Solaris 8 was used to build VM, define the structure it misses.
-              struct str_t {
-                union {
-                  char *ptr;     /* NULL-term string */
-                  char __pad[8]; /* 64-bit padding */
-                } addr;
-                uint32_t len;    /* # bytes for strlen + '\0' */
-              };
-#define KSTAT_NAMED_STR_PTR(knptr) (( (str_t*)&((knptr)->value) )->addr.ptr)
-#endif
-              implementation = KSTAT_NAMED_STR_PTR(&knm[i]);
-            }
+            implementation = KSTAT_NAMED_STR_PTR(&knm[i]);
 #ifndef PRODUCT
             if (PrintMiscellaneous && Verbose) {
               tty->print_cr("cpu_info.implementation: %s", implementation);
@@ -234,6 +177,7 @@
 
             for (int i = 0; impl[i] != 0; i++)
               impl[i] = (char)toupper((uint)impl[i]);
+
             if (strstr(impl, "SPARC64") != NULL) {
               features |= sparc64_family_m;
             } else if (strstr(impl, "SPARC-M") != NULL) {
@@ -248,8 +192,10 @@
               if (strstr(impl, "SPARC") == NULL) {
 #ifndef PRODUCT
                 // kstat on Solaris 8 virtual machines (branded zones)
-                // returns "(unsupported)" implementation.
-                warning("kstat cpu_info implementation = '%s', should contain SPARC", impl);
+                // returns "(unsupported)" implementation. Solaris 8 is not
+                // supported anymore, but include this check to be on the
+                // safe side.
+                warning("kstat cpu_info implementation = '%s', assume generic SPARC", impl);
 #endif
                 implementation = "SPARC";
               }
--- a/hotspot/src/share/vm/c1/c1_Runtime1.cpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/c1/c1_Runtime1.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -546,13 +546,18 @@
     // normal bytecode execution.
     thread->clear_exception_oop_and_pc();
 
+    Handle original_exception(thread, exception());
+
     continuation = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, false, false);
     // If an exception was thrown during exception dispatch, the exception oop may have changed
     thread->set_exception_oop(exception());
     thread->set_exception_pc(pc);
 
     // the exception cache is used only by non-implicit exceptions
-    if (continuation != NULL) {
+    // Update the exception cache only if no other exception was
+    // thrown while the compiled exception handler was being
+    // computed.
+    if (continuation != NULL && original_exception() == exception()) {
       nm->add_handler_for_exception_and_pc(exception, pc, continuation);
     }
   }
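
The guard above is a general snapshot-and-compare pattern: capture the exception oop in a Handle before the call that may replace it, then commit the cache entry only if the oop is unchanged afterwards. A minimal standalone sketch of the same pattern, using hypothetical stand-in types rather than HotSpot code:

    #include <cstdio>
    #include <map>

    // Hypothetical stand-ins for the exception oop and handler pc.
    struct Exc { int id; };
    typedef const void* Pc;

    static std::map<int, Pc> handler_cache;   // like nmethod's exception cache

    // May replace *exc as a side effect, mimicking a nested exception being
    // raised while the handler is computed.
    static Pc compute_handler(Exc** exc) {
      static Exc nested = { 2 };
      if ((*exc)->id == 1) *exc = &nested;    // a new exception appeared
      return (Pc)0x1000;
    }

    int main() {
      Exc primary = { 1 };
      Exc* exc = &primary;

      Exc* original = exc;                    // snapshot, like original_exception()
      Pc continuation = compute_handler(&exc);

      // Commit the cache entry only if no new exception appeared meanwhile;
      // otherwise the cached handler would be keyed to the wrong exception.
      if (continuation != NULL && exc == original) {
        handler_cache[exc->id] = continuation;
      }
      std::printf("cached entries: %zu\n", handler_cache.size());
      return 0;
    }
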
--- a/hotspot/src/share/vm/classfile/classFileParser.cpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/classfile/classFileParser.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -31,6 +31,9 @@
 #include "classfile/javaClasses.hpp"
 #include "classfile/symbolTable.hpp"
 #include "classfile/systemDictionary.hpp"
+#if INCLUDE_CDS
+#include "classfile/systemDictionaryShared.hpp"
+#endif
 #include "classfile/verificationType.hpp"
 #include "classfile/verifier.hpp"
 #include "classfile/vmSymbols.hpp"
@@ -60,6 +63,7 @@
 #include "services/threadService.hpp"
 #include "utilities/array.hpp"
 #include "utilities/globalDefinitions.hpp"
+#include "utilities/ostream.hpp"
 
 // We generally try to create the oops directly when parsing, rather than
 // allocating temporary data structures and copying the bytes twice. A
@@ -3786,7 +3790,15 @@
   instanceKlassHandle nullHandle;
 
   // Figure out whether we can skip format checking (matching classic VM behavior)
-  _need_verify = Verifier::should_verify_for(class_loader(), verify);
+  if (DumpSharedSpaces) {
+    // verify == true means it's a 'remote' class (i.e., non-boot class)
+    // Verification decision is based on BytecodeVerificationRemote flag
+    // for those classes.
+    _need_verify = (verify) ? BytecodeVerificationRemote :
+                              BytecodeVerificationLocal;
+  } else {
+    _need_verify = Verifier::should_verify_for(class_loader(), verify);
+  }
 
   // Set the verify flag in stream
   cfs->set_verify(_need_verify);
@@ -3805,6 +3817,18 @@
   u2 minor_version = cfs->get_u2_fast();
   u2 major_version = cfs->get_u2_fast();
 
+  if (DumpSharedSpaces && major_version < JAVA_1_5_VERSION) {
+    ResourceMark rm;
+    warning("Pre JDK 1.5 class not supported by CDS: %u.%u %s",
+            major_version, minor_version, name->as_C_string());
+    Exceptions::fthrow(
+      THREAD_AND_LOCATION,
+      vmSymbols::java_lang_UnsupportedClassVersionError(),
+      "Unsupported major.minor version for dump time %u.%u",
+      major_version,
+      minor_version);
+  }
+
   // Check version numbers - we check this even with verifier off
   if (!is_supported_version(major_version, minor_version)) {
     if (name == NULL) {
@@ -3912,6 +3936,18 @@
       if (cfs->source() != NULL) tty->print(" from %s", cfs->source());
       tty->print_cr("]");
     }
+#if INCLUDE_CDS
+    if (DumpLoadedClassList != NULL && cfs->source() != NULL && classlist_file->is_open()) {
+      // Only dump the classes that can be stored into CDS archive
+      if (SystemDictionaryShared::is_sharing_possible(loader_data)) {
+        if (name != NULL) {
+          ResourceMark rm(THREAD);
+          classlist_file->print_cr("%s", name->as_C_string());
+          classlist_file->flush();
+        }
+      }
+    }
+#endif
 
     u2 super_class_index = cfs->get_u2_fast();
     instanceKlassHandle super_klass = parse_super_class(super_class_index,
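
The classlist_file hook added in this hunk enables a two-step archiving workflow: run an application once with -XX:DumpLoadedClassList=<file> so that every class accepted by SystemDictionaryShared::is_sharing_possible() is recorded, then feed that list to a later -Xshare:dump run to populate the archive from observed usage. (The dump-side list flag, presumably -XX:SharedClassListFile, is an assumption here; only DumpLoadedClassList appears in this changeset.)
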
--- a/hotspot/src/share/vm/classfile/classLoader.cpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/classfile/classLoader.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -26,8 +26,13 @@
 #include "classfile/classFileParser.hpp"
 #include "classfile/classFileStream.hpp"
 #include "classfile/classLoader.hpp"
+#include "classfile/classLoaderExt.hpp"
 #include "classfile/classLoaderData.inline.hpp"
 #include "classfile/javaClasses.hpp"
+#if INCLUDE_CDS
+#include "classfile/sharedPathsMiscInfo.hpp"
+#include "classfile/sharedClassUtil.hpp"
+#endif
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
 #include "compiler/compileBroker.hpp"
@@ -35,6 +40,7 @@
 #include "interpreter/bytecodeStream.hpp"
 #include "interpreter/oopMapCache.hpp"
 #include "memory/allocation.inline.hpp"
+#include "memory/filemap.hpp"
 #include "memory/generation.hpp"
 #include "memory/oopFactory.hpp"
 #include "memory/universe.inline.hpp"
@@ -114,8 +120,12 @@
 
 ClassPathEntry* ClassLoader::_first_entry         = NULL;
 ClassPathEntry* ClassLoader::_last_entry          = NULL;
+int             ClassLoader::_num_entries         = 0;
 PackageHashtable* ClassLoader::_package_hash_table = NULL;
 
+#if INCLUDE_CDS
+SharedPathsMiscInfo* ClassLoader::_shared_paths_misc_info = NULL;
+#endif
 // helper routines
 bool string_starts_with(const char* str, const char* str_to_find) {
   size_t str_len = strlen(str);
@@ -194,6 +204,14 @@
   // check if file exists
   struct stat st;
   if (os::stat(path, &st) == 0) {
+#if INCLUDE_CDS
+    if (DumpSharedSpaces) {
+      // We have already checked in ClassLoader::check_shared_classpath() that the directory is empty, so
+      // we should never find a file underneath it -- unless the user has added a new file while we are
+      // running the dump, in which case we quit.
+      ShouldNotReachHere();
+    }
+#endif
     // found file, open it
     int file_handle = os::open(path, 0, 0);
     if (file_handle != -1) {
@@ -228,13 +246,13 @@
   FREE_C_HEAP_ARRAY(char, _zip_name, mtClass);
 }
 
-ClassFileStream* ClassPathZipEntry::open_stream(const char* name, TRAPS) {
-  // enable call to C land
+u1* ClassPathZipEntry::open_entry(const char* name, jint* filesize, bool nul_terminate, TRAPS) {
+  // enable call to C land
   JavaThread* thread = JavaThread::current();
   ThreadToNativeFromVM ttn(thread);
   // check whether zip archive contains name
-  jint filesize, name_len;
-  jzentry* entry = (*FindEntry)(_zip, name, &filesize, &name_len);
+  jint name_len;
+  jzentry* entry = (*FindEntry)(_zip, name, filesize, &name_len);
   if (entry == NULL) return NULL;
   u1* buffer;
   char name_buf[128];
@@ -245,19 +263,33 @@
     filename = NEW_RESOURCE_ARRAY(char, name_len + 1);
   }
 
-  // file found, get pointer to class in mmaped jar file.
+  // file found, get pointer to the entry in mmapped jar file.
   if (ReadMappedEntry == NULL ||
       !(*ReadMappedEntry)(_zip, entry, &buffer, filename)) {
-      // mmaped access not available, perhaps due to compression,
+      // mmapped access not available, perhaps due to compression,
       // read contents into resource array
-      buffer     = NEW_RESOURCE_ARRAY(u1, filesize);
+      int size = (*filesize) + ((nul_terminate) ? 1 : 0);
+      buffer = NEW_RESOURCE_ARRAY(u1, size);
       if (!(*ReadEntry)(_zip, entry, buffer, filename)) return NULL;
   }
+
+  // return result
+  if (nul_terminate) {
+    buffer[*filesize] = 0;
+  }
+  return buffer;
+}
+
+ClassFileStream* ClassPathZipEntry::open_stream(const char* name, TRAPS) {
+  jint filesize;
+  u1* buffer = open_entry(name, &filesize, false, CHECK_NULL);
+  if (buffer == NULL) {
+    return NULL;
+  }
   if (UsePerfData) {
     ClassLoader::perf_sys_classfile_bytes_read()->inc(filesize);
   }
-  // return result
-  return new ClassFileStream(buffer, filesize, _zip_name);    // Resource allocated
+  return new ClassFileStream(buffer, filesize, _zip_name); // Resource allocated
 }
 
 // invoke function for each entry in the zip file
@@ -272,12 +304,13 @@
   }
 }
 
-LazyClassPathEntry::LazyClassPathEntry(char* path, const struct stat* st) : ClassPathEntry() {
+LazyClassPathEntry::LazyClassPathEntry(char* path, const struct stat* st, bool throw_exception) : ClassPathEntry() {
   _path = os::strdup_check_oom(path);
   _st = *st;
   _meta_index = NULL;
   _resolved_entry = NULL;
   _has_error = false;
+  _throw_exception = throw_exception;
 }
 
 LazyClassPathEntry::~LazyClassPathEntry() {
@@ -293,7 +326,11 @@
     return (ClassPathEntry*) _resolved_entry;
   }
   ClassPathEntry* new_entry = NULL;
-  new_entry = ClassLoader::create_class_path_entry(_path, &_st, false, CHECK_NULL);
+  new_entry = ClassLoader::create_class_path_entry(_path, &_st, false, _throw_exception, CHECK_NULL);
+  if (!_throw_exception && new_entry == NULL) {
+    assert(!HAS_PENDING_EXCEPTION, "must be");
+    return NULL;
+  }
   {
     ThreadCritical tc;
     if (_resolved_entry == NULL) {
@@ -327,6 +364,23 @@
   return true;
 }
 
+u1* LazyClassPathEntry::open_entry(const char* name, jint* filesize, bool nul_terminate, TRAPS) {
+  if (_has_error) {
+    return NULL;
+  }
+  ClassPathEntry* cpe = resolve_entry(THREAD);
+  if (cpe == NULL) {
+    _has_error = true;
+    return NULL;
+  } else if (cpe->is_jar_file()) {
+    return ((ClassPathZipEntry*)cpe)->open_entry(name, filesize, nul_terminate, THREAD);
+  } else {
+    ShouldNotReachHere();
+    *filesize = 0;
+    return NULL;
+  }
+}
+
 static void print_meta_index(LazyClassPathEntry* entry,
                              GrowableArray<char*>& meta_packages) {
   tty->print("[Meta index for %s=", entry->name());
@@ -337,15 +391,62 @@
   tty->print_cr("]");
 }
 
+#if INCLUDE_CDS
+void ClassLoader::exit_with_path_failure(const char* error, const char* message) {
+  assert(DumpSharedSpaces, "only called at dump time");
+  tty->print_cr("Hint: enable -XX:+TraceClassPaths to diagnose the failure");
+  vm_exit_during_initialization(error, message);
+}
+#endif
 
-void ClassLoader::setup_meta_index() {
+void ClassLoader::trace_class_path(const char* msg, const char* name) {
+  if (!TraceClassPaths) {
+    return;
+  }
+
+  if (msg) {
+    tty->print("%s", msg);
+  }
+  if (name) {
+    if (strlen(name) < 256) {
+      tty->print("%s", name);
+    } else {
+      // For very long paths, we need to print each character separately,
+      // as print_cr() has a length limit
+      while (name[0] != '\0') {
+        tty->print("%c", name[0]);
+        name++;
+      }
+    }
+  }
+  if (msg && msg[0] == '[') {
+    tty->print_cr("]");
+  } else {
+    tty->cr();
+  }
+}
+
+void ClassLoader::setup_bootstrap_meta_index() {
   // Set up meta index which allows us to open boot jars lazily if
   // class data sharing is enabled
+  const char* meta_index_path = Arguments::get_meta_index_path();
+  const char* meta_index_dir  = Arguments::get_meta_index_dir();
+  setup_meta_index(meta_index_path, meta_index_dir, 0);
+}
+
+void ClassLoader::setup_meta_index(const char* meta_index_path, const char* meta_index_dir, int start_index) {
   const char* known_version = "% VERSION 2";
-  char* meta_index_path = Arguments::get_meta_index_path();
-  char* meta_index_dir  = Arguments::get_meta_index_dir();
   FILE* file = fopen(meta_index_path, "r");
   int line_no = 0;
+#if INCLUDE_CDS
+  if (DumpSharedSpaces) {
+    if (file != NULL) {
+      _shared_paths_misc_info->add_required_file(meta_index_path);
+    } else {
+      _shared_paths_misc_info->add_nonexist_path(meta_index_path);
+    }
+  }
+#endif
   if (file != NULL) {
     ResourceMark rm;
     LazyClassPathEntry* cur_entry = NULL;
@@ -380,7 +481,7 @@
           // Hand off current packages to current lazy entry (if any)
           if ((cur_entry != NULL) &&
               (boot_class_path_packages.length() > 0)) {
-            if (TraceClassLoading && Verbose) {
+            if ((TraceClassLoading || TraceClassPaths) && Verbose) {
               print_meta_index(cur_entry, boot_class_path_packages);
             }
             MetaIndex* index = new MetaIndex(boot_class_path_packages.adr_at(0),
@@ -391,8 +492,10 @@
           boot_class_path_packages.clear();
 
           // Find lazy entry corresponding to this jar file
-          for (ClassPathEntry* entry = _first_entry; entry != NULL; entry = entry->next()) {
-            if (entry->is_lazy() &&
+          int count = 0;
+          for (ClassPathEntry* entry = _first_entry; entry != NULL; entry = entry->next(), count++) {
+            if (count >= start_index &&
+                entry->is_lazy() &&
                 string_starts_with(entry->name(), meta_index_dir) &&
                 string_ends_with(entry->name(), &package_name[2])) {
               cur_entry = (LazyClassPathEntry*) entry;
@@ -429,7 +532,7 @@
     // Hand off current packages to current lazy entry (if any)
     if ((cur_entry != NULL) &&
         (boot_class_path_packages.length() > 0)) {
-      if (TraceClassLoading && Verbose) {
+      if ((TraceClassLoading || TraceClassPaths) && Verbose) {
         print_meta_index(cur_entry, boot_class_path_packages);
       }
       MetaIndex* index = new MetaIndex(boot_class_path_packages.adr_at(0),
@@ -440,37 +543,88 @@
   }
 }
 
+#if INCLUDE_CDS
+void ClassLoader::check_shared_classpath(const char *path) {
+  if (strcmp(path, "") == 0) {
+    exit_with_path_failure("Cannot have empty path in archived classpaths", NULL);
+  }
+
+  struct stat st;
+  if (os::stat(path, &st) == 0) {
+    if ((st.st_mode & S_IFREG) != S_IFREG) { // not a regular file, i.e., a directory
+      if (!os::dir_is_empty(path)) {
+        tty->print_cr("Error: non-empty directory '%s'", path);
+        exit_with_path_failure("CDS allows only empty directories in archived classpaths", NULL);
+      }
+    }
+  }
+}
+#endif
+
 void ClassLoader::setup_bootstrap_search_path() {
   assert(_first_entry == NULL, "should not setup bootstrap class search path twice");
   char* sys_class_path = os::strdup_check_oom(Arguments::get_sysclasspath());
-  if (TraceClassLoading && Verbose) {
-    tty->print_cr("[Bootstrap loader class path=%s]", sys_class_path);
+  if (!PrintSharedArchiveAndExit) {
+    trace_class_path("[Bootstrap loader class path=", sys_class_path);
+  }
+#if INCLUDE_CDS
+  if (DumpSharedSpaces) {
+    _shared_paths_misc_info->add_boot_classpath(Arguments::get_sysclasspath());
   }
+#endif
+  setup_search_path(sys_class_path);
+  os::free(sys_class_path);
+}
 
-  int len = (int)strlen(sys_class_path);
+#if INCLUDE_CDS
+int ClassLoader::get_shared_paths_misc_info_size() {
+  return _shared_paths_misc_info->get_used_bytes();
+}
+
+void* ClassLoader::get_shared_paths_misc_info() {
+  return _shared_paths_misc_info->buffer();
+}
+
+bool ClassLoader::check_shared_paths_misc_info(void *buf, int size) {
+  SharedPathsMiscInfo* checker = SharedClassUtil::allocate_shared_paths_misc_info((char*)buf, size);
+  bool result = checker->check();
+  delete checker;
+  return result;
+}
+#endif
+
+void ClassLoader::setup_search_path(char *class_path) {
+  int offset = 0;
+  int len = (int)strlen(class_path);
   int end = 0;
 
   // Iterate over class path entries
   for (int start = 0; start < len; start = end) {
-    while (sys_class_path[end] && sys_class_path[end] != os::path_separator()[0]) {
+    while (class_path[end] && class_path[end] != os::path_separator()[0]) {
       end++;
     }
-    char* path = NEW_C_HEAP_ARRAY(char, end-start+1, mtClass);
-    strncpy(path, &sys_class_path[start], end-start);
-    path[end-start] = '\0';
+    EXCEPTION_MARK;
+    ResourceMark rm(THREAD);
+    char* path = NEW_RESOURCE_ARRAY(char, end - start + 1);
+    strncpy(path, &class_path[start], end - start);
+    path[end - start] = '\0';
     update_class_path_entry_list(path, false);
-    FREE_C_HEAP_ARRAY(char, path, mtClass);
-    while (sys_class_path[end] == os::path_separator()[0]) {
+#if INCLUDE_CDS
+    if (DumpSharedSpaces) {
+      check_shared_classpath(path);
+    }
+#endif
+    while (class_path[end] == os::path_separator()[0]) {
       end++;
     }
   }
-  os::free(sys_class_path);
 }
 
-ClassPathEntry* ClassLoader::create_class_path_entry(char *path, const struct stat* st, bool lazy, TRAPS) {
+ClassPathEntry* ClassLoader::create_class_path_entry(char *path, const struct stat* st,
+                                                     bool lazy, bool throw_exception, TRAPS) {
   JavaThread* thread = JavaThread::current();
   if (lazy) {
-    return new LazyClassPathEntry(path, st);
+    return new LazyClassPathEntry(path, st, throw_exception);
   }
   ClassPathEntry* new_entry = NULL;
   if ((st->st_mode & S_IFREG) == S_IFREG) {
@@ -479,7 +633,11 @@
     char canonical_path[JVM_MAXPATHLEN];
     if (!get_canonical_path(path, canonical_path, JVM_MAXPATHLEN)) {
       // This matches the classic VM
-      THROW_MSG_(vmSymbols::java_io_IOException(), "Bad pathname", NULL);
+      if (throw_exception) {
+        THROW_MSG_(vmSymbols::java_io_IOException(), "Bad pathname", NULL);
+      } else {
+        return NULL;
+      }
     }
     char* error_msg = NULL;
     jzfile* zip;
@@ -491,7 +649,7 @@
     }
     if (zip != NULL && error_msg == NULL) {
       new_entry = new ClassPathZipEntry(zip, path);
-      if (TraceClassLoading) {
+      if (TraceClassLoading || TraceClassPaths) {
         tty->print_cr("[Opened %s]", path);
       }
     } else {
@@ -505,12 +663,16 @@
         msg = NEW_RESOURCE_ARRAY(char, len);
         jio_snprintf(msg, len - 1, "error in opening JAR file <%s> %s", error_msg, path);
       }
-      THROW_MSG_(vmSymbols::java_lang_ClassNotFoundException(), msg, NULL);
+      if (throw_exception) {
+        THROW_MSG_(vmSymbols::java_lang_ClassNotFoundException(), msg, NULL);
+      } else {
+        return NULL;
+      }
     }
   } else {
     // Directory
     new_entry = new ClassPathDirEntry(path);
-    if (TraceClassLoading) {
+    if (TraceClassLoading || TraceClassPaths) {
       tty->print_cr("[Path %s]", path);
     }
   }
@@ -571,23 +733,37 @@
       _last_entry = new_entry;
     }
   }
+  _num_entries++;
 }
 
-void ClassLoader::update_class_path_entry_list(char *path,
-                                               bool check_for_duplicates) {
+// Returns true IFF the file/dir exists and the entry was successfully created.
+bool ClassLoader::update_class_path_entry_list(char *path,
+                                               bool check_for_duplicates,
+                                               bool throw_exception) {
   struct stat st;
   if (os::stat(path, &st) == 0) {
     // File or directory found
     ClassPathEntry* new_entry = NULL;
     Thread* THREAD = Thread::current();
-    new_entry = create_class_path_entry(path, &st, LazyBootClassLoader, CHECK);
+    new_entry = create_class_path_entry(path, &st, LazyBootClassLoader, throw_exception, CHECK_(false));
+    if (new_entry == NULL) {
+      return false;
+    }
     // The kernel VM adds dynamically to the end of the classloader path and
     // doesn't reorder the bootclasspath which would break java.lang.Package
     // (see PackageInfo).
     // Add new entry to linked list
     if (!check_for_duplicates || !contains_entry(new_entry)) {
-      add_to_list(new_entry);
+      ClassLoaderExt::add_class_path_entry(path, check_for_duplicates, new_entry);
     }
+    return true;
+  } else {
+#if INCLUDE_CDS
+    if (DumpSharedSpaces) {
+      _shared_paths_misc_info->add_nonexist_path(path);
+    }
+#endif
+    return false;
   }
 }
 
@@ -739,10 +915,10 @@
     assert(n == number_of_entries(), "just checking");
   }
 
-  void copy_table(char** top, char* end, PackageHashtable* table);
+  CDS_ONLY(void copy_table(char** top, char* end, PackageHashtable* table);)
 };
 
-
+#if INCLUDE_CDS
 void PackageHashtable::copy_table(char** top, char* end,
                                   PackageHashtable* table) {
   // Copy (relocate) the table to the shared space.
@@ -750,33 +926,30 @@
 
   // Calculate the space needed for the package name strings.
   int i;
-  int n = 0;
-  for (i = 0; i < table_size(); ++i) {
-    for (PackageInfo* pp = table->bucket(i);
-                      pp != NULL;
-                      pp = pp->next()) {
-      n += (int)(strlen(pp->pkgname()) + 1);
-    }
-  }
-  if (*top + n + sizeof(intptr_t) >= end) {
-    report_out_of_shared_space(SharedMiscData);
-  }
-
-  // Copy the table data (the strings) to the shared space.
-  n = align_size_up(n, sizeof(HeapWord));
-  *(intptr_t*)(*top) = n;
-  *top += sizeof(intptr_t);
+  intptr_t* tableSize = (intptr_t*)(*top);
+  *top += sizeof(intptr_t);  // For table size
+  char* tableStart = *top;
 
   for (i = 0; i < table_size(); ++i) {
     for (PackageInfo* pp = table->bucket(i);
                       pp != NULL;
                       pp = pp->next()) {
       int n1 = (int)(strlen(pp->pkgname()) + 1);
+      if (*top + n1 >= end) {
+        report_out_of_shared_space(SharedMiscData);
+      }
       pp->set_pkgname((char*)memcpy(*top, pp->pkgname(), n1));
       *top += n1;
     }
   }
   *top = (char*)align_size_up((intptr_t)*top, sizeof(HeapWord));
+  if (*top >= end) {
+    report_out_of_shared_space(SharedMiscData);
+  }
+
+  // Write table size
+  intptr_t len = *top - (char*)tableStart;
+  *tableSize = len;
 }
 
 
@@ -787,7 +960,7 @@
 void ClassLoader::copy_package_info_table(char** top, char* end) {
   _package_hash_table->copy_table(top, end, _package_hash_table);
 }
-
+#endif
 
 PackageInfo* ClassLoader::lookup_package(const char *pkgname) {
   const char *cp = strrchr(pkgname, '/');
@@ -880,7 +1053,8 @@
 
 instanceKlassHandle ClassLoader::load_classfile(Symbol* h_name, TRAPS) {
   ResourceMark rm(THREAD);
-  EventMark m("loading class %s", h_name->as_C_string());
+  const char* class_name = h_name->as_C_string();
+  EventMark m("loading class %s", class_name);
   ThreadProfilerMark tpm(ThreadProfilerMark::classLoaderRegion);
 
   stringStream st;
@@ -888,18 +1062,24 @@
   // st.print("%s.class", h_name->as_utf8());
   st.print_raw(h_name->as_utf8());
   st.print_raw(".class");
-  char* name = st.as_string();
+  const char* file_name = st.as_string();
+  ClassLoaderExt::Context context(class_name, file_name, THREAD);
 
   // Lookup stream for parsing .class file
   ClassFileStream* stream = NULL;
   int classpath_index = 0;
+  ClassPathEntry* e = NULL;
+  instanceKlassHandle h;
   {
     PerfClassTraceTime vmtimer(perf_sys_class_lookup_time(),
                                ((JavaThread*) THREAD)->get_thread_stat()->perf_timers_addr(),
                                PerfClassTraceTime::CLASS_LOAD);
-    ClassPathEntry* e = _first_entry;
+    e = _first_entry;
     while (e != NULL) {
-      stream = e->open_stream(name, CHECK_NULL);
+      stream = e->open_stream(file_name, CHECK_NULL);
+      if (!context.check(stream, classpath_index)) {
+        return h; // NULL
+      }
       if (stream != NULL) {
         break;
       }
@@ -908,9 +1088,7 @@
     }
   }
 
-  instanceKlassHandle h;
   if (stream != NULL) {
-
     // class file found, parse it
     ClassFileParser parser(stream);
     ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
@@ -920,12 +1098,19 @@
                                                        loader_data,
                                                        protection_domain,
                                                        parsed_name,
-                                                       false,
-                                                       CHECK_(h));
-
-    // add to package table
-    if (add_package(name, classpath_index, THREAD)) {
-      h = result;
+                                                       context.should_verify(classpath_index),
+                                                       THREAD);
+    if (HAS_PENDING_EXCEPTION) {
+      ResourceMark rm;
+      if (DumpSharedSpaces) {
+        tty->print_cr("Preload Error: Failed to load %s", class_name);
+      }
+      return h;
+    }
+    h = context.record_result(classpath_index, e, result, THREAD);
+  } else {
+    if (DumpSharedSpaces) {
+      tty->print_cr("Preload Error: Cannot find %s", class_name);
     }
   }
 
@@ -1020,14 +1205,27 @@
 
   // lookup zip library entry points
   load_zip_library();
+#if INCLUDE_CDS
   // initialize search path
+  if (DumpSharedSpaces) {
+    _shared_paths_misc_info = SharedClassUtil::allocate_shared_paths_misc_info();
+  }
+#endif
   setup_bootstrap_search_path();
   if (LazyBootClassLoader) {
     // set up meta index which makes boot classpath initialization lazier
-    setup_meta_index();
+    setup_bootstrap_meta_index();
   }
 }
 
+#if INCLUDE_CDS
+void ClassLoader::initialize_shared_path() {
+  if (DumpSharedSpaces) {
+    ClassLoaderExt::setup_search_paths();
+    _shared_paths_misc_info->write_jint(0); // see comments in SharedPathsMiscInfo::check()
+  }
+}
+#endif
 
 jlong ClassLoader::classloader_time_ms() {
   return UsePerfData ?
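
The copy_table() rewrite earlier in this file replaces the up-front size precomputation with a reserve-then-backpatch scheme: reserve the length word, stream each string with a per-entry bounds check, align, then write the final length into the reserved slot. A standalone sketch of that scheme (plain C++ with made-up package names, not the VM code):

    #include <cstdio>
    #include <cstring>
    #include <stdint.h>

    int main() {
      intptr_t backing[32];                   // intptr_t-aligned backing store
      char* top = (char*)backing;
      char* end = top + sizeof(backing);

      intptr_t* size_slot = (intptr_t*)top;   // reserve the length word first
      top += sizeof(intptr_t);
      char* payload_start = top;

      const char* names[] = { "java/lang", "java/util", "java/io" };
      for (int i = 0; i < 3; i++) {
        size_t n = strlen(names[i]) + 1;
        if (top + n >= end) return 1;         // report_out_of_shared_space() in the VM
        memcpy(top, names[i], n);             // copy the string incl. its NUL
        top += n;
      }
      // (the VM additionally aligns 'top' to HeapWord size at this point)
      *size_slot = top - payload_start;       // backpatch the actual length
      printf("payload bytes: %ld\n", (long)*size_slot);
      return 0;
    }
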
--- a/hotspot/src/share/vm/classfile/classLoader.hpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/classfile/classLoader.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -107,6 +107,7 @@
   const char* name()  { return _zip_name; }
   ClassPathZipEntry(jzfile* zip, const char* zip_name);
   ~ClassPathZipEntry();
+  u1* open_entry(const char* name, jint* filesize, bool nul_terminate, TRAPS);
   ClassFileStream* open_stream(const char* name, TRAPS);
   void contents_do(void f(const char* name, void* context), void* context);
   // Debugging
@@ -122,13 +123,15 @@
   struct stat _st;
   MetaIndex* _meta_index;
   bool _has_error;
+  bool _throw_exception;
   volatile ClassPathEntry* _resolved_entry;
   ClassPathEntry* resolve_entry(TRAPS);
  public:
   bool is_jar_file();
   const char* name()  { return _path; }
-  LazyClassPathEntry(char* path, const struct stat* st);
+  LazyClassPathEntry(char* path, const struct stat* st, bool throw_exception);
   virtual ~LazyClassPathEntry();
+  u1* open_entry(const char* name, jint* filesize, bool nul_terminate, TRAPS);
 
   ClassFileStream* open_stream(const char* name, TRAPS);
   void set_meta_index(MetaIndex* meta_index) { _meta_index = meta_index; }
@@ -140,6 +143,7 @@
 
 class PackageHashtable;
 class PackageInfo;
+class SharedPathsMiscInfo;
 template <MEMFLAGS F> class HashtableBucket;
 
 class ClassLoader: AllStatic {
@@ -147,7 +151,7 @@
   enum SomeConstants {
     package_hash_table_size = 31  // Number of buckets
   };
- private:
+ protected:
   friend class LazyClassPathEntry;
 
   // Performance counters
@@ -189,10 +193,15 @@
   static ClassPathEntry* _first_entry;
   // Last entry in linked list of ClassPathEntry instances
   static ClassPathEntry* _last_entry;
+  static int _num_entries;
+
   // Hash table used to keep track of loaded packages
   static PackageHashtable* _package_hash_table;
   static const char* _shared_archive;
 
+  // Info used by CDS
+  CDS_ONLY(static SharedPathsMiscInfo* _shared_paths_misc_info;)
+
   // Hash function
   static unsigned int hash(const char *s, int n);
   // Returns the package file name corresponding to the specified package
@@ -203,19 +212,23 @@
   static bool add_package(const char *pkgname, int classpath_index, TRAPS);
 
   // Initialization
-  static void setup_meta_index();
+  static void setup_bootstrap_meta_index();
+  static void setup_meta_index(const char* meta_index_path, const char* meta_index_dir,
+                               int start_index);
   static void setup_bootstrap_search_path();
+  static void setup_search_path(char *class_path);
+
   static void load_zip_library();
   static ClassPathEntry* create_class_path_entry(char *path, const struct stat* st,
-                                                 bool lazy, TRAPS);
+                                                 bool lazy, bool throw_exception, TRAPS);
 
   // Canonicalizes path names, so strcmp will work properly. This is mainly
   // to avoid confusing the zip library
   static bool get_canonical_path(char* orig, char* out, int len);
  public:
-  // Used by the kernel jvm.
-  static void update_class_path_entry_list(char *path,
-                                           bool check_for_duplicates);
+  static bool update_class_path_entry_list(char *path,
+                                           bool check_for_duplicates,
+                                           bool throw_exception=true);
   static void print_bootclasspath();
 
   // Timing
@@ -298,6 +311,7 @@
 
   // Initialization
   static void initialize();
+  CDS_ONLY(static void initialize_shared_path();)
   static void create_package_info_table();
   static void create_package_info_table(HashtableBucket<mtClass> *t, int length,
                                         int number_of_entries);
@@ -312,10 +326,21 @@
     return e;
   }
 
+#if INCLUDE_CDS
   // Sharing dump and restore
   static void copy_package_info_buckets(char** top, char* end);
   static void copy_package_info_table(char** top, char* end);
 
+  static void  check_shared_classpath(const char *path);
+  static void  finalize_shared_paths_misc_info();
+  static int   get_shared_paths_misc_info_size();
+  static void* get_shared_paths_misc_info();
+  static bool  check_shared_paths_misc_info(void* info, int size);
+  static void  exit_with_path_failure(const char* error, const char* message);
+#endif
+
+  static void  trace_class_path(const char* msg, const char* name = NULL);
+
   // VM monitoring and management support
   static jlong classloader_time_ms();
   static jlong class_method_total_size();
@@ -339,7 +364,7 @@
 
   // Force compilation of all methods in all classes in bootstrap class path (stress test)
 #ifndef PRODUCT
- private:
+ protected:
   static int _compile_the_world_class_counter;
   static int _compile_the_world_method_counter;
  public:
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/classfile/classLoaderExt.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_CLASSFILE_CLASSLOADEREXT_HPP
+#define SHARE_VM_CLASSFILE_CLASSLOADEREXT_HPP
+
+#include "classfile/classLoader.hpp"
+
+class ClassLoaderExt: public ClassLoader { // AllStatic
+public:
+
+  class Context {
+    const char* _file_name;
+  public:
+    Context(const char* class_name, const char* file_name, TRAPS) {
+      _file_name = file_name;
+    }
+
+    bool check(ClassFileStream* stream, const int classpath_index) {
+      return true;
+    }
+
+    bool should_verify(int classpath_index) {
+      return false;
+    }
+
+    instanceKlassHandle record_result(const int classpath_index,
+                                      ClassPathEntry* e, instanceKlassHandle result, TRAPS) {
+      if (ClassLoader::add_package(_file_name, classpath_index, THREAD)) {
+        if (DumpSharedSpaces) {
+          result->set_shared_classpath_index(classpath_index);
+        }
+        return result;
+      } else {
+        return instanceKlassHandle(); // NULL
+      }
+    }
+  };
+
+
+  static void add_class_path_entry(char* path, bool check_for_duplicates,
+                            ClassPathEntry* new_entry) {
+    ClassLoader::add_to_list(new_entry);
+  }
+  static void setup_search_paths() {}
+};
+
+#endif // SHARE_VM_CLASSFILE_CLASSLOADEREXT_HPP
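
ClassLoaderExt is deliberately a no-op shell: classLoader.cpp now routes entry registration and class-load recording through ClassLoaderExt::add_class_path_entry() and Context::check()/record_result(), so a build that needs stricter CDS policy can presumably drop in a richer classLoaderExt.hpp without modifying ClassLoader itself. (That intended use is an inference from the shape of the hooks, not something stated in this changeset.)
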
--- a/hotspot/src/share/vm/classfile/dictionary.cpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/classfile/dictionary.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -130,15 +130,13 @@
 }
 
 
-bool Dictionary::do_unloading() {
+void Dictionary::do_unloading() {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
-  bool class_was_unloaded = false;
-  int  index = 0; // Defined here for portability! Do not move
 
   // Remove unloadable entries and classes from system dictionary
   // The placeholder array has been handled in always_strong_oops_do.
   DictionaryEntry* probe = NULL;
-  for (index = 0; index < table_size(); index++) {
+  for (int index = 0; index < table_size(); index++) {
     for (DictionaryEntry** p = bucket_addr(index); *p != NULL; ) {
       probe = *p;
       Klass* e = probe->klass();
@@ -158,16 +156,8 @@
         // Do we need to delete this system dictionary entry?
         if (loader_data->is_unloading()) {
           // If the loader is not live this entry should always be
-          // removed (will never be looked up again). Note that this is
-          // not the same as unloading the referred class.
-          if (k_def_class_loader_data == loader_data) {
-            // This is the defining entry, so the referred class is about
-            // to be unloaded.
-            class_was_unloaded = true;
-          }
-          // Also remove this system dictionary entry.
+          // removed (will never be looked up again).
           purge_entry = true;
-
         } else {
           // The loader in this entry is alive. If the klass is dead,
           // (determined by checking the defining class loader)
@@ -196,7 +186,6 @@
       p = probe->next_addr();
     }
   }
-  return class_was_unloaded;
 }
 
 void Dictionary::roots_oops_do(OopClosure* strong, OopClosure* weak) {
@@ -220,6 +209,29 @@
   _pd_cache_table->roots_oops_do(strong, weak);
 }
 
+void Dictionary::remove_classes_in_error_state() {
+  assert(DumpSharedSpaces, "supported only when dumping");
+  DictionaryEntry* probe = NULL;
+  for (int index = 0; index < table_size(); index++) {
+    for (DictionaryEntry** p = bucket_addr(index); *p != NULL; ) {
+      probe = *p;
+      InstanceKlass* ik = InstanceKlass::cast(probe->klass());
+      if (ik->is_in_error_state()) { // purge this entry
+        *p = probe->next();
+        if (probe == _current_class_entry) {
+          _current_class_entry = NULL;
+        }
+        free_entry(probe);
+        ResourceMark rm;
+        tty->print_cr("Removed error class: %s", ik->external_name());
+        continue;
+      }
+
+      p = probe->next_addr();
+    }
+  }
+}
+
 void Dictionary::always_strong_oops_do(OopClosure* blk) {
   // Follow all system classes and temporary placeholders in dictionary; only
   // protection domain oops contain references into the heap. In a first
@@ -693,16 +705,17 @@
 
 
 // ----------------------------------------------------------------------------
-#ifndef PRODUCT
 
-void Dictionary::print() {
+void Dictionary::print(bool details) {
   ResourceMark rm;
   HandleMark   hm;
 
-  tty->print_cr("Java system dictionary (table_size=%d, classes=%d)",
-                 table_size(), number_of_entries());
-  tty->print_cr("^ indicates that initiating loader is different from "
-                "defining loader");
+  if (details) {
+    tty->print_cr("Java system dictionary (table_size=%d, classes=%d)",
+                   table_size(), number_of_entries());
+    tty->print_cr("^ indicates that initiating loader is different from "
+                  "defining loader");
+  }
 
   for (int index = 0; index < table_size(); index++) {
     for (DictionaryEntry* probe = bucket(index);
@@ -713,21 +726,28 @@
       ClassLoaderData* loader_data =  probe->loader_data();
       bool is_defining_class =
          (loader_data == InstanceKlass::cast(e)->class_loader_data());
-      tty->print("%s%s", is_defining_class ? " " : "^",
+      tty->print("%s%s", ((!details) || is_defining_class) ? " " : "^",
                    e->external_name());
 
+      if (details) {
         tty->print(", loader ");
-      loader_data->print_value();
+        if (loader_data != NULL) {
+          loader_data->print_value();
+        } else {
+          tty->print("NULL");
+        }
+      }
       tty->cr();
     }
   }
-  tty->cr();
-  _pd_cache_table->print();
+
+  if (details) {
+    tty->cr();
+    _pd_cache_table->print();
+  }
   tty->cr();
 }
 
-#endif
-
 void Dictionary::verify() {
   guarantee(number_of_entries() >= 0, "Verify of system dictionary failed");
 
--- a/hotspot/src/share/vm/classfile/dictionary.hpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/classfile/dictionary.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -100,6 +100,7 @@
   void methods_do(void f(Method*));
 
   void unlink(BoolObjectClosure* is_alive);
+  void remove_classes_in_error_state();
 
   // Classes loaded by the bootstrap loader are always strongly reachable.
   // If we're not doing class unloading, all classes are strongly reachable.
@@ -108,9 +109,8 @@
     return (loader_data->is_the_null_class_loader_data() || !ClassUnloading);
   }
 
-  // Unload (that is, break root links to) all unmarked classes and
-  // loaders.  Returns "true" iff something was unloaded.
-  bool do_unloading();
+  // Unload (that is, break root links to) all unmarked classes and loaders.
+  void do_unloading();
 
   // Protection domains
   Klass* find(int index, unsigned int hash, Symbol* name,
@@ -127,9 +127,7 @@
 
   ProtectionDomainCacheEntry* cache_get(oop protection_domain);
 
-#ifndef PRODUCT
-  void print();
-#endif
+  void print(bool details = true);
   void verify();
 };
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/classfile/sharedClassUtil.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_CLASSFILE_SHAREDCLASSUTIL_HPP
+#define SHARE_VM_CLASSFILE_SHAREDCLASSUTIL_HPP
+
+#include "classfile/sharedPathsMiscInfo.hpp"
+#include "memory/filemap.hpp"
+
+class SharedClassUtil : AllStatic {
+public:
+
+  static SharedPathsMiscInfo* allocate_shared_paths_misc_info() {
+    return new SharedPathsMiscInfo();
+  }
+
+  static SharedPathsMiscInfo* allocate_shared_paths_misc_info(char* buf, int size) {
+    return new SharedPathsMiscInfo(buf, size);
+  }
+
+  static FileMapInfo::FileMapHeader* allocate_file_map_header() {
+    return new FileMapInfo::FileMapHeader();
+  }
+
+  static size_t file_map_header_size() {
+    return sizeof(FileMapInfo::FileMapHeader);
+  }
+
+  static size_t shared_class_path_entry_size() {
+    return sizeof(SharedClassPathEntry);
+  }
+
+  static void update_shared_classpath(ClassPathEntry *cpe,
+                                      SharedClassPathEntry* ent,
+                                      time_t timestamp,
+                                      long filesize, TRAPS) {
+    ent->_timestamp = timestamp;
+    ent->_filesize  = filesize;
+  }
+
+  static void initialize(TRAPS) {}
+
+  inline static bool is_shared_boot_class(Klass* klass) {
+    return (klass->_shared_class_path_index >= 0);
+  }
+};
+
+#endif // SHARE_VM_CLASSFILE_SHAREDCLASSUTIL_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/classfile/sharedPathsMiscInfo.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "classfile/classLoader.hpp"
+#include "classfile/classLoaderData.inline.hpp"
+#include "classfile/sharedPathsMiscInfo.hpp"
+#include "memory/allocation.inline.hpp"
+#include "memory/metaspaceShared.hpp"
+#include "runtime/arguments.hpp"
+
+void SharedPathsMiscInfo::add_path(const char* path, int type) {
+  if (TraceClassPaths) {
+    tty->print("[type=%s] ", type_name(type));
+    trace_class_path("[Add misc shared path ", path);
+  }
+  write(path, strlen(path) + 1);
+  write_jint(jint(type));
+}
+
+void SharedPathsMiscInfo::ensure_size(size_t needed_bytes) {
+  assert(_allocated, "cannot modify buffer during validation.");
+  int used = get_used_bytes();
+  int target = used + int(needed_bytes);
+  if (target > _buf_size) {
+    _buf_size = _buf_size * 2 + (int)needed_bytes;
+    _buf_start = REALLOC_C_HEAP_ARRAY(char, _buf_start, _buf_size, mtClass);
+    _cur_ptr = _buf_start + used;
+    _end_ptr = _buf_start + _buf_size;
+  }
+}
+
+void SharedPathsMiscInfo::write(const void* ptr, size_t size) {
+  ensure_size(size);
+  memcpy(_cur_ptr, ptr, size);
+  _cur_ptr += size;
+}
+
+bool SharedPathsMiscInfo::read(void* ptr, size_t size) {
+  if (_cur_ptr + size <= _end_ptr) {
+    memcpy(ptr, _cur_ptr, size);
+    _cur_ptr += size;
+    return true;
+  }
+  return false;
+}
+
+bool SharedPathsMiscInfo::fail(const char* msg, const char* name) {
+  ClassLoader::trace_class_path(msg, name);
+  MetaspaceShared::set_archive_loading_failed();
+  return false;
+}
+
+bool SharedPathsMiscInfo::check() {
+  // The whole buffer must be 0 terminated so that we can use strlen and strcmp
+  // without fear.
+  _end_ptr -= sizeof(jint);
+  if (_cur_ptr >= _end_ptr) {
+    return fail("Truncated archive file header");
+  }
+  if (*_end_ptr != 0) {
+    return fail("Corrupted archive file header");
+  }
+
+  while (_cur_ptr < _end_ptr) {
+    jint type;
+    const char* path = _cur_ptr;
+    _cur_ptr += strlen(path) + 1;
+    if (!read_jint(&type)) {
+      return fail("Corrupted archive file header");
+    }
+    if (TraceClassPaths) {
+      tty->print("[type=%s ", type_name(type));
+      print_path(tty, type, path);
+      tty->print_cr("]");
+    }
+    if (!check(type, path)) {
+      if (!PrintSharedArchiveAndExit) {
+        return false;
+      }
+    } else {
+      trace_class_path("[ok");
+    }
+  }
+
+  return true;
+}
+
+bool SharedPathsMiscInfo::check(jint type, const char* path) {
+  switch (type) {
+  case BOOT:
+    if (strcmp(path, Arguments::get_sysclasspath()) != 0) {
+      return fail("[BOOT classpath mismatch, actual: -Dsun.boot.class.path=", Arguments::get_sysclasspath());
+    }
+    break;
+  case NON_EXIST: // fall-through
+  case REQUIRED:
+    {
+      struct stat st;
+      if (os::stat(path, &st) != 0) {
+        // The file does not actually exist
+        if (type == REQUIRED) {
+          // but we require it to exist -> fail
+          return fail("Required file doesn't exist");
+        }
+      } else {
+        // The file actually exists
+        if (type == NON_EXIST) {
+          // But we want it to not exist -> fail
+          return fail("File must not exist");
+        }
+        time_t    timestamp;
+        long      filesize;
+
+        if (!read_time(&timestamp) || !read_long(&filesize)) {
+          return fail("Corrupted archive file header");
+        }
+        if (timestamp != st.st_mtime) {
+          return fail("Timestamp mismatch");
+        }
+        if (filesize  != st.st_size) {
+          return fail("File size mismatch");
+        }
+      }
+    }
+    break;
+
+  default:
+    return fail("Corrupted archive file header");
+  }
+
+  return true;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/classfile/sharedPathsMiscInfo.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -0,0 +1,187 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_CLASSFILE_SHAREDPATHSMISCINFO_HPP
+#define SHARE_VM_CLASSFILE_SHAREDPATHSMISCINFO_HPP
+
+#include "runtime/os.hpp"
+
+// At dump time, while processing class paths, we build up the dump-time
+// classpath. The JAR files that exist are stored in the list ClassLoader::_first_entry.
+// However, we need to store other "misc" information for run-time checking, such as
+//
+// + The values of Arguments::get_sysclasspath() used during dumping.
+//
+// + The meta-index file(s) used during dumping (including modification time and size)
+//
+// + The class path elements specified during dumping that did not exist --
+//   these elements must also be specified at run time, and they also must not
+//   exist at run time.
+//
+// These misc items are stored in a linear buffer in SharedPathsMiscInfo.
+// The storage format is stream oriented to minimize its size.
+//
+// When writing the information to the archive file, SharedPathsMiscInfo is stored in
+// the archive file header. At run-time, this information is used only during initialization
+// (accessed using read() instead of mmap()), and is deallocated afterwards to save space.
+//
+// The SharedPathsMiscInfo class is used both for creating the information (at
+// dump time) and for validating it (at run time). Different constructors are used in the
+// two situations. See below.
+
+class SharedPathsMiscInfo : public CHeapObj<mtClass> {
+protected:
+  char* _buf_start;
+  char* _cur_ptr;
+  char* _end_ptr;
+  int   _buf_size;
+  bool  _allocated;   // was _buf_start allocated by me?
+  void ensure_size(size_t needed_bytes);
+  void add_path(const char* path, int type);
+
+  void write(const void* ptr, size_t size);
+  bool read(void* ptr, size_t size);
+
+  static void trace_class_path(const char* msg, const char* name = NULL) {
+    ClassLoader::trace_class_path(msg, name);
+  }
+protected:
+  static bool fail(const char* msg, const char* name = NULL);
+  virtual bool check(jint type, const char* path);
+
+public:
+  enum {
+    INITIAL_BUF_SIZE = 128
+  };
+  // This constructor is used when creating the misc information (during dump)
+  SharedPathsMiscInfo() {
+    _buf_size = INITIAL_BUF_SIZE;
+    _cur_ptr = _buf_start = NEW_C_HEAP_ARRAY(char, _buf_size, mtClass);
+    _allocated = true;
+  }
+  // This constructor is used when validating the misc info (during run time)
+  SharedPathsMiscInfo(char *buff, int size) {
+    _cur_ptr = _buf_start = buff;
+    _end_ptr = _buf_start + size;
+    _buf_size = size;
+    _allocated = false;
+  }
+  ~SharedPathsMiscInfo() {
+    if (_allocated) {
+      FREE_C_HEAP_ARRAY(char, _buf_start, mtClass);
+    }
+  }
+  int get_used_bytes() {
+    return _cur_ptr - _buf_start;
+  }
+  void* buffer() {
+    return _buf_start;
+  }
+
+  // writing --
+
+  // The path must not exist at run-time
+  void add_nonexist_path(const char* path) {
+    add_path(path, NON_EXIST);
+  }
+
+  // The path must exist and have required size and modification time
+  void add_required_file(const char* path) {
+    add_path(path, REQUIRED);
+
+    struct stat st;
+    if (os::stat(path, &st) != 0) {
+      assert(0, "sanity");
+      ClassLoader::exit_with_path_failure("failed to os::stat(%s)", path); // should not happen
+    }
+    write_time(st.st_mtime);
+    write_long(st.st_size);
+  }
+
+  // The path must exist, and must contain exactly <num_entries> files/dirs
+  void add_boot_classpath(const char* path) {
+    add_path(path, BOOT);
+  }
+  int write_jint(jint num) {
+    write(&num, sizeof(num));
+    return 0;
+  }
+  void write_time(time_t t) {
+    write(&t, sizeof(t));
+  }
+  void write_long(long l) {
+    write(&l, sizeof(l));
+  }
+
+  bool dump_to_file(int fd) {
+    int n = get_used_bytes();
+    return (os::write(fd, _buf_start, n) == (size_t)n);
+  }
+
+  // reading --
+
+  enum {
+    BOOT      = 1,
+    NON_EXIST = 2,
+    REQUIRED  = 3
+  };
+
+  virtual const char* type_name(int type) {
+    switch (type) {
+    case BOOT:      return "BOOT";
+    case NON_EXIST: return "NON_EXIST";
+    case REQUIRED:  return "REQUIRED";
+    default:        ShouldNotReachHere(); return "?";
+    }
+  }
+
+  virtual void print_path(outputStream* out, int type, const char* path) {
+    switch (type) {
+    case BOOT:
+      out->print("Expecting -Dsun.boot.class.path=%s", path);
+      break;
+    case NON_EXIST:
+      out->print("Expecting that %s does not exist", path);
+      break;
+    case REQUIRED:
+      out->print("Expecting that file %s must exist and not altered", path);
+      break;
+    default:
+      ShouldNotReachHere();
+    }
+  }
+
+  bool check();
+  bool read_jint(jint *ptr) {
+    return read(ptr, sizeof(jint));
+  }
+  bool read_long(long *ptr) {
+    return read(ptr, sizeof(long));
+  }
+  bool read_time(time_t *ptr) {
+    return read(ptr, sizeof(time_t));
+  }
+};
+
+#endif // SHARE_VM_CLASSFILE_SHAREDPATHSMISCINFO_HPP
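
The stream format described in the header comment above is simple enough to reproduce in a few lines: each record is a NUL-terminated path followed by a jint type tag, REQUIRED records additionally carry the timestamp and size, and the whole buffer ends in a jint 0. A standalone sketch of one write/read round trip (plain C++ with stand-in types and a hypothetical path, not the VM code):

    #include <cstdio>
    #include <cstring>
    #include <ctime>
    #include <vector>

    typedef int jint;
    enum { BOOT = 1, NON_EXIST = 2, REQUIRED = 3 };

    static void put(std::vector<char>& buf, const void* p, size_t n) {
      const char* c = (const char*)p;
      buf.insert(buf.end(), c, c + n);
    }

    int main() {
      std::vector<char> buf;

      // write side, mirroring add_path() / add_required_file()
      const char* path = "/opt/app/lib.jar";   // hypothetical path
      put(buf, path, strlen(path) + 1);        // NUL-terminated path
      jint type = REQUIRED;
      put(buf, &type, sizeof(type));
      time_t ts = 1409300000; long size = 123456;
      put(buf, &ts, sizeof(ts));               // write_time()
      put(buf, &size, sizeof(size));           // write_long()
      jint terminator = 0;                     // trailing 0, tested by check()
      put(buf, &terminator, sizeof(terminator));

      // read side, mirroring check(): strlen is safe because every path is
      // NUL-terminated and the buffer ends in a zero jint; a REQUIRED record's
      // timestamp and size would be read next via read_time()/read_long()
      const char* cur = &buf[0];
      const char* rpath = cur;
      cur += strlen(rpath) + 1;
      jint rtype;
      memcpy(&rtype, cur, sizeof(rtype));
      printf("path=%s type=%d\n", rpath, rtype);
      return 0;
    }
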
--- a/hotspot/src/share/vm/classfile/systemDictionary.cpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/classfile/systemDictionary.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -31,10 +31,15 @@
 #include "classfile/resolutionErrors.hpp"
 #include "classfile/stringTable.hpp"
 #include "classfile/systemDictionary.hpp"
+#if INCLUDE_CDS
+#include "classfile/sharedClassUtil.hpp"
+#include "classfile/systemDictionaryShared.hpp"
+#endif
 #include "classfile/vmSymbols.hpp"
 #include "compiler/compileBroker.hpp"
 #include "interpreter/bytecodeStream.hpp"
 #include "interpreter/interpreter.hpp"
+#include "memory/filemap.hpp"
 #include "memory/gcLocker.hpp"
 #include "memory/oopFactory.hpp"
 #include "oops/instanceKlass.hpp"
@@ -110,6 +115,8 @@
                          CHECK);
 
   _java_system_loader = (oop)result.get_jobject();
+
+  CDS_ONLY(SystemDictionaryShared::initialize(CHECK);)
 }
 
 
@@ -974,6 +981,7 @@
     // Create a new CLD for anonymous class, that uses the same class loader
     // as the host_klass
     guarantee(host_klass->class_loader() == class_loader(), "should be the same");
+    guarantee(!DumpSharedSpaces, "must not create anonymous classes when dumping");
     loader_data = ClassLoaderData::anonymous_class_loader_data(class_loader(), CHECK_NULL);
     loader_data->record_dependency(host_klass(), CHECK_NULL);
   } else {
@@ -1134,7 +1142,7 @@
   return k();
 }
 
-
+#if INCLUDE_CDS
 void SystemDictionary::set_shared_dictionary(HashtableBucket<mtClass>* t, int length,
                                              int number_of_entries) {
   assert(length == _nof_buckets * sizeof(HashtableBucket<mtClass>),
@@ -1167,15 +1175,21 @@
 instanceKlassHandle SystemDictionary::load_shared_class(
                  Symbol* class_name, Handle class_loader, TRAPS) {
   instanceKlassHandle ik (THREAD, find_shared_class(class_name));
-  return load_shared_class(ik, class_loader, THREAD);
+  // Make sure we only return the boot class for the NULL classloader.
+  if (ik.not_null() &&
+      SharedClassUtil::is_shared_boot_class(ik()) && class_loader.is_null()) {
+    Handle protection_domain;
+    return load_shared_class(ik, class_loader, protection_domain, THREAD);
+  }
+  return instanceKlassHandle();
 }
 
-instanceKlassHandle SystemDictionary::load_shared_class(
-                 instanceKlassHandle ik, Handle class_loader, TRAPS) {
-  assert(class_loader.is_null(), "non-null classloader for shared class?");
+instanceKlassHandle SystemDictionary::load_shared_class(instanceKlassHandle ik,
+                                                        Handle class_loader,
+                                                        Handle protection_domain, TRAPS) {
   if (ik.not_null()) {
     instanceKlassHandle nh = instanceKlassHandle(); // null Handle
-    Symbol*  class_name = ik->name();
+    Symbol* class_name = ik->name();
 
     // Found the class, now load the superclass and interfaces.  If they
     // are shared, add them to the main system dictionary and reset
@@ -1184,7 +1198,7 @@
     if (ik->super() != NULL) {
       Symbol*  cn = ik->super()->name();
       resolve_super_or_fail(class_name, cn,
-                            class_loader, Handle(), true, CHECK_(nh));
+                            class_loader, protection_domain, true, CHECK_(nh));
     }
 
     Array<Klass*>* interfaces = ik->local_interfaces();
@@ -1197,7 +1211,7 @@
       // reinitialized yet (they will be once the interface classes
       // are loaded)
       Symbol*  name  = k->name();
-      resolve_super_or_fail(class_name, name, class_loader, Handle(), false, CHECK_(nh));
+      resolve_super_or_fail(class_name, name, class_loader, protection_domain, false, CHECK_(nh));
     }
 
     // Adjust methods to recover missing data.  They need addresses for
@@ -1206,30 +1220,47 @@
 
     // Updating methods must be done under a lock so multiple
     // threads don't update these in parallel
-    // Shared classes are all currently loaded by the bootstrap
-    // classloader, so this will never cause a deadlock on
-    // a custom class loader lock.
+    //
+    // Shared classes are all currently loaded by either the bootstrap or
+    // internal parallel class loaders, so this will never cause a deadlock
+    // on a custom class loader lock.
 
+    ClassLoaderData* loader_data = ClassLoaderData::class_loader_data(class_loader());
     {
       Handle lockObject = compute_loader_lock_object(class_loader, THREAD);
       check_loader_lock_contention(lockObject, THREAD);
       ObjectLocker ol(lockObject, THREAD, true);
-      ik->restore_unshareable_info(CHECK_(nh));
+      ik->restore_unshareable_info(loader_data, protection_domain, CHECK_(nh));
     }
 
     if (TraceClassLoading) {
       ResourceMark rm;
       tty->print("[Loaded %s", ik->external_name());
       tty->print(" from shared objects file");
+      if (class_loader.not_null()) {
+        tty->print(" by %s", loader_data->loader_name());
+      }
       tty->print_cr("]");
     }
+
+#if INCLUDE_CDS
+    if (DumpLoadedClassList != NULL && classlist_file->is_open()) {
+      // Only dump the classes that can be stored into CDS archive
+      if (SystemDictionaryShared::is_sharing_possible(loader_data)) {
+        ResourceMark rm(THREAD);
+        classlist_file->print_cr("%s", ik->name()->as_C_string());
+        classlist_file->flush();
+      }
+    }
+#endif
+
     // notify a class loaded from shared object
     ClassLoadingService::notify_class_loaded(InstanceKlass::cast(ik()),
                                              true /* shared class */);
   }
   return ik;
 }
-
+#endif
 
 instanceKlassHandle SystemDictionary::load_instance_class(Symbol* class_name, Handle class_loader, TRAPS) {
   instanceKlassHandle nh = instanceKlassHandle(); // null Handle
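The hunk above also makes the shared-class path honor -XX:DumpLoadedClassList: every
shared class that could be stored into a CDS archive has its name appended to the
class list file, flushed after each entry. A standalone sketch of that record-and-flush
pattern (plain C++; is_archivable and the file handling are illustrative stand-ins for
SystemDictionaryShared::is_sharing_possible and HotSpot's classlist_file):

    #include <fstream>
    #include <string>

    // Illustrative stand-in for SystemDictionaryShared::is_sharing_possible():
    // in this sketch only classes defined by the boot loader (empty loader
    // name) may go into the archive.
    static bool is_archivable(const std::string& loader_name) {
      return loader_name.empty();
    }

    // Append one class name per line and flush immediately, so the list
    // survives an abnormal VM exit (the diff flushes for the same reason).
    static void record_loaded_class(std::ofstream& classlist,
                                    const std::string& class_name,
                                    const std::string& loader_name) {
      if (classlist.is_open() && is_archivable(loader_name)) {
        classlist << class_name << '\n';
        classlist.flush();
      }
    }

    int main() {
      std::ofstream classlist("classlist.txt", std::ios::app);
      record_loaded_class(classlist, "java/lang/String", "");    // recorded
      record_loaded_class(classlist, "com/example/App", "app");  // skipped
      return 0;
    }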
@@ -1239,8 +1270,10 @@
     // shared spaces.
     instanceKlassHandle k;
     {
+#if INCLUDE_CDS
       PerfTraceTime vmtimer(ClassLoader::perf_shared_classload_time());
       k = load_shared_class(class_name, class_loader, THREAD);
+#endif
     }
 
     if (k.is_null()) {
@@ -1599,7 +1632,6 @@
   Universe::flush_dependents_on(k);
 }
 
-
 // ----------------------------------------------------------------------------
 // GC support
 
@@ -1661,10 +1693,9 @@
 // Note: anonymous classes are not in the SD.
 bool SystemDictionary::do_unloading(BoolObjectClosure* is_alive) {
   // First, mark for unload all ClassLoaderData referencing a dead class loader.
-  bool has_dead_loaders = ClassLoaderDataGraph::do_unloading(is_alive);
-  bool unloading_occurred = false;
-  if (has_dead_loaders) {
-    unloading_occurred = dictionary()->do_unloading();
+  bool unloading_occurred = ClassLoaderDataGraph::do_unloading(is_alive);
+  if (unloading_occurred) {
+    dictionary()->do_unloading();
     constraints()->purge_loader_constraints();
     resolution_errors()->purge_resolution_errors();
   }
@@ -1682,6 +1713,7 @@
 void SystemDictionary::roots_oops_do(OopClosure* strong, OopClosure* weak) {
   strong->do_oop(&_java_system_loader);
   strong->do_oop(&_system_loader_lock_obj);
+  CDS_ONLY(SystemDictionaryShared::roots_oops_do(strong);)
 
   // Adjust dictionary
   dictionary()->roots_oops_do(strong, weak);
@@ -1693,6 +1725,7 @@
 void SystemDictionary::oops_do(OopClosure* f) {
   f->do_oop(&_java_system_loader);
   f->do_oop(&_system_loader_lock_obj);
+  CDS_ONLY(SystemDictionaryShared::oops_do(f);)
 
   // Adjust dictionary
   dictionary()->oops_do(f);
@@ -1754,6 +1787,10 @@
   invoke_method_table()->methods_do(f);
 }
 
+void SystemDictionary::remove_classes_in_error_state() {
+  dictionary()->remove_classes_in_error_state();
+}
+
 // ----------------------------------------------------------------------------
 // Lazily load klasses
 
@@ -2563,10 +2600,12 @@
 
 
 // ----------------------------------------------------------------------------
-#ifndef PRODUCT
+void SystemDictionary::print_shared(bool details) {
+  shared_dictionary()->print(details);
+}
 
-void SystemDictionary::print() {
-  dictionary()->print();
+void SystemDictionary::print(bool details) {
+  dictionary()->print(details);
 
   // Placeholders
   GCMutexLocker mu(SystemDictionary_lock);
@@ -2576,7 +2615,6 @@
   constraints()->print();
 }
 
-#endif
 
 void SystemDictionary::verify() {
   guarantee(dictionary() != NULL, "Verify of system dictionary failed");
--- a/hotspot/src/share/vm/classfile/systemDictionary.hpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/classfile/systemDictionary.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -111,6 +111,7 @@
   do_klass(SecurityManager_klass,                       java_lang_SecurityManager,                 Pre                 ) \
   do_klass(ProtectionDomain_klass,                      java_security_ProtectionDomain,            Pre                 ) \
   do_klass(AccessControlContext_klass,                  java_security_AccessControlContext,        Pre                 ) \
+  do_klass(SecureClassLoader_klass,                     java_security_SecureClassLoader,           Pre                 ) \
   do_klass(ClassNotFoundException_klass,                java_lang_ClassNotFoundException,          Pre                 ) \
   do_klass(NoClassDefFoundError_klass,                  java_lang_NoClassDefFoundError,            Pre                 ) \
   do_klass(LinkageError_klass,                          java_lang_LinkageError,                    Pre                 ) \
@@ -166,6 +167,15 @@
   do_klass(StringBuilder_klass,                         java_lang_StringBuilder,                   Pre                 ) \
   do_klass(misc_Unsafe_klass,                           sun_misc_Unsafe,                           Pre                 ) \
                                                                                                                          \
+  /* support for CDS */                                                                                                  \
+  do_klass(ByteArrayInputStream_klass,                  java_io_ByteArrayInputStream,              Pre                 ) \
+  do_klass(File_klass,                                  java_io_File,                              Pre                 ) \
+  do_klass(URLClassLoader_klass,                        java_net_URLClassLoader,                   Pre                 ) \
+  do_klass(URL_klass,                                   java_net_URL,                              Pre                 ) \
+  do_klass(Jar_Manifest_klass,                          java_util_jar_Manifest,                    Pre                 ) \
+  do_klass(sun_misc_Launcher_klass,                     sun_misc_Launcher,                         Pre                 ) \
+  do_klass(CodeSource_klass,                            java_security_CodeSource,                  Pre                 ) \
+                                                                                                                         \
   /* It's NULL in non-1.4 JDKs. */                                                                                       \
   do_klass(StackTraceElement_klass,                     java_lang_StackTraceElement,               Opt                 ) \
   /* It's okay if this turns out to be NULL in non-1.4 JDKs. */                                                          \
@@ -221,7 +231,7 @@
   static Klass* resolve_or_fail(Symbol* class_name, Handle class_loader, Handle protection_domain, bool throw_error, TRAPS);
   // Convenient call for null loader and protection domain.
   static Klass* resolve_or_fail(Symbol* class_name, bool throw_error, TRAPS);
-private:
+protected:
   // handle error translation for resolve_or_null results
   static Klass* handle_resolution_exception(Symbol* class_name, bool throw_error, KlassHandle klass_h, TRAPS);
 
@@ -326,6 +336,9 @@
   // loaders.  Returns "true" iff something was unloaded.
   static bool do_unloading(BoolObjectClosure* is_alive);
 
+  // Used by DumpSharedSpaces only to remove classes that failed verification
+  static void remove_classes_in_error_state();
+
   static int calculate_systemdictionary_size(int loadedclasses);
 
   // Applies "f->do_oop" to all root oops in the system dictionary.
@@ -335,7 +348,7 @@
   // System loader lock
   static oop system_loader_lock()           { return _system_loader_lock_obj; }
 
-private:
+protected:
   // Extended Redefine classes support (tbi)
   static void preloaded_classes_do(KlassClosure* f);
   static void lazily_loaded_classes_do(KlassClosure* f);
@@ -348,7 +361,8 @@
   static void set_shared_dictionary(HashtableBucket<mtClass>* t, int length,
                                     int number_of_entries);
   // Printing
-  static void print()                   PRODUCT_RETURN;
+  static void print(bool details = true);
+  static void print_shared(bool details = true);
   static void print_class_statistics()  PRODUCT_RETURN;
   static void print_method_statistics() PRODUCT_RETURN;
 
@@ -424,7 +438,7 @@
 
   static void load_abstract_ownable_synchronizer_klass(TRAPS);
 
-private:
+protected:
   // Tells whether ClassLoader.loadClassInternal is present
   static bool has_loadClassInternal()       { return _has_loadClassInternal; }
 
@@ -452,7 +466,7 @@
 
   // Register a new class loader
   static ClassLoaderData* register_loader(Handle class_loader, TRAPS);
-private:
+protected:
   // Mirrors for primitive classes (created eagerly)
   static oop check_mirror(oop m) {
     assert(m != NULL, "mirror not initialized");
@@ -523,7 +537,7 @@
   static Symbol* find_resolution_error(constantPoolHandle pool, int which,
                                        Symbol** message);
 
- private:
+ protected:
 
   enum Constants {
     _loader_constraint_size = 107,                     // number of entries in constraint table
@@ -574,7 +588,7 @@
   friend class CounterDecay;
   static Klass* try_get_next_class();
 
-private:
+protected:
   static void validate_protection_domain(instanceKlassHandle klass,
                                          Handle class_loader,
                                          Handle protection_domain, TRAPS);
@@ -601,10 +615,10 @@
   static instanceKlassHandle find_or_define_instance_class(Symbol* class_name,
                                                 Handle class_loader,
                                                 instanceKlassHandle k, TRAPS);
-  static instanceKlassHandle load_shared_class(Symbol* class_name,
-                                               Handle class_loader, TRAPS);
   static instanceKlassHandle load_shared_class(instanceKlassHandle ik,
-                                               Handle class_loader, TRAPS);
+                                               Handle class_loader,
+                                               Handle protection_domain,
+                                               TRAPS);
   static instanceKlassHandle load_instance_class(Symbol* class_name, Handle class_loader, TRAPS);
   static Handle compute_loader_lock_object(Handle class_loader, TRAPS);
   static void check_loader_lock_contention(Handle loader_lock, TRAPS);
@@ -612,9 +626,12 @@
   static bool is_parallelDefine(Handle class_loader);
 
 public:
+  static instanceKlassHandle load_shared_class(Symbol* class_name,
+                                               Handle class_loader,
+                                               TRAPS);
   static bool is_ext_class_loader(Handle class_loader);
 
-private:
+protected:
   static Klass* find_shared_class(Symbol* class_name);
 
   // Setup link to hierarchy
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/classfile/systemDictionaryShared.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+
+#ifndef SHARE_VM_CLASSFILE_SYSTEMDICTIONARYSHARED_HPP
+#define SHARE_VM_CLASSFILE_SYSTEMDICTIONARYSHARED_HPP
+
+#include "classfile/systemDictionary.hpp"
+
+class SystemDictionaryShared: public SystemDictionary {
+public:
+  static void initialize(TRAPS) {}
+  static instanceKlassHandle find_or_load_shared_class(Symbol* class_name,
+                                                       Handle class_loader,
+                                                       TRAPS) {
+    return instanceKlassHandle();
+  }
+  static void roots_oops_do(OopClosure* blk) {}
+  static void oops_do(OopClosure* f) {}
+  static bool is_sharing_possible(ClassLoaderData* loader_data) {
+    oop class_loader = loader_data->class_loader();
+    return (class_loader == NULL);
+  }
+};
+
+#endif // SHARE_VM_CLASSFILE_SYSTEMDICTIONARYSHARED_HPP
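The new header keeps SystemDictionaryShared as a set of empty inline stubs, so call
sites such as find_or_load_shared_class and roots_oops_do can stay unconditional and
cost nothing where the extended sharing support is absent. The same stub-header
pattern in miniature (FEATURE_X and the struct are illustrative, not HotSpot code):

    #include <iostream>

    #define FEATURE_X 0  // flip to 1 to compile the real implementation

    #if FEATURE_X
    struct FeatureX {
      static void initialize() { std::cout << "feature initialized\n"; }
      static bool is_possible(const void* ctx) { return ctx != nullptr; }
    };
    #else
    // Stub variant: empty inline bodies let call sites stay unconditional;
    // the optimizer removes the calls entirely.
    struct FeatureX {
      static void initialize() {}
      static bool is_possible(const void*) { return false; }
    };
    #endif

    int main() {
      FeatureX::initialize();  // compiles and links either way
      std::cout << FeatureX::is_possible(nullptr) << '\n';
      return 0;
    }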
--- a/hotspot/src/share/vm/classfile/vmSymbols.hpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/classfile/vmSymbols.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -91,11 +91,17 @@
   template(java_lang_CharSequence,                    "java/lang/CharSequence")                   \
   template(java_lang_SecurityManager,                 "java/lang/SecurityManager")                \
   template(java_security_AccessControlContext,        "java/security/AccessControlContext")       \
+  template(java_security_CodeSource,                  "java/security/CodeSource")                 \
   template(java_security_ProtectionDomain,            "java/security/ProtectionDomain")           \
+  template(java_security_SecureClassLoader,           "java/security/SecureClassLoader")          \
+  template(java_net_URLClassLoader,                   "java/net/URLClassLoader")                  \
+  template(java_net_URL,                              "java/net/URL")                             \
+  template(java_util_jar_Manifest,                    "java/util/jar/Manifest")                   \
   template(impliesCreateAccessControlContext_name,    "impliesCreateAccessControlContext")        \
   template(java_io_OutputStream,                      "java/io/OutputStream")                     \
   template(java_io_Reader,                            "java/io/Reader")                           \
   template(java_io_BufferedReader,                    "java/io/BufferedReader")                   \
+  template(java_io_File,                              "java/io/File")                             \
   template(java_io_FileInputStream,                   "java/io/FileInputStream")                  \
   template(java_io_ByteArrayInputStream,              "java/io/ByteArrayInputStream")             \
   template(java_io_Serializable,                      "java/io/Serializable")                     \
@@ -106,6 +112,7 @@
   template(java_util_Hashtable,                       "java/util/Hashtable")                      \
   template(java_lang_Compiler,                        "java/lang/Compiler")                       \
   template(sun_misc_Signal,                           "sun/misc/Signal")                          \
+  template(sun_misc_Launcher,                         "sun/misc/Launcher")                        \
   template(java_lang_AssertionStatusDirectives,       "java/lang/AssertionStatusDirectives")      \
   template(getBootClassPathEntryForClass_name,        "getBootClassPathEntryForClass")            \
   template(sun_misc_PostVMInitHook,                   "sun/misc/PostVMInitHook")                  \
@@ -396,6 +403,14 @@
   template(signers_name,                              "signers_name")                             \
   template(loader_data_name,                          "loader_data")                              \
   template(dependencies_name,                         "dependencies")                             \
+  template(input_stream_void_signature,               "(Ljava/io/InputStream;)V")                 \
+  template(getFileURL_name,                           "getFileURL")                               \
+  template(getFileURL_signature,                      "(Ljava/io/File;)Ljava/net/URL;")           \
+  template(definePackageInternal_name,                "definePackageInternal")                    \
+  template(definePackageInternal_signature,           "(Ljava/lang/String;Ljava/util/jar/Manifest;Ljava/net/URL;)V") \
+  template(getProtectionDomain_name,                  "getProtectionDomain")                      \
+  template(getProtectionDomain_signature,             "(Ljava/security/CodeSource;)Ljava/security/ProtectionDomain;") \
+  template(url_code_signer_array_void_signature,      "(Ljava/net/URL;[Ljava/security/CodeSigner;)V") \
                                                                                                   \
   /* non-intrinsic name/signature pairs: */                                                       \
   template(register_method_name,                      "register")                                 \
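The signatures added above are ordinary JVM method descriptors: parameter descriptors
between the parentheses, the return type after them, with L<class-name>; for reference
types and V for void, so "(Ljava/io/File;)Ljava/net/URL;" denotes a method taking a
java.io.File and returning a java.net.URL. An illustrative decoder for a subset of
descriptor characters (not HotSpot code):

    #include <iostream>
    #include <string>
    #include <vector>

    // Decode one JVM type descriptor starting at position i; advances i
    // past it and returns a readable type name. Only a subset of the
    // primitive codes is handled here.
    static std::string decode_type(const std::string& d, size_t& i) {
      switch (d[i]) {
        case 'V': ++i; return "void";
        case 'I': ++i; return "int";
        case 'J': ++i; return "long";
        case 'Z': ++i; return "boolean";
        case '[': ++i; return decode_type(d, i) + "[]";
        case 'L': {
          size_t semi = d.find(';', i);
          std::string name = d.substr(i + 1, semi - i - 1);
          i = semi + 1;
          for (char& c : name) if (c == '/') c = '.';
          return name;
        }
        default:  ++i; return "?";
      }
    }

    int main() {
      // The descriptor registered for getFileURL in the hunk above.
      std::string desc = "(Ljava/io/File;)Ljava/net/URL;";
      size_t i = 1;  // skip '('
      std::vector<std::string> params;
      while (desc[i] != ')') params.push_back(decode_type(desc, i));
      ++i;           // skip ')'
      std::string ret = decode_type(desc, i);
      std::cout << ret << " f(";
      for (size_t p = 0; p < params.size(); ++p)
        std::cout << (p ? ", " : "") << params[p];
      std::cout << ")\n";  // prints: java.net.URL f(java.io.File)
      return 0;
    }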
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -2734,10 +2734,12 @@
   }
 }
 
-void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
-  assert(fl->count() == 0, "Precondition.");
-  assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
-         "Precondition");
+// Used by par_get_chunk_of_blocks() for the chunks from the
+// indexed_free_lists.  Looks for a chunk whose size is a multiple
+// of "word_sz" and, if found, splits it into "word_sz" chunks and
+// adds them to the free list "fl".  "n" is the maximum number of
+// chunks to be added to "fl".
+bool CompactibleFreeListSpace::par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
 
   // We'll try all multiples of word_sz in the indexed set, starting with
   // word_sz itself and, if CMSSplitIndexedFreeListBlocks, try larger multiples,
@@ -2818,11 +2820,15 @@
                         Mutex::_no_safepoint_check_flag);
         ssize_t births = _indexedFreeList[word_sz].split_births() + num;
         _indexedFreeList[word_sz].set_split_births(births);
-        return;
+        return true;
       }
     }
+    return found;
   }
-  // Otherwise, we'll split a block from the dictionary.
+}
+
+FreeChunk* CompactibleFreeListSpace::get_n_way_chunk_to_split(size_t word_sz, size_t n) {
+
   FreeChunk* fc = NULL;
   FreeChunk* rem_fc = NULL;
   size_t rem;
@@ -2833,16 +2839,12 @@
       fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()),
                                   FreeBlockDictionary<FreeChunk>::atLeast);
       if (fc != NULL) {
-        _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
-        dictionary()->dict_census_update(fc->size(),
-                                       true /*split*/,
-                                       false /*birth*/);
         break;
       } else {
         n--;
       }
     }
-    if (fc == NULL) return;
+    if (fc == NULL) return NULL;
     // Otherwise, split up that block.
     assert((ssize_t)n >= 1, "Control point invariant");
     assert(fc->is_free(), "Error: should be a free block");
@@ -2864,10 +2866,14 @@
     // dictionary and return, leaving "fl" empty.
     if (n == 0) {
       returnChunkToDictionary(fc);
-      assert(fl->count() == 0, "We never allocated any blocks");
-      return;
+      return NULL;
     }
 
+    _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
+    dictionary()->dict_census_update(fc->size(),
+                                     true /*split*/,
+                                     false /*birth*/);
+
     // First return the remainder, if any.
     // Note that we hold the lock until we decide if we're going to give
     // back the remainder to the dictionary, since a concurrent allocation
@@ -2900,7 +2906,24 @@
     _indexedFreeList[rem].return_chunk_at_head(rem_fc);
     smallSplitBirth(rem);
   }
-  assert((ssize_t)n > 0 && fc != NULL, "Consistency");
+  assert(n * word_sz == fc->size(),
+    err_msg("Chunk size " SIZE_FORMAT " is not exactly splittable by "
+    SIZE_FORMAT " sized chunks of size " SIZE_FORMAT,
+    fc->size(), n, word_sz));
+  return fc;
+}
+
+void CompactibleFreeListSpace::par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t targetted_number_of_chunks, AdaptiveFreeList<FreeChunk>* fl) {
+
+  FreeChunk* fc = get_n_way_chunk_to_split(word_sz, targetted_number_of_chunks);
+
+  if (fc == NULL) {
+    return;
+  }
+
+  size_t n = fc->size() / word_sz;
+
+  assert((ssize_t)n > 0, "Consistency");
   // Now do the splitting up.
   // Must do this in reverse order, so that anybody attempting to
   // access the main chunk sees it as a single free block until we
@@ -2948,6 +2971,20 @@
   assert(fl->tail()->next() == NULL, "List invariant.");
 }
 
+void CompactibleFreeListSpace::par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
+  assert(fl->count() == 0, "Precondition.");
+  assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
+         "Precondition");
+
+  if (par_get_chunk_of_blocks_IFL(word_sz, n, fl)) {
+    // Got it
+    return;
+  }
+
+  // Otherwise, we'll split a block from the dictionary.
+  par_get_chunk_of_blocks_dictionary(word_sz, n, fl);
+}
+
 // Set up the space's par_seq_tasks structure for work claiming
 // for parallel rescan. See CMSParRemarkTask where this is currently used.
 // XXX Need to suitably abstract and generalize this and the next
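The refactoring above splits the dictionary chunk into "word_sz"-sized blocks in
reverse order, so a concurrent observer keeps seeing the main chunk as one large free
block until the carving is complete. A simplified single-threaded sketch of that
carving order (the free-list type is illustrative, not the CMS one):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    struct FreeChunk {
      FreeChunk* next;
      size_t     size;  // in words
    };

    // Split a chunk of n * word_sz words into n blocks of word_sz words,
    // carving from the highest address downwards. The header at the lowest
    // address is rewritten only in the final iteration, which mirrors why
    // the diff performs the split in reverse order.
    static FreeChunk* split_n_way(void* base, size_t word_sz, size_t n) {
      FreeChunk* head = nullptr;
      char* p = static_cast<char*>(base);
      for (size_t i = n; i-- > 0; ) {
        FreeChunk* fc = reinterpret_cast<FreeChunk*>(p + i * word_sz * sizeof(void*));
        fc->size = word_sz;
        fc->next = head;
        head = fc;
      }
      return head;
    }

    int main() {
      const size_t word_sz = 8, n = 4;
      std::vector<char> block(n * word_sz * sizeof(void*));
      FreeChunk* fl = split_n_way(block.data(), word_sz, n);
      size_t count = 0;
      for (FreeChunk* c = fl; c != nullptr; c = c->next) {
        assert(c->size == word_sz);
        ++count;
      }
      assert(count == n);
      return 0;
    }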
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -172,6 +172,20 @@
   // list of size "word_sz", and must now be decremented.
   void par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);
 
+  // Used by par_get_chunk_of_blocks() for the chunks from the
+  // indexed_free_lists.
+  bool par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);
+
+  // Used by par_get_chunk_of_blocks_dictionary() to get a chunk that
+  // is evenly splittable into "n" chunks of size "word_sz". Returns
+  // that chunk, possibly splitting a larger chunk from the dictionary
+  // to obtain it.
+  FreeChunk* get_n_way_chunk_to_split(size_t word_sz, size_t n);
+
+  // Used by par_get_chunk_of_blocks() for the chunks from the
+  // dictionary.
+  void par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);
+
   // Allocation helper functions
   // Allocate using a strategy that takes from the indexed free lists
   // first.  This allocation strategy assumes a companion sweeping
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -81,8 +81,8 @@
   }
 }
 
-void ConcurrentG1Refine::init() {
-  _hot_card_cache.initialize();
+void ConcurrentG1Refine::init(G1RegionToSpaceMapper* card_counts_storage) {
+  _hot_card_cache.initialize(card_counts_storage);
 }
 
 void ConcurrentG1Refine::stop() {
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -34,6 +34,7 @@
 class ConcurrentG1RefineThread;
 class G1CollectedHeap;
 class G1HotCardCache;
+class G1RegionToSpaceMapper;
 class G1RemSet;
 class DirtyCardQueue;
 
@@ -74,7 +75,7 @@
   ConcurrentG1Refine(G1CollectedHeap* g1h, CardTableEntryClosure* refine_closure);
   ~ConcurrentG1Refine();
 
-  void init(); // Accomplish some initialization that has to wait.
+  void init(G1RegionToSpaceMapper* card_counts_storage);
   void stop();
 
   void reinitialize_threads();
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -36,6 +36,7 @@
 #include "gc_implementation/g1/heapRegion.inline.hpp"
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
+#include "gc_implementation/g1/heapRegionSet.inline.hpp"
 #include "gc_implementation/shared/vmGCOperations.hpp"
 #include "gc_implementation/shared/gcTimer.hpp"
 #include "gc_implementation/shared/gcTrace.hpp"
@@ -99,12 +100,12 @@
 }
 
 #ifndef PRODUCT
-bool CMBitMapRO::covers(ReservedSpace heap_rs) const {
+bool CMBitMapRO::covers(MemRegion heap_rs) const {
   // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
   assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
          "size inconsistency");
-  return _bmStartWord == (HeapWord*)(heap_rs.base()) &&
-         _bmWordSize  == heap_rs.size()>>LogHeapWordSize;
+  return _bmStartWord == (HeapWord*)(heap_rs.start()) &&
+         _bmWordSize  == heap_rs.word_size();
 }
 #endif
 
@@ -112,33 +113,73 @@
   _bm.print_on_error(st, prefix);
 }
 
-bool CMBitMap::allocate(ReservedSpace heap_rs) {
-  _bmStartWord = (HeapWord*)(heap_rs.base());
-  _bmWordSize  = heap_rs.size()/HeapWordSize;    // heap_rs.size() is in bytes
-  ReservedSpace brs(ReservedSpace::allocation_align_size_up(
-                     (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
-  if (!brs.is_reserved()) {
-    warning("ConcurrentMark marking bit map allocation failure");
+size_t CMBitMap::compute_size(size_t heap_size) {
+  return heap_size / mark_distance();
+}
+
+size_t CMBitMap::mark_distance() {
+  return MinObjAlignmentInBytes * BitsPerByte;
+}
+
+void CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
+  _bmStartWord = heap.start();
+  _bmWordSize = heap.word_size();
+
+  _bm.set_map((BitMap::bm_word_t*) storage->reserved().start());
+  _bm.set_size(_bmWordSize >> _shifter);
+
+  storage->set_mapping_changed_listener(&_listener);
+}
+
+void CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions) {
+  // We need to clear the bitmap on commit, removing any existing information.
+  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
+  _bm->clearRange(mr);
+}
+
+// Closure used for clearing the given mark bitmap.
+class ClearBitmapHRClosure : public HeapRegionClosure {
+ private:
+  ConcurrentMark* _cm;
+  CMBitMap* _bitmap;
+  bool _may_yield;      // The closure may yield during iteration. If yielded, abort the iteration.
+ public:
+  ClearBitmapHRClosure(ConcurrentMark* cm, CMBitMap* bitmap, bool may_yield) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap), _may_yield(may_yield) {
+    assert(!may_yield || cm != NULL, "CM must be non-NULL if this closure is expected to yield.");
+  }
+
+  virtual bool doHeapRegion(HeapRegion* r) {
+    size_t const chunk_size_in_words = M / HeapWordSize;
+
+    HeapWord* cur = r->bottom();
+    HeapWord* const end = r->end();
+
+    while (cur < end) {
+      MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
+      _bitmap->clearRange(mr);
+
+      cur += chunk_size_in_words;
+
+      // Abort iteration if after yielding the marking has been aborted.
+      if (_may_yield && _cm->do_yield_check() && _cm->has_aborted()) {
+        return true;
+      }
+      // Repeat the asserts from before the start of the closure. We will do them
+      // as asserts here to minimize their overhead on the product. However, we
+      // will have them as guarantees at the beginning / end of the bitmap
+      // clearing to get some checking in the product.
+      assert(!_may_yield || _cm->cmThread()->during_cycle(), "invariant");
+      assert(!_may_yield || !G1CollectedHeap::heap()->mark_in_progress(), "invariant");
+    }
+
     return false;
   }
-  MemTracker::record_virtual_memory_type((address)brs.base(), mtGC);
-  // For now we'll just commit all of the bit map up front.
-  // Later on we'll try to be more parsimonious with swap.
-  if (!_virtual_space.initialize(brs, brs.size())) {
-    warning("ConcurrentMark marking bit map backing store failure");
-    return false;
-  }
-  assert(_virtual_space.committed_size() == brs.size(),
-         "didn't reserve backing store for all of concurrent marking bit map?");
-  _bm.set_map((BitMap::bm_word_t*)_virtual_space.low());
-  assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
-         _bmWordSize, "inconsistency in bit map sizing");
-  _bm.set_size(_bmWordSize >> _shifter);
-  return true;
-}
+};
 
 void CMBitMap::clearAll() {
-  _bm.clear();
+  ClearBitmapHRClosure cl(NULL, this, false /* may_yield */);
+  G1CollectedHeap::heap()->heap_region_iterate(&cl);
+  guarantee(cl.complete(), "Must have completed iteration.");
   return;
 }
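Given compute_size() and mark_distance() above, one byte of bitmap covers
MinObjAlignmentInBytes * BitsPerByte bytes of heap. A quick arithmetic check, assuming
the usual 8-byte object alignment of 64-bit builds (so a 1 GiB heap needs 16 MiB per
marking bitmap, and G1 keeps two of them):

    #include <cstddef>
    #include <iostream>

    const size_t MinObjAlignmentInBytes = 8;  // assumed typical 64-bit value
    const size_t BitsPerByte            = 8;

    // Heap bytes covered by one bitmap byte (the diff's mark_distance()).
    size_t mark_distance() { return MinObjAlignmentInBytes * BitsPerByte; }

    // Bitmap bytes needed for a heap of the given size (compute_size()).
    size_t compute_size(size_t heap_size) { return heap_size / mark_distance(); }

    int main() {
      size_t one_gib = 1024u * 1024u * 1024u;
      std::cout << compute_size(one_gib) / (1024 * 1024) << " MiB\n";  // 16 MiB
      return 0;
    }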
 
@@ -483,10 +524,10 @@
   return MAX2((n_par_threads + 2) / 4, 1U);
 }
 
-ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) :
+ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage) :
   _g1h(g1h),
-  _markBitMap1(log2_intptr(MinObjAlignment)),
-  _markBitMap2(log2_intptr(MinObjAlignment)),
+  _markBitMap1(),
+  _markBitMap2(),
   _parallel_marking_threads(0),
   _max_parallel_marking_threads(0),
   _sleep_factor(0.0),
@@ -495,7 +536,7 @@
   _cleanup_task_overhead(1.0),
   _cleanup_list("Cleanup List"),
   _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
-  _card_bm((heap_rs.size() + CardTableModRefBS::card_size - 1) >>
+  _card_bm((g1h->reserved_region().byte_size() + CardTableModRefBS::card_size - 1) >>
             CardTableModRefBS::card_shift,
             false /* in_resource_area*/),
 
@@ -545,14 +586,8 @@
                            "heap end = " PTR_FORMAT, p2i(_heap_start), p2i(_heap_end));
   }
 
-  if (!_markBitMap1.allocate(heap_rs)) {
-    warning("Failed to allocate first CM bit map");
-    return;
-  }
-  if (!_markBitMap2.allocate(heap_rs)) {
-    warning("Failed to allocate second CM bit map");
-    return;
-  }
+  _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage);
+  _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage);
 
   // Create & start a ConcurrentMark thread.
   _cmThread = new ConcurrentMarkThread(this);
@@ -563,8 +598,8 @@
   }
 
   assert(CGC_lock != NULL, "Where's the CGC_lock?");
-  assert(_markBitMap1.covers(heap_rs), "_markBitMap1 inconsistency");
-  assert(_markBitMap2.covers(heap_rs), "_markBitMap2 inconsistency");
+  assert(_markBitMap1.covers(g1h->reserved_region()), "_markBitMap1 inconsistency");
+  assert(_markBitMap2.covers(g1h->reserved_region()), "_markBitMap2 inconsistency");
 
   SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
   satb_qs.set_buffer_size(G1SATBBufferSize);
@@ -724,38 +759,17 @@
   clear_all_count_data();
 
   // so that the call below can read a sensible value
-  _heap_start = (HeapWord*) heap_rs.base();
+  _heap_start = g1h->reserved_region().start();
   set_non_marking_state();
   _completed_initialization = true;
 }
 
-void ConcurrentMark::update_g1_committed(bool force) {
-  // If concurrent marking is not in progress, then we do not need to
-  // update _heap_end.
-  if (!concurrent_marking_in_progress() && !force) return;
-
-  MemRegion committed = _g1h->g1_committed();
-  assert(committed.start() == _heap_start, "start shouldn't change");
-  HeapWord* new_end = committed.end();
-  if (new_end > _heap_end) {
-    // The heap has been expanded.
-
-    _heap_end = new_end;
-  }
-  // Notice that the heap can also shrink. However, this only happens
-  // during a Full GC (at least currently) and the entire marking
-  // phase will bail out and the task will not be restarted. So, let's
-  // do nothing.
-}
-
 void ConcurrentMark::reset() {
   // Starting values for these two. This should be called in a STW
-  // phase. CM will be notified of any future g1_committed expansions
-  // will be at the end of evacuation pauses, when tasks are
-  // inactive.
-  MemRegion committed = _g1h->g1_committed();
-  _heap_start = committed.start();
-  _heap_end   = committed.end();
+  // phase.
+  MemRegion reserved = _g1h->g1_reserved();
+  _heap_start = reserved.start();
+  _heap_end   = reserved.end();
 
   // Separated the asserts so that we know which one fires.
   assert(_heap_start != NULL, "heap bounds should look ok");
@@ -827,7 +841,6 @@
     assert(out_of_regions(),
            err_msg("only way to get here: _finger: "PTR_FORMAT", _heap_end: "PTR_FORMAT,
                    p2i(_finger), p2i(_heap_end)));
-    update_g1_committed(true);
   }
 }
 
@@ -846,7 +859,6 @@
 
 void ConcurrentMark::clearNextBitmap() {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  G1CollectorPolicy* g1p = g1h->g1_policy();
 
   // Make sure that the concurrent mark thread looks to still be in
   // the current cycle.
@@ -858,41 +870,36 @@
   // is the case.
   guarantee(!g1h->mark_in_progress(), "invariant");
 
-  // clear the mark bitmap (no grey objects to start with).
-  // We need to do this in chunks and offer to yield in between
-  // each chunk.
-  HeapWord* start  = _nextMarkBitMap->startWord();
-  HeapWord* end    = _nextMarkBitMap->endWord();
-  HeapWord* cur    = start;
-  size_t chunkSize = M;
-  while (cur < end) {
-    HeapWord* next = cur + chunkSize;
-    if (next > end) {
-      next = end;
-    }
-    MemRegion mr(cur,next);
-    _nextMarkBitMap->clearRange(mr);
-    cur = next;
-    do_yield_check();
-
-    // Repeat the asserts from above. We'll do them as asserts here to
-    // minimize their overhead on the product. However, we'll have
-    // them as guarantees at the beginning / end of the bitmap
-    // clearing to get some checking in the product.
-    assert(cmThread()->during_cycle(), "invariant");
-    assert(!g1h->mark_in_progress(), "invariant");
+  ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */);
+  g1h->heap_region_iterate(&cl);
+
+  // Clear the liveness counting data. If the marking has been aborted, the abort()
+  // call already did that.
+  if (cl.complete()) {
+    clear_all_count_data();
   }
 
-  // Clear the liveness counting data
-  clear_all_count_data();
-
   // Repeat the asserts from above.
   guarantee(cmThread()->during_cycle(), "invariant");
   guarantee(!g1h->mark_in_progress(), "invariant");
 }
 
+class CheckBitmapClearHRClosure : public HeapRegionClosure {
+  CMBitMap* _bitmap;
+  bool _error;
+ public:
+  CheckBitmapClearHRClosure(CMBitMap* bitmap) : _bitmap(bitmap) {
+  }
+
+  virtual bool doHeapRegion(HeapRegion* r) {
+    return _bitmap->getNextMarkedWordAddress(r->bottom(), r->end()) != r->end();
+  }
+};
+
 bool ConcurrentMark::nextMarkBitmapIsClear() {
-  return _nextMarkBitMap->getNextMarkedWordAddress(_heap_start, _heap_end) == _heap_end;
+  CheckBitmapClearHRClosure cl(_nextMarkBitMap);
+  _g1h->heap_region_iterate(&cl);
+  return cl.complete();
 }
 
 class NoteStartOfMarkHRClosure: public HeapRegionClosure {
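clearNextBitmap now delegates to ClearBitmapHRClosure, which clears each region in
1 MiB chunks and, when may_yield is set, aborts the iteration if marking was aborted
after a yield. A standalone sketch of chunked clearing with an abort check (the atomic
flag is an illustrative stand-in for the has_aborted() query):

    #include <atomic>
    #include <cstddef>
    #include <cstring>
    #include <vector>

    std::atomic<bool> abort_requested(false);  // stand-in for cm->has_aborted()

    // Clear [buf, buf+len) in fixed-size chunks; between chunks, give the
    // caller a chance to abort. Returns true if the clear ran to completion.
    bool clear_in_chunks(unsigned char* buf, size_t len, size_t chunk) {
      for (size_t cur = 0; cur < len; cur += chunk) {
        size_t n = (len - cur < chunk) ? (len - cur) : chunk;
        std::memset(buf + cur, 0, n);
        if (abort_requested.load(std::memory_order_relaxed)) {
          return false;  // aborted; the buffer may be only partially cleared
        }
      }
      return true;
    }

    int main() {
      std::vector<unsigned char> bitmap(8u * 1024 * 1024, 0xFF);
      const size_t one_mib = 1024 * 1024;  // same chunk size as the diff
      return clear_in_chunks(bitmap.data(), bitmap.size(), one_mib) ? 0 : 1;
    }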
@@ -2193,10 +2200,10 @@
                            _cleanup_list.length());
   }
 
-  // Noone else should be accessing the _cleanup_list at this point,
-  // so it's not necessary to take any locks
+  // No one else should be accessing the _cleanup_list at this point,
+  // so it is not necessary to take any locks
   while (!_cleanup_list.is_empty()) {
-    HeapRegion* hr = _cleanup_list.remove_head();
+    HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
     assert(hr != NULL, "Got NULL from a non-empty list");
     hr->par_clear();
     tmp_free_list.add_ordered(hr);
@@ -2980,22 +2987,25 @@
     // claim_region() and a humongous object allocation might force us
     // to do a bit of unnecessary work (due to some unnecessary bitmap
     // iterations) but it should not introduce any correctness issues.
-    HeapRegion* curr_region   = _g1h->heap_region_containing_raw(finger);
-    HeapWord*   bottom        = curr_region->bottom();
-    HeapWord*   end           = curr_region->end();
-    HeapWord*   limit         = curr_region->next_top_at_mark_start();
-
-    if (verbose_low()) {
-      gclog_or_tty->print_cr("[%u] curr_region = "PTR_FORMAT" "
-                             "["PTR_FORMAT", "PTR_FORMAT"), "
-                             "limit = "PTR_FORMAT,
-                             worker_id, p2i(curr_region), p2i(bottom), p2i(end), p2i(limit));
-    }
+    HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger);
+
+    // heap_region_containing_raw above may return NULL, as we always scan and
+    // claim regions up to the end of the heap. In this case, just jump to the
+    // next region.
+    HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
 
     // Is the gap between reading the finger and doing the CAS too long?
     HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
-    if (res == finger) {
+    if (res == finger && curr_region != NULL) {
       // we succeeded
+      HeapWord*   bottom        = curr_region->bottom();
+      HeapWord*   limit         = curr_region->next_top_at_mark_start();
+
+      if (verbose_low()) {
+        gclog_or_tty->print_cr("[%u] curr_region = "PTR_FORMAT" "
+                               "["PTR_FORMAT", "PTR_FORMAT"), "
+                               "limit = "PTR_FORMAT,
+                               worker_id, p2i(curr_region), p2i(bottom), p2i(end), p2i(limit));
+      }
 
       // notice that _finger == end cannot be guaranteed here since
       // someone else might have moved the finger even further
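In the hunk above a worker owns a region only if its compare-and-swap installed the
new finger value; when heap_region_containing_raw returns NULL (an uncommitted part of
the heap) the finger is still advanced but nothing is claimed. A simplified sketch of
finger claiming with std::atomic (region size and heap bounds are illustrative):

    #include <atomic>
    #include <cstddef>
    #include <iostream>

    const size_t region_words = 1024;
    const size_t heap_end     = 8 * region_words;
    std::atomic<size_t> global_finger(0);  // next unclaimed word index

    // Try to claim the region at the current finger. Returns the claimed
    // start index, or heap_end when nothing remains. On CAS failure another
    // worker moved the finger; compare_exchange reloads it and we retry.
    size_t claim_region() {
      size_t finger = global_finger.load();
      while (finger < heap_end) {
        size_t end = finger + region_words;
        if (global_finger.compare_exchange_weak(finger, end)) {
          return finger;  // we succeeded; this region is ours
        }
      }
      return heap_end;
    }

    int main() {
      for (size_t r; (r = claim_region()) != heap_end; ) {
        std::cout << "claimed region at word " << r << '\n';
      }
      return 0;
    }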
@@ -3026,10 +3036,17 @@
     } else {
       assert(_finger > finger, "the finger should have moved forward");
       if (verbose_low()) {
-        gclog_or_tty->print_cr("[%u] somebody else moved the finger, "
-                               "global finger = "PTR_FORMAT", "
-                               "our finger = "PTR_FORMAT,
-                               worker_id, p2i(_finger), p2i(finger));
+        if (curr_region == NULL) {
+          gclog_or_tty->print_cr("[%u] found uncommitted region, moving finger, "
+                                 "global finger = "PTR_FORMAT", "
+                                 "our finger = "PTR_FORMAT,
+                                 worker_id, p2i(_finger), p2i(finger));
+        } else {
+          gclog_or_tty->print_cr("[%u] somebody else moved the finger, "
+                                 "global finger = "PTR_FORMAT", "
+                                 "our finger = "PTR_FORMAT,
+                                 worker_id, p2i(_finger), p2i(finger));
+        }
       }
 
       // read it again
@@ -3144,8 +3161,10 @@
       // happens, heap_region_containing() will return the bottom of the
       // corresponding starts humongous region and the check below will
       // not hold any more.
+      // Since we always iterate over all regions, we might get a NULL HeapRegion
+      // here.
       HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger);
-      guarantee(global_finger == global_hr->bottom(),
+      guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
                 err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT,
                         p2i(global_finger), HR_FORMAT_PARAMS(global_hr)));
     }
@@ -3158,7 +3177,7 @@
       if (task_finger != NULL && task_finger < _heap_end) {
         // See above note on the global finger verification.
         HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger);
-        guarantee(task_finger == task_hr->bottom() ||
+        guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
                   !task_hr->in_collection_set(),
                   err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT,
                           p2i(task_finger), HR_FORMAT_PARAMS(task_hr)));
@@ -4674,7 +4693,6 @@
     _hum_prev_live_bytes(0), _hum_next_live_bytes(0),
     _total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  MemRegion g1_committed = g1h->g1_committed();
   MemRegion g1_reserved = g1h->g1_reserved();
   double now = os::elapsedTime();
 
@@ -4682,10 +4700,8 @@
   _out->cr();
   _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
   _out->print_cr(G1PPRL_LINE_PREFIX" HEAP"
-                 G1PPRL_SUM_ADDR_FORMAT("committed")
                  G1PPRL_SUM_ADDR_FORMAT("reserved")
                  G1PPRL_SUM_BYTE_FORMAT("region-size"),
-                 p2i(g1_committed.start()), p2i(g1_committed.end()),
                  p2i(g1_reserved.start()), p2i(g1_reserved.end()),
                  HeapRegion::GrainBytes);
   _out->print_cr(G1PPRL_LINE_PREFIX);
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -27,10 +27,12 @@
 
 #include "classfile/javaClasses.hpp"
 #include "gc_implementation/g1/heapRegionSet.hpp"
+#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
 #include "gc_implementation/shared/gcId.hpp"
 #include "utilities/taskqueue.hpp"
 
 class G1CollectedHeap;
+class CMBitMap;
 class CMTask;
 typedef GenericTaskQueue<oop, mtGC>            CMTaskQueue;
 typedef GenericTaskQueueSet<CMTaskQueue, mtGC> CMTaskQueueSet;
@@ -57,7 +59,6 @@
   HeapWord* _bmStartWord;      // base address of range covered by map
   size_t    _bmWordSize;       // map size (in #HeapWords covered)
   const int _shifter;          // map to char or bit
-  VirtualSpace _virtual_space; // underlying the bit map
   BitMap    _bm;               // the bit map itself
 
  public:
@@ -115,42 +116,41 @@
   void print_on_error(outputStream* st, const char* prefix) const;
 
   // debugging
-  NOT_PRODUCT(bool covers(ReservedSpace rs) const;)
+  NOT_PRODUCT(bool covers(MemRegion rs) const;)
+};
+
+class CMBitMapMappingChangedListener : public G1MappingChangedListener {
+ private:
+  CMBitMap* _bm;
+ public:
+  CMBitMapMappingChangedListener() : _bm(NULL) {}
+
+  void set_bitmap(CMBitMap* bm) { _bm = bm; }
+
+  virtual void on_commit(uint start_idx, size_t num_regions);
 };
 
 class CMBitMap : public CMBitMapRO {
+ private:
+  CMBitMapMappingChangedListener _listener;
 
  public:
-  // constructor
-  CMBitMap(int shifter) :
-    CMBitMapRO(shifter) {}
+  static size_t compute_size(size_t heap_size);
+  // Returns the number of heap bytes between two marks in the bitmap.
+  static size_t mark_distance();
 
-  // Allocates the back store for the marking bitmap
-  bool allocate(ReservedSpace heap_rs);
+  CMBitMap() : CMBitMapRO(LogMinObjAlignment), _listener() { _listener.set_bitmap(this); }
 
-  // write marks
-  void mark(HeapWord* addr) {
-    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
-           "outside underlying space?");
-    _bm.set_bit(heapWordToOffset(addr));
-  }
-  void clear(HeapWord* addr) {
-    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
-           "outside underlying space?");
-    _bm.clear_bit(heapWordToOffset(addr));
-  }
-  bool parMark(HeapWord* addr) {
-    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
-           "outside underlying space?");
-    return _bm.par_set_bit(heapWordToOffset(addr));
-  }
-  bool parClear(HeapWord* addr) {
-    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
-           "outside underlying space?");
-    return _bm.par_clear_bit(heapWordToOffset(addr));
-  }
+  // Initializes the underlying BitMap to cover the given area.
+  void initialize(MemRegion heap, G1RegionToSpaceMapper* storage);
+
+  // Write marks.
+  inline void mark(HeapWord* addr);
+  inline void clear(HeapWord* addr);
+  inline bool parMark(HeapWord* addr);
+  inline bool parClear(HeapWord* addr);
+
   void markRange(MemRegion mr);
-  void clearAll();
   void clearRange(MemRegion mr);
 
   // Starting at the bit corresponding to "addr" (inclusive), find the next
@@ -161,6 +161,9 @@
   // the run.  If there is no "1" bit at or after "addr", return an empty
   // MemRegion.
   MemRegion getAndClearMarkedRegion(HeapWord* addr, HeapWord* end_addr);
+
+  // Clear the whole mark bitmap.
+  void clearAll();
 };
 
 // Represents a marking stack used by ConcurrentMarking in the G1 collector.
@@ -680,7 +683,7 @@
     return _task_queues->steal(worker_id, hash_seed, obj);
   }
 
-  ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs);
+  ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage);
   ~ConcurrentMark();
 
   ConcurrentMarkThread* cmThread() { return _cmThread; }
@@ -736,7 +739,8 @@
   // Clear the next marking bitmap (will be called concurrently).
   void clearNextBitmap();
 
-  // Return whether the next mark bitmap has no marks set.
+  // Return whether the next mark bitmap has no marks set. To be used for assertions
+  // only. Will not yield to pause requests.
   bool nextMarkBitmapIsClear();
 
   // These two do the work that needs to be done before and after the
@@ -794,12 +798,6 @@
                            bool verify_thread_buffers,
                            bool verify_fingers) PRODUCT_RETURN;
 
-  // It is called at the end of an evacuation pause during marking so
-  // that CM is notified of where the new end of the heap is. It
-  // doesn't do anything if concurrent_marking_in_progress() is false,
-  // unless the force parameter is true.
-  void update_g1_committed(bool force = false);
-
   bool isMarked(oop p) const {
     assert(p != NULL && p->is_oop(), "expected an oop");
     HeapWord* addr = (HeapWord*)p;
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -268,6 +268,36 @@
   return iterate(cl, mr);
 }
 
+#define check_mark(addr)                                                       \
+  assert(_bmStartWord <= (addr) && (addr) < (_bmStartWord + _bmWordSize),      \
+         "outside underlying space?");                                         \
+  assert(G1CollectedHeap::heap()->is_in_exact(addr),                           \
+         err_msg("Trying to access not available bitmap "PTR_FORMAT            \
+                 " corresponding to "PTR_FORMAT" (%u)",                        \
+                 p2i(this), p2i(addr), G1CollectedHeap::heap()->addr_to_region(addr)));
+
+inline void CMBitMap::mark(HeapWord* addr) {
+  check_mark(addr);
+  _bm.set_bit(heapWordToOffset(addr));
+}
+
+inline void CMBitMap::clear(HeapWord* addr) {
+  check_mark(addr);
+  _bm.clear_bit(heapWordToOffset(addr));
+}
+
+inline bool CMBitMap::parMark(HeapWord* addr) {
+  check_mark(addr);
+  return _bm.par_set_bit(heapWordToOffset(addr));
+}
+
+inline bool CMBitMap::parClear(HeapWord* addr) {
+  check_mark(addr);
+  return _bm.par_clear_bit(heapWordToOffset(addr));
+}
+
+#undef check_mark
+
 inline void CMTask::push(oop obj) {
   HeapWord* objAddr = (HeapWord*) obj;
   assert(_g1h->is_in_g1_reserved(objAddr), "invariant");
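check_mark above factors the shared preconditions of mark/clear/parMark/parClear into
a file-local macro that is #undef'ed immediately after use, so it cannot leak into
other translation units. The same pattern in miniature (illustrative container, not
the HotSpot BitMap):

    #include <cassert>
    #include <cstddef>

    // Shared precondition for every accessor below; #undef'ed right after
    // the last use, exactly like check_mark in the hunk above.
    #define check_bounds(i, n) \
      assert((i) < (n) && "index outside underlying storage")

    struct Bits {
      unsigned char* data;
      size_t         nbits;

      void set(size_t i)   { check_bounds(i, nbits); data[i / 8] |= (unsigned char)(1u << (i % 8)); }
      void clear(size_t i) { check_bounds(i, nbits); data[i / 8] &= (unsigned char)~(1u << (i % 8)); }
      bool get(size_t i)   { check_bounds(i, nbits); return (data[i / 8] >> (i % 8)) & 1u; }
    };

    #undef check_bounds

    int main() {
      unsigned char storage[4] = {0};
      Bits b = { storage, 32 };
      b.set(5);
      return b.get(5) ? 0 : 1;
    }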
--- a/hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.hpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -173,7 +173,7 @@
 
   // Should be called when we want to release the active region which
   // is returned after it's been retired.
-  HeapRegion* release();
+  virtual HeapRegion* release();
 
 #if G1_ALLOC_REGION_TRACING
   void trace(const char* str, size_t word_size = 0, HeapWord* result = NULL);
--- a/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -32,64 +32,37 @@
 
 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
 
+void G1BlockOffsetSharedArrayMappingChangedListener::on_commit(uint start_idx, size_t num_regions) {
+  // Nothing to do. The BOT is hard-wired to be part of the HeapRegion, and we cannot
+  // retrieve it here since that would fire several asserts. The code executed after
+  // commit of a region already needs to re-initialize the HeapRegion, so the BOT is
+  // re-initialized there as well.
+}
+
 //////////////////////////////////////////////////////////////////////
 // G1BlockOffsetSharedArray
 //////////////////////////////////////////////////////////////////////
 
-G1BlockOffsetSharedArray::G1BlockOffsetSharedArray(MemRegion reserved,
-                                                   size_t init_word_size) :
-  _reserved(reserved), _end(NULL)
-{
-  size_t size = compute_size(reserved.word_size());
-  ReservedSpace rs(ReservedSpace::allocation_align_size_up(size));
-  if (!rs.is_reserved()) {
-    vm_exit_during_initialization("Could not reserve enough space for heap offset array");
-  }
-  if (!_vs.initialize(rs, 0)) {
-    vm_exit_during_initialization("Could not reserve enough space for heap offset array");
-  }
+G1BlockOffsetSharedArray::G1BlockOffsetSharedArray(MemRegion heap, G1RegionToSpaceMapper* storage) :
+  _reserved(), _end(NULL), _listener(), _offset_array(NULL) {
+
+  _reserved = heap;
+  _end = NULL;
 
-  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
+  MemRegion bot_reserved = storage->reserved();
 
-  _offset_array = (u_char*)_vs.low_boundary();
-  resize(init_word_size);
+  _offset_array = (u_char*)bot_reserved.start();
+  _end = _reserved.end();
+
+  storage->set_mapping_changed_listener(&_listener);
+
   if (TraceBlockOffsetTable) {
     gclog_or_tty->print_cr("G1BlockOffsetSharedArray::G1BlockOffsetSharedArray: ");
     gclog_or_tty->print_cr("  "
                   "  rs.base(): " INTPTR_FORMAT
                   "  rs.size(): " INTPTR_FORMAT
                   "  rs end(): " INTPTR_FORMAT,
-                  rs.base(), rs.size(), rs.base() + rs.size());
-    gclog_or_tty->print_cr("  "
-                  "  _vs.low_boundary(): " INTPTR_FORMAT
-                  "  _vs.high_boundary(): " INTPTR_FORMAT,
-                  _vs.low_boundary(),
-                  _vs.high_boundary());
-  }
-}
-
-void G1BlockOffsetSharedArray::resize(size_t new_word_size) {
-  assert(new_word_size <= _reserved.word_size(), "Resize larger than reserved");
-  size_t new_size = compute_size(new_word_size);
-  size_t old_size = _vs.committed_size();
-  size_t delta;
-  char* high = _vs.high();
-  _end = _reserved.start() + new_word_size;
-  if (new_size > old_size) {
-    delta = ReservedSpace::page_align_size_up(new_size - old_size);
-    assert(delta > 0, "just checking");
-    if (!_vs.expand_by(delta)) {
-      // Do better than this for Merlin
-      vm_exit_out_of_memory(delta, OOM_MMAP_ERROR, "offset table expansion");
-    }
-    assert(_vs.high() == high + delta, "invalid expansion");
-    // Initialization of the contents is left to the
-    // G1BlockOffsetArray that uses it.
-  } else {
-    delta = ReservedSpace::page_align_size_down(old_size - new_size);
-    if (delta == 0) return;
-    _vs.shrink_by(delta);
-    assert(_vs.high() == high - delta, "invalid expansion");
+                  bot_reserved.start(), bot_reserved.byte_size(), bot_reserved.end());
   }
 }
 
@@ -100,18 +73,7 @@
 }
 
 void G1BlockOffsetSharedArray::set_offset_array(HeapWord* left, HeapWord* right, u_char offset) {
-  check_index(index_for(right - 1), "right address out of range");
-  assert(left  < right, "Heap addresses out of order");
-  size_t num_cards = pointer_delta(right, left) >> LogN_words;
-  if (UseMemSetInBOT) {
-    memset(&_offset_array[index_for(left)], offset, num_cards);
-  } else {
-    size_t i = index_for(left);
-    const size_t end = i + num_cards;
-    for (; i < end; i++) {
-      _offset_array[i] = offset;
-    }
-  }
+  set_offset_array(index_for(left), index_for(right -1), offset);
 }
 
 //////////////////////////////////////////////////////////////////////
@@ -650,6 +612,25 @@
   _next_offset_index = 0;
 }
 
+HeapWord* G1BlockOffsetArrayContigSpace::initialize_threshold_raw() {
+  assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
+         "just checking");
+  _next_offset_index = _array->index_for_raw(_bottom);
+  _next_offset_index++;
+  _next_offset_threshold =
+    _array->address_for_index_raw(_next_offset_index);
+  return _next_offset_threshold;
+}
+
+void G1BlockOffsetArrayContigSpace::zero_bottom_entry_raw() {
+  assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
+         "just checking");
+  size_t bottom_index = _array->index_for_raw(_bottom);
+  assert(_array->address_for_index_raw(bottom_index) == _bottom,
+         "Precondition of call");
+  _array->set_offset_array_raw(bottom_index, 0);
+}
+
 HeapWord* G1BlockOffsetArrayContigSpace::initialize_threshold() {
   assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
          "just checking");
@@ -674,8 +655,7 @@
   assert(new_top <= _end, "_end should have already been updated");
 
   // The first BOT entry should have offset 0.
-  zero_bottom_entry();
-  initialize_threshold();
+  reset_bot();
   alloc_block(_bottom, new_top);
  }
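The shared array behind the BOT keeps one entry per 512-byte card recording how far
back the block covering that card starts; index_for/address_for_index translate
between addresses and card indexes. A much-simplified sketch of such a block offset
table — it spends a full size_t per card and only answers queries for cards the block
actually crosses onto, whereas the real BOT packs entries into single bytes with
escape codes for long backskips:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    const size_t LogN_words = 6;             // 2^6 words = 512 bytes on 64-bit
    const size_t N_words    = (size_t)1 << LogN_words;

    struct BlockOffsetTable {
      std::vector<size_t> back_offset;       // words back to block start, per card

      explicit BlockOffsetTable(size_t heap_words)
        : back_offset(heap_words >> LogN_words, 0) {}

      // Record a block occupying word indexes [start, end).
      void alloc_block(size_t start, size_t end) {
        for (size_t card = (start >> LogN_words) + 1;
             card <= (end - 1) >> LogN_words; ++card) {
          back_offset[card] = card * N_words - start;
        }
      }

      // Start of the block containing word index addr. Valid only for
      // cards the block crosses onto; in the real code the block's first
      // card is resolved by scanning objects instead.
      size_t block_start(size_t addr) const {
        size_t card = addr >> LogN_words;
        return card * N_words - back_offset[card];
      }
    };

    int main() {
      BlockOffsetTable bot(1024);
      bot.alloc_block(10, 300);              // block spans several cards
      assert(bot.block_start(200) == 10);
      assert(bot.block_start(64)  == 10);
      return 0;
    }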
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_HPP
 
+#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
 #include "memory/memRegion.hpp"
 #include "runtime/virtualspace.hpp"
 #include "utilities/globalDefinitions.hpp"
@@ -106,6 +107,11 @@
   inline HeapWord* block_start_const(const void* addr) const;
 };
 
+class G1BlockOffsetSharedArrayMappingChangedListener : public G1MappingChangedListener {
+ public:
+  virtual void on_commit(uint start_idx, size_t num_regions);
+};
+
 // This implementation of "G1BlockOffsetTable" divides the covered region
 // into "N"-word subregions (where "N" = 2^"LogN".  An array with an entry
 // for each such subregion indicates how far back one must go to find the
@@ -125,6 +131,7 @@
   friend class VMStructs;
 
 private:
+  G1BlockOffsetSharedArrayMappingChangedListener _listener;
   // The reserved region covered by the shared array.
   MemRegion _reserved;
 
@@ -133,16 +140,8 @@
 
   // Array for keeping offsets for retrieving object start fast given an
   // address.
-  VirtualSpace _vs;
   u_char* _offset_array;          // byte array keeping backwards offsets
 
-  void check_index(size_t index, const char* msg) const {
-    assert(index < _vs.committed_size(),
-           err_msg("%s - "
-                   "index: " SIZE_FORMAT ", _vs.committed_size: " SIZE_FORMAT,
-                   msg, index, _vs.committed_size()));
-  }
-
   void check_offset(size_t offset, const char* msg) const {
     assert(offset <= N_words,
            err_msg("%s - "
@@ -152,63 +151,33 @@
 
   // Bounds checking accessors:
   // For performance these have to devolve to array accesses in product builds.
-  u_char offset_array(size_t index) const {
-    check_index(index, "index out of range");
-    return _offset_array[index];
-  }
+  inline u_char offset_array(size_t index) const;
 
   void set_offset_array(HeapWord* left, HeapWord* right, u_char offset);
 
-  void set_offset_array(size_t index, u_char offset) {
-    check_index(index, "index out of range");
-    check_offset(offset, "offset too large");
+  void set_offset_array_raw(size_t index, u_char offset) {
     _offset_array[index] = offset;
   }
 
-  void set_offset_array(size_t index, HeapWord* high, HeapWord* low) {
-    check_index(index, "index out of range");
-    assert(high >= low, "addresses out of order");
-    check_offset(pointer_delta(high, low), "offset too large");
-    _offset_array[index] = (u_char) pointer_delta(high, low);
-  }
+  inline void set_offset_array(size_t index, u_char offset);
+
+  inline void set_offset_array(size_t index, HeapWord* high, HeapWord* low);
 
-  void set_offset_array(size_t left, size_t right, u_char offset) {
-    check_index(right, "right index out of range");
-    assert(left <= right, "indexes out of order");
-    size_t num_cards = right - left + 1;
-    if (UseMemSetInBOT) {
-      memset(&_offset_array[left], offset, num_cards);
-    } else {
-      size_t i = left;
-      const size_t end = i + num_cards;
-      for (; i < end; i++) {
-        _offset_array[i] = offset;
-      }
-    }
-  }
+  inline void set_offset_array(size_t left, size_t right, u_char offset);
 
-  void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const {
-    check_index(index, "index out of range");
-    assert(high >= low, "addresses out of order");
-    check_offset(pointer_delta(high, low), "offset too large");
-    assert(_offset_array[index] == pointer_delta(high, low), "Wrong offset");
-  }
+  inline void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const;
 
   bool is_card_boundary(HeapWord* p) const;
 
+public:
+
   // Return the number of slots needed for an offset array
   // that covers mem_region_words words.
-  // We always add an extra slot because if an object
-  // ends on a card boundary we put a 0 in the next
-  // offset array slot, so we want that slot always
-  // to be reserved.
-
-  size_t compute_size(size_t mem_region_words) {
-    size_t number_of_slots = (mem_region_words / N_words) + 1;
-    return ReservedSpace::page_align_size_up(number_of_slots);
+  static size_t compute_size(size_t mem_region_words) {
+    size_t number_of_slots = mem_region_words / N_words;
+    return ReservedSpace::allocation_align_size_up(number_of_slots);
   }
 
-public:
   enum SomePublicConstants {
     LogN = 9,
     LogN_words = LogN - LogHeapWordSize,
@@ -222,25 +191,21 @@
   // least "init_word_size".) The contents of the initial table are
   // undefined; it is the responsibility of the constituent
   // G1BlockOffsetTable(s) to initialize cards.
-  G1BlockOffsetSharedArray(MemRegion reserved, size_t init_word_size);
-
-  // Notes a change in the committed size of the region covered by the
-  // table.  The "new_word_size" may not be larger than the size of the
-  // reserved region this table covers.
-  void resize(size_t new_word_size);
+  G1BlockOffsetSharedArray(MemRegion heap, G1RegionToSpaceMapper* storage);
 
   void set_bottom(HeapWord* new_bottom);
 
-  // Updates all the BlockOffsetArray's sharing this shared array to
-  // reflect the current "top"'s of their spaces.
-  void update_offset_arrays();
-
   // Return the appropriate index into "_offset_array" for "p".
   inline size_t index_for(const void* p) const;
+  inline size_t index_for_raw(const void* p) const;
 
   // Return the address indicating the start of the region corresponding to
   // "index" in "_offset_array".
   inline HeapWord* address_for_index(size_t index) const;
+  // Variant of address_for_index that does not check the index for validity.
+  inline HeapWord* address_for_index_raw(size_t index) const {
+    return _reserved.start() + (index << LogN_words);
+  }
 };
 
 // And here is the G1BlockOffsetTable subtype that uses the array.
@@ -480,6 +445,14 @@
                       blk_start, blk_end);
   }
 
+  // Variant of zero_bottom_entry that does not check for availability of the
+  // memory first.
+  void zero_bottom_entry_raw();
+  // Variant of initialize_threshold that does not check for availability of the
+  // memory first.
+  HeapWord* initialize_threshold_raw();
+  // Zero out the entry for _bottom (offset will be zero).
+  void zero_bottom_entry();
  public:
   G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array, MemRegion mr);
 
@@ -487,8 +460,10 @@
   // bottom of the covered region.
   HeapWord* initialize_threshold();
 
-  // Zero out the entry for _bottom (offset will be zero).
-  void      zero_bottom_entry();
+  void reset_bot() {
+    zero_bottom_entry_raw();
+    initialize_threshold_raw();
+  }
 
   // Return the next threshold, the point at which the table should be
   // updated.
--- a/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.inline.hpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.inline.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -47,14 +47,69 @@
   }
 }
 
+#define check_index(index, msg)                                                \
+  assert((index) < (_reserved.word_size() >> LogN_words),                      \
+         err_msg("%s - index: "SIZE_FORMAT", _vs.committed_size: "SIZE_FORMAT, \
+                 msg, (index), (_reserved.word_size() >> LogN_words)));        \
+  assert(G1CollectedHeap::heap()->is_in_exact(address_for_index_raw(index)),   \
+         err_msg("Index "SIZE_FORMAT" corresponding to "PTR_FORMAT             \
+                 " (%u) is not in committed area.",                            \
+                 (index),                                                      \
+                 p2i(address_for_index_raw(index)),                            \
+                 G1CollectedHeap::heap()->addr_to_region(address_for_index_raw(index))));
+
+u_char G1BlockOffsetSharedArray::offset_array(size_t index) const {
+  check_index(index, "index out of range");
+  return _offset_array[index];
+}
+
+void G1BlockOffsetSharedArray::set_offset_array(size_t index, u_char offset) {
+  check_index(index, "index out of range");
+  set_offset_array_raw(index, offset);
+}
+
+void G1BlockOffsetSharedArray::set_offset_array(size_t index, HeapWord* high, HeapWord* low) {
+  check_index(index, "index out of range");
+  assert(high >= low, "addresses out of order");
+  size_t offset = pointer_delta(high, low);
+  check_offset(offset, "offset too large");
+  set_offset_array(index, (u_char)offset);
+}
+
+void G1BlockOffsetSharedArray::set_offset_array(size_t left, size_t right, u_char offset) {
+  check_index(right, "right index out of range");
+  assert(left <= right, "indexes out of order");
+  size_t num_cards = right - left + 1;
+  if (UseMemSetInBOT) {
+    memset(&_offset_array[left], offset, num_cards);
+  } else {
+    size_t i = left;
+    const size_t end = i + num_cards;
+    for (; i < end; i++) {
+      _offset_array[i] = offset;
+    }
+  }
+}
+
+void G1BlockOffsetSharedArray::check_offset_array(size_t index, HeapWord* high, HeapWord* low) const {
+  check_index(index, "index out of range");
+  assert(high >= low, "addresses out of order");
+  check_offset(pointer_delta(high, low), "offset too large");
+  assert(_offset_array[index] == pointer_delta(high, low), "Wrong offset");
+}
+
+// Variant of index_for that does not check the index for validity.
+inline size_t G1BlockOffsetSharedArray::index_for_raw(const void* p) const {
+  return pointer_delta((char*)p, _reserved.start(), sizeof(char)) >> LogN;
+}
+
 inline size_t G1BlockOffsetSharedArray::index_for(const void* p) const {
   char* pc = (char*)p;
   assert(pc >= (char*)_reserved.start() &&
          pc <  (char*)_reserved.end(),
          err_msg("p (" PTR_FORMAT ") not in reserved [" PTR_FORMAT ", " PTR_FORMAT ")",
                  p2i(p), p2i(_reserved.start()), p2i(_reserved.end())));
-  size_t delta = pointer_delta(pc, _reserved.start(), sizeof(char));
-  size_t result = delta >> LogN;
+  size_t result = index_for_raw(p);
   check_index(result, "bad index from address");
   return result;
 }
@@ -62,7 +117,7 @@
 inline HeapWord*
 G1BlockOffsetSharedArray::address_for_index(size_t index) const {
   check_index(index, "index out of range");
-  HeapWord* result = _reserved.start() + (index << LogN_words);
+  HeapWord* result = address_for_index_raw(index);
   assert(result >= _reserved.start() && result < _reserved.end(),
          err_msg("bad address from index result " PTR_FORMAT
                  " _reserved.start() " PTR_FORMAT " _reserved.end() "
@@ -71,6 +126,8 @@
   return result;
 }
 
+#undef check_index
+
 inline size_t
 G1BlockOffsetArray::block_size(const HeapWord* p) const {
   return gsp()->block_size(p);
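
A note on the arithmetic in index_for_raw() and address_for_index_raw() above: with LogN = 9 (see SomePublicConstants) each BOT slot covers 2^9 = 512 heap bytes, so the index is simply the byte offset from the start of the reserved space shifted right by LogN, and the reverse mapping shifts the index left by LogN_words on a HeapWord* base. A minimal standalone sketch with illustrative names, assuming 8-byte HeapWords as on 64-bit platforms:

    #include <cassert>
    #include <cstddef>

    static const int LogN = 9;                             // 2^9 = 512 bytes per BOT card
    static const int LogHeapWordSize = 3;                  // 8-byte HeapWords (64-bit)
    static const int LogN_words = LogN - LogHeapWordSize;  // 2^6 = 64 words per card

    // Byte-based stand-in for index_for_raw(): card index of an address.
    size_t index_for_raw(const char* reserved_start, const void* p) {
      return (size_t)((const char*)p - reserved_start) >> LogN;
    }

    // Byte-based stand-in for address_for_index_raw(): first address of a card.
    // The real code adds (index << LogN_words) to a HeapWord* base; the extra
    // << LogHeapWordSize reproduces that word-pointer arithmetic on a char* base.
    const char* address_for_index_raw(const char* reserved_start, size_t index) {
      return reserved_start + ((index << LogN_words) << LogHeapWordSize);
    }

    int main() {
      char heap[4096];
      const void* p = heap + 1000;        // byte 1000 lies in card 1 (bytes 512..1023)
      size_t idx = index_for_raw(heap, p);
      assert(idx == 1);
      assert(address_for_index_raw(heap, idx) == heap + 512);
      return 0;
    }
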
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CardCounts.cpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CardCounts.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -33,31 +33,26 @@
 
 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
 
+void G1CardCountsMappingChangedListener::on_commit(uint start_idx, size_t num_regions) {
+  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_idx), num_regions * HeapRegion::GrainWords);
+  _counts->clear_range(mr);
+}
+
 void G1CardCounts::clear_range(size_t from_card_num, size_t to_card_num) {
   if (has_count_table()) {
-    assert(from_card_num >= 0 && from_card_num < _committed_max_card_num,
-           err_msg("from card num out of range: "SIZE_FORMAT, from_card_num));
     assert(from_card_num < to_card_num,
            err_msg("Wrong order? from: " SIZE_FORMAT ", to: "SIZE_FORMAT,
                    from_card_num, to_card_num));
-    assert(to_card_num <= _committed_max_card_num,
-           err_msg("to card num out of range: "
-                   "to: "SIZE_FORMAT ", "
-                   "max: "SIZE_FORMAT,
-                   to_card_num, _committed_max_card_num));
-
-    to_card_num = MIN2(_committed_max_card_num, to_card_num);
-
     Copy::fill_to_bytes(&_card_counts[from_card_num], (to_card_num - from_card_num));
   }
 }
 
 G1CardCounts::G1CardCounts(G1CollectedHeap *g1h):
-  _g1h(g1h), _card_counts(NULL),
-  _reserved_max_card_num(0), _committed_max_card_num(0),
-  _committed_size(0) {}
+  _listener(), _g1h(g1h), _card_counts(NULL), _reserved_max_card_num(0) {
+  _listener.set_cardcounts(this);
+}
 
-void G1CardCounts::initialize() {
+void G1CardCounts::initialize(G1RegionToSpaceMapper* mapper) {
   assert(_g1h->max_capacity() > 0, "initialization order");
   assert(_g1h->capacity() == 0, "initialization order");
 
@@ -70,70 +65,9 @@
     _ct_bs = _g1h->g1_barrier_set();
     _ct_bot = _ct_bs->byte_for_const(_g1h->reserved_region().start());
 
-    // Allocate/Reserve the counts table
-    size_t reserved_bytes = _g1h->max_capacity();
-    _reserved_max_card_num = reserved_bytes >> CardTableModRefBS::card_shift;
-
-    size_t reserved_size = _reserved_max_card_num * sizeof(jbyte);
-    ReservedSpace rs(ReservedSpace::allocation_align_size_up(reserved_size));
-    if (!rs.is_reserved()) {
-      warning("Could not reserve enough space for the card counts table");
-      guarantee(!has_reserved_count_table(), "should be NULL");
-      return;
-    }
-
-    MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
-
-    _card_counts_storage.initialize(rs, 0);
-    _card_counts = (jubyte*) _card_counts_storage.low();
-  }
-}
-
-void G1CardCounts::resize(size_t heap_capacity) {
-  // Expand the card counts table to handle a heap with the given capacity.
-
-  if (!has_reserved_count_table()) {
-    // Don't expand if we failed to reserve the card counts table.
-    return;
-  }
-
-  assert(_committed_size ==
-         ReservedSpace::allocation_align_size_up(_committed_size),
-         err_msg("Unaligned? committed_size: " SIZE_FORMAT, _committed_size));
-
-  // Verify that the committed space for the card counts matches our
-  // committed max card num. Note for some allocation alignments, the
-  // amount of space actually committed for the counts table will be able
-  // to span more cards than the number spanned by the maximum heap.
-  size_t prev_committed_size = _committed_size;
-  size_t prev_committed_card_num = committed_to_card_num(prev_committed_size);
-
-  assert(prev_committed_card_num == _committed_max_card_num,
-         err_msg("Card mismatch: "
-                 "prev: " SIZE_FORMAT ", "
-                 "committed: "SIZE_FORMAT", "
-                 "reserved: "SIZE_FORMAT,
-                 prev_committed_card_num, _committed_max_card_num, _reserved_max_card_num));
-
-  size_t new_size = (heap_capacity >> CardTableModRefBS::card_shift) * sizeof(jbyte);
-  size_t new_committed_size = ReservedSpace::allocation_align_size_up(new_size);
-  size_t new_committed_card_num = committed_to_card_num(new_committed_size);
-
-  if (_committed_max_card_num < new_committed_card_num) {
-    // we need to expand the backing store for the card counts
-    size_t expand_size = new_committed_size - prev_committed_size;
-
-    if (!_card_counts_storage.expand_by(expand_size)) {
-      warning("Card counts table backing store commit failure");
-      return;
-    }
-    assert(_card_counts_storage.committed_size() == new_committed_size,
-           "expansion commit failure");
-
-    _committed_size = new_committed_size;
-    _committed_max_card_num = new_committed_card_num;
-
-    clear_range(prev_committed_card_num, _committed_max_card_num);
+    _card_counts = (jubyte*) mapper->reserved().start();
+    _reserved_max_card_num = mapper->reserved().byte_size();
+    mapper->set_mapping_changed_listener(&_listener);
   }
 }
 
@@ -149,12 +83,13 @@
   uint count = 0;
   if (has_count_table()) {
     size_t card_num = ptr_2_card_num(card_ptr);
-    if (card_num < _committed_max_card_num) {
-      count = (uint) _card_counts[card_num];
-      if (count < G1ConcRSHotCardLimit) {
-        _card_counts[card_num] =
-          (jubyte)(MIN2((uintx)(_card_counts[card_num] + 1), G1ConcRSHotCardLimit));
-      }
+    assert(card_num < _reserved_max_card_num,
+           err_msg("Card "SIZE_FORMAT" outside of card counts table (max size "SIZE_FORMAT")",
+                   card_num, _reserved_max_card_num));
+    count = (uint) _card_counts[card_num];
+    if (count < G1ConcRSHotCardLimit) {
+      _card_counts[card_num] =
+        (jubyte)(MIN2((uintx)(_card_counts[card_num] + 1), G1ConcRSHotCardLimit));
     }
   }
   return count;
@@ -165,31 +100,23 @@
 }
 
 void G1CardCounts::clear_region(HeapRegion* hr) {
-  assert(!hr->isHumongous(), "Should have been cleared");
-  if (has_count_table()) {
-    HeapWord* bottom = hr->bottom();
+  MemRegion mr(hr->bottom(), hr->end());
+  clear_range(mr);
+}
 
-    // We use the last address in hr as hr could be the
-    // last region in the heap. In which case trying to find
-    // the card for hr->end() will be an OOB access to the
-    // card table.
-    HeapWord* last = hr->end() - 1;
-    assert(_g1h->g1_committed().contains(last),
-           err_msg("last not in committed: "
-                   "last: " PTR_FORMAT ", "
-                   "committed: [" PTR_FORMAT ", " PTR_FORMAT ")",
-                   last,
-                   _g1h->g1_committed().start(),
-                   _g1h->g1_committed().end()));
-
-    const jbyte* from_card_ptr = _ct_bs->byte_for_const(bottom);
-    const jbyte* last_card_ptr = _ct_bs->byte_for_const(last);
+void G1CardCounts::clear_range(MemRegion mr) {
+  if (has_count_table()) {
+    const jbyte* from_card_ptr = _ct_bs->byte_for_const(mr.start());
+    // We use the last address in the range as the range could represent the
+    // last region in the heap, in which case trying to find the card for
+    // mr.end() would be an OOB access to the card table.
+    const jbyte* last_card_ptr = _ct_bs->byte_for_const(mr.last());
 
 #ifdef ASSERT
     HeapWord* start_addr = _ct_bs->addr_for(from_card_ptr);
-    assert(start_addr == hr->bottom(), "alignment");
+    assert(start_addr == mr.start(), "MemRegion start must be aligned to a card.");
     HeapWord* last_addr = _ct_bs->addr_for(last_card_ptr);
-    assert((last_addr + CardTableModRefBS::card_size_in_words) == hr->end(), "alignment");
+    assert((last_addr + CardTableModRefBS::card_size_in_words) == mr.end(), "MemRegion end must be aligned to a card.");
 #endif // ASSERT
 
     // Clear the counts for the (exclusive) card range.
@@ -199,14 +126,22 @@
   }
 }
 
+class G1CardCountsClearClosure : public HeapRegionClosure {
+ private:
+  G1CardCounts* _card_counts;
+ public:
+  G1CardCountsClearClosure(G1CardCounts* card_counts) :
+    HeapRegionClosure(), _card_counts(card_counts) { }
+
+  virtual bool doHeapRegion(HeapRegion* r) {
+    _card_counts->clear_region(r);
+    return false;
+  }
+};
+
 void G1CardCounts::clear_all() {
   assert(SafepointSynchronize::is_at_safepoint(), "don't call this otherwise");
-  clear_range((size_t)0, _committed_max_card_num);
+  G1CardCountsClearClosure cl(this);
+  _g1h->heap_region_iterate(&cl);
 }
-
-G1CardCounts::~G1CardCounts() {
-  if (has_reserved_count_table()) {
-    _card_counts_storage.release();
-  }
-}
-
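
The on_commit() callback above is the G1RegionToSpaceMapper listener mechanism at work: when heap regions are (re)committed, the mapper notifies a registered listener so each auxiliary structure can reset its slice of backing memory before use. A minimal sketch of that pattern; the class names, the cards-per-region granularity, and the commit bookkeeping are illustrative stand-ins, not the HotSpot API:

    #include <cstddef>
    #include <vector>

    class MappingChangedListener {
     public:
      virtual ~MappingChangedListener() {}
      // Called after heap regions [start_idx, start_idx + num_regions) are committed.
      virtual void on_commit(unsigned start_idx, size_t num_regions) = 0;
    };

    class Mapper {
      MappingChangedListener* _listener;
     public:
      Mapper() : _listener(NULL) {}
      void set_mapping_changed_listener(MappingChangedListener* l) { _listener = l; }
      void commit_regions(unsigned start_idx, size_t num_regions) {
        // (commit the backing pages here, elided)
        if (_listener != NULL) {
          _listener->on_commit(start_idx, num_regions);  // notify after commit
        }
      }
    };

    class CardCounts : public MappingChangedListener {
      std::vector<unsigned char> _counts;
      static const size_t cards_per_region = 8;  // illustrative granularity
     public:
      explicit CardCounts(size_t num_cards) : _counts(num_cards, 0) {}
      virtual void on_commit(unsigned start_idx, size_t num_regions) {
        // Newly committed regions must start out with clean counts.
        for (size_t i = start_idx * cards_per_region;
             i < (start_idx + num_regions) * cards_per_region; i++) {
          _counts[i] = 0;
        }
      }
    };

    int main() {
      CardCounts counts(64);
      Mapper mapper;
      mapper.set_mapping_changed_listener(&counts);
      mapper.commit_regions(2, 3);  // resets the counts for regions 2..4
      return 0;
    }
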
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CardCounts.hpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CardCounts.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -25,14 +25,26 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1CARDCOUNTS_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1CARDCOUNTS_HPP
 
+#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
 #include "memory/allocation.hpp"
 #include "runtime/virtualspace.hpp"
 #include "utilities/globalDefinitions.hpp"
 
 class CardTableModRefBS;
+class G1CardCounts;
 class G1CollectedHeap;
+class G1RegionToSpaceMapper;
 class HeapRegion;
 
+class G1CardCountsMappingChangedListener : public G1MappingChangedListener {
+ private:
+  G1CardCounts* _counts;
+ public:
+  void set_cardcounts(G1CardCounts* counts) { _counts = counts; }
+
+  virtual void on_commit(uint start_idx, size_t num_regions);
+};
+
 // Table to track the number of times a card has been refined. Once
 // a card has been refined a certain number of times, it is
 // considered 'hot' and its refinement is delayed by inserting the
@@ -41,6 +53,8 @@
 // is 'drained' during the next evacuation pause.
 
 class G1CardCounts: public CHeapObj<mtGC> {
+  G1CardCountsMappingChangedListener _listener;
+
   G1CollectedHeap* _g1h;
 
   // The table of counts
@@ -49,27 +63,18 @@
   // Max capacity of the reserved space for the counts table
   size_t _reserved_max_card_num;
 
-  // Max capacity of the committed space for the counts table
-  size_t _committed_max_card_num;
-
-  // Size of committed space for the counts table
-  size_t _committed_size;
-
   // CardTable bottom.
   const jbyte* _ct_bot;
 
   // Barrier set
   CardTableModRefBS* _ct_bs;
 
-  // The virtual memory backing the counts table
-  VirtualSpace _card_counts_storage;
-
   // Returns true if the card counts table has been reserved.
   bool has_reserved_count_table() { return _card_counts != NULL; }
 
   // Returns true if the card counts table has been reserved and committed.
   bool has_count_table() {
-    return has_reserved_count_table() && _committed_max_card_num > 0;
+    return has_reserved_count_table();
   }
 
   size_t ptr_2_card_num(const jbyte* card_ptr) {
@@ -79,37 +84,24 @@
                    "_ct_bot: " PTR_FORMAT,
                    p2i(card_ptr), p2i(_ct_bot)));
     size_t card_num = pointer_delta(card_ptr, _ct_bot, sizeof(jbyte));
-    assert(card_num >= 0 && card_num < _committed_max_card_num,
+    assert(card_num < _reserved_max_card_num,
            err_msg("card pointer out of range: " PTR_FORMAT, p2i(card_ptr)));
     return card_num;
   }
 
   jbyte* card_num_2_ptr(size_t card_num) {
-    assert(card_num >= 0 && card_num < _committed_max_card_num,
+    assert(card_num < _reserved_max_card_num,
            err_msg("card num out of range: "SIZE_FORMAT, card_num));
     return (jbyte*) (_ct_bot + card_num);
   }
 
-  // Helper routine.
-  // Returns the number of cards that can be counted by the given committed
-  // table size, with a maximum of the number of cards spanned by the max
-  // capacity of the heap.
-  size_t committed_to_card_num(size_t committed_size) {
-    return MIN2(_reserved_max_card_num, committed_size / sizeof(jbyte));
-  }
-
   // Clear the counts table for the given (exclusive) index range.
   void clear_range(size_t from_card_num, size_t to_card_num);
 
  public:
   G1CardCounts(G1CollectedHeap* g1h);
-  ~G1CardCounts();
 
-  void initialize();
-
-  // Resize the committed space for the card counts table in
-  // response to a resize of the committed space for the heap.
-  void resize(size_t heap_capacity);
+  void initialize(G1RegionToSpaceMapper* mapper);
 
   // Increments the refinement count for the given card.
   // Returns the pre-increment count value.
@@ -122,8 +114,10 @@
   // Clears the card counts for the cards spanned by the region
   void clear_region(HeapRegion* hr);
 
+  // Clears the card counts for the cards spanned by the MemRegion
+  void clear_range(MemRegion mr);
+
   // Clear the entire card counts table during GC.
-  // Updates the policy stats with the duration.
   void clear_all();
 };
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -45,12 +45,13 @@
 #include "gc_implementation/g1/g1MarkSweep.hpp"
 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
 #include "gc_implementation/g1/g1ParScanThreadState.inline.hpp"
+#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
 #include "gc_implementation/g1/g1RemSet.inline.hpp"
 #include "gc_implementation/g1/g1StringDedup.hpp"
 #include "gc_implementation/g1/g1YCTypes.hpp"
 #include "gc_implementation/g1/heapRegion.inline.hpp"
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
-#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
+#include "gc_implementation/g1/heapRegionSet.inline.hpp"
 #include "gc_implementation/g1/vm_operations_g1.hpp"
 #include "gc_implementation/shared/gcHeapSummary.hpp"
 #include "gc_implementation/shared/gcTimer.hpp"
@@ -381,6 +382,14 @@
   gclog_or_tty->cr();
 }
 
+void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) {
+  OtherRegionsTable::invalidate(start_idx, num_regions);
+}
+
+void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions) {
+  reset_from_card_cache(start_idx, num_regions);
+}
+
 void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr)
 {
   // Claim the right to put the region on the dirty cards region list
@@ -523,9 +532,9 @@
       // again to allocate from it.
       append_secondary_free_list();
 
-      assert(!_free_list.is_empty(), "if the secondary_free_list was not "
+      assert(_hrs.num_free_regions() > 0, "if the secondary_free_list was not "
              "empty we should have moved at least one entry to the free_list");
-      HeapRegion* res = _free_list.remove_region(is_old);
+      HeapRegion* res = _hrs.allocate_free_region(is_old);
       if (G1ConcRegionFreeingVerbose) {
         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
                                "allocated "HR_FORMAT" from secondary_free_list",
@@ -566,7 +575,7 @@
     }
   }
 
-  res = _free_list.remove_region(is_old);
+  res = _hrs.allocate_free_region(is_old);
 
   if (res == NULL) {
     if (G1ConcRegionFreeingVerbose) {
@@ -591,8 +600,8 @@
       // Given that expand() succeeded in expanding the heap, and we
       // always expand the heap by an amount aligned to the heap
       // region size, the free list should in theory not be empty.
-      // In either case remove_region() will check for NULL.
-      res = _free_list.remove_region(is_old);
+      // In either case allocate_free_region() will check for NULL.
+      res = _hrs.allocate_free_region(is_old);
     } else {
       _expand_heap_after_alloc_failure = false;
     }
@@ -600,55 +609,11 @@
   return res;
 }
 
-uint G1CollectedHeap::humongous_obj_allocate_find_first(uint num_regions,
-                                                        size_t word_size) {
-  assert(isHumongous(word_size), "word_size should be humongous");
-  assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
-
-  uint first = G1_NULL_HRS_INDEX;
-  if (num_regions == 1) {
-    // Only one region to allocate, no need to go through the slower
-    // path. The caller will attempt the expansion if this fails, so
-    // let's not try to expand here too.
-    HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */);
-    if (hr != NULL) {
-      first = hr->hrs_index();
-    } else {
-      first = G1_NULL_HRS_INDEX;
-    }
-  } else {
-    // We can't allocate humongous regions while cleanupComplete() is
-    // running, since some of the regions we find to be empty might not
-    // yet be added to the free list and it is not straightforward to
-    // know which list they are on so that we can remove them. Note
-    // that we only need to do this if we need to allocate more than
-    // one region to satisfy the current humongous allocation
-    // request. If we are only allocating one region we use the common
-    // region allocation code (see above).
-    wait_while_free_regions_coming();
-    append_secondary_free_list_if_not_empty_with_lock();
-
-    if (free_regions() >= num_regions) {
-      first = _hrs.find_contiguous(num_regions);
-      if (first != G1_NULL_HRS_INDEX) {
-        for (uint i = first; i < first + num_regions; ++i) {
-          HeapRegion* hr = region_at(i);
-          assert(hr->is_empty(), "sanity");
-          assert(is_on_master_free_list(hr), "sanity");
-          hr->set_pending_removal(true);
-        }
-        _free_list.remove_all_pending(num_regions);
-      }
-    }
-  }
-  return first;
-}
-
 HeapWord*
 G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
                                                            uint num_regions,
                                                            size_t word_size) {
-  assert(first != G1_NULL_HRS_INDEX, "pre-condition");
+  assert(first != G1_NO_HRS_INDEX, "pre-condition");
   assert(isHumongous(word_size), "word_size should be humongous");
   assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
 
@@ -786,42 +751,70 @@
 
   verify_region_sets_optional();
 
-  size_t word_size_rounded = round_to(word_size, HeapRegion::GrainWords);
-  uint num_regions = (uint) (word_size_rounded / HeapRegion::GrainWords);
-  uint x_num = expansion_regions();
-  uint fs = _hrs.free_suffix();
-  uint first = humongous_obj_allocate_find_first(num_regions, word_size);
-  if (first == G1_NULL_HRS_INDEX) {
-    // The only thing we can do now is attempt expansion.
-    if (fs + x_num >= num_regions) {
-      // If the number of regions we're trying to allocate for this
-      // object is at most the number of regions in the free suffix,
-      // then the call to humongous_obj_allocate_find_first() above
-      // should have succeeded and we wouldn't be here.
-      //
-      // We should only be trying to expand when the free suffix is
-      // not sufficient for the object _and_ we have some expansion
-      // room available.
-      assert(num_regions > fs, "earlier allocation should have succeeded");
-
+  uint first = G1_NO_HRS_INDEX;
+  uint obj_regions = (uint)(align_size_up_(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords);
+
+  if (obj_regions == 1) {
+    // Only one region to allocate; try to use a fast path by directly allocating
+    // from the free lists. Do not try to expand here; we will potentially do
+    // that later.
+    HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */);
+    if (hr != NULL) {
+      first = hr->hrs_index();
+    }
+  } else {
+    // We can't allocate a humongous object spanning more than one region while
+    // cleanupComplete() is running, since some of the regions we find to be
+    // empty might not yet be added to the free list. It is not straightforward
+    // to know which list they are on so that we can remove them. We only
+    // need to do this if we need to allocate more than one region to satisfy the
+    // current humongous allocation request. If we are only allocating one region
+    // we use the single-region allocation code (see above), which already
+    // potentially waits for regions from the secondary free list.
+    wait_while_free_regions_coming();
+    append_secondary_free_list_if_not_empty_with_lock();
+
+    // Policy: Try only empty (i.e. already committed) regions first. Maybe we
+    // are lucky enough to find some.
+    first = _hrs.find_contiguous_only_empty(obj_regions);
+    if (first != G1_NO_HRS_INDEX) {
+      _hrs.allocate_free_regions_starting_at(first, obj_regions);
+    }
+  }
+
+  if (first == G1_NO_HRS_INDEX) {
+    // Policy: We could not find enough regions for the humongous object in the
+    // free list. Look through the heap to find a mix of free and uncommitted regions.
+    // If we find such a range, try expansion.
+    first = _hrs.find_contiguous_empty_or_unavailable(obj_regions);
+    if (first != G1_NO_HRS_INDEX) {
+      // We found something. Make sure these regions are committed, i.e. expand
+      // the heap. Alternatively we could do a defragmentation GC.
       ergo_verbose1(ErgoHeapSizing,
                     "attempt heap expansion",
                     ergo_format_reason("humongous allocation request failed")
                     ergo_format_byte("allocation request"),
                     word_size * HeapWordSize);
-      if (expand((num_regions - fs) * HeapRegion::GrainBytes)) {
-        // Even though the heap was expanded, it might not have
-        // reached the desired size. So, we cannot assume that the
-        // allocation will succeed.
-        first = humongous_obj_allocate_find_first(num_regions, word_size);
+
+      _hrs.expand_at(first, obj_regions);
+      g1_policy()->record_new_heap_size(num_regions());
+
+#ifdef ASSERT
+      for (uint i = first; i < first + obj_regions; ++i) {
+        HeapRegion* hr = region_at(i);
+        assert(hr->is_empty(), "sanity");
+        assert(is_on_master_free_list(hr), "sanity");
       }
+#endif
+      _hrs.allocate_free_regions_starting_at(first, obj_regions);
+    } else {
+      // Policy: Potentially trigger a defragmentation GC.
     }
   }
 
   HeapWord* result = NULL;
-  if (first != G1_NULL_HRS_INDEX) {
-    result =
-      humongous_obj_allocate_initialize_regions(first, num_regions, word_size);
+  if (first != G1_NO_HRS_INDEX) {
+    result = humongous_obj_allocate_initialize_regions(first, obj_regions, word_size);
     assert(result != NULL, "it should always return a valid result");
 
     // A successful humongous object allocation changes the used space
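
The humongous allocation path above applies a layered policy: a single-region request goes through the regular free-list fast path; a multi-region request first searches for a contiguous run of committed empty regions, and only if that fails accepts uncommitted regions and expands the heap to commit them. A toy simulation of the search-then-expand steps; the region states and helpers are illustrative, not the HeapRegionSeq API:

    #include <cstdio>
    #include <vector>

    enum RegionState { Uncommitted, Free, Used };

    static const unsigned NO_INDEX = 0xFFFFFFFFu;

    // Find the first run of n regions that are Free (or, optionally, Uncommitted).
    unsigned find_contiguous(const std::vector<RegionState>& r, unsigned n,
                             bool allow_uncommitted) {
      unsigned run = 0;
      for (unsigned i = 0; i < r.size(); i++) {
        bool ok = (r[i] == Free) || (allow_uncommitted && r[i] == Uncommitted);
        run = ok ? run + 1 : 0;
        if (run == n) return i - n + 1;  // first index of the run
      }
      return NO_INDEX;
    }

    int main() {
      // region map: Free, Used, Uncommitted, Uncommitted, Free
      std::vector<RegionState> regions;
      regions.push_back(Free); regions.push_back(Used);
      regions.push_back(Uncommitted); regions.push_back(Uncommitted);
      regions.push_back(Free);

      unsigned n = 3;
      unsigned first = find_contiguous(regions, n, false);  // committed + empty only
      if (first == NO_INDEX) {
        first = find_contiguous(regions, n, true);          // allow expansion
        if (first != NO_INDEX) {
          for (unsigned i = first; i < first + n; i++) {
            if (regions[i] == Uncommitted) regions[i] = Free;  // "expand_at"
          }
        }
      }
      printf("allocating %u regions starting at %u\n", n, first);  // prints 2
      return 0;
    }
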
@@ -1384,7 +1377,7 @@
         G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs);
       }
 
-      assert(free_regions() == 0, "we should not have added any free regions");
+      assert(num_free_regions() == 0, "we should not have added any free regions");
       rebuild_region_sets(false /* free_list_only */);
 
       // Enqueue any discovered reference objects that have
@@ -1749,21 +1742,6 @@
   return NULL;
 }
 
-void G1CollectedHeap::update_committed_space(HeapWord* old_end,
-                                             HeapWord* new_end) {
-  assert(old_end != new_end, "don't call this otherwise");
-  assert((HeapWord*) _g1_storage.high() == new_end, "invariant");
-
-  // Update the committed mem region.
-  _g1_committed.set_end(new_end);
-  // Tell the card table about the update.
-  Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
-  // Tell the BOT about the update.
-  _bot_shared->resize(_g1_committed.word_size());
-  // Tell the hot card cache about the update
-  _cg1r->hot_card_cache()->resize_card_counts(capacity());
-}
-
 bool G1CollectedHeap::expand(size_t expand_bytes) {
   size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
   aligned_expand_bytes = align_size_up(aligned_expand_bytes,
@@ -1774,55 +1752,22 @@
                 ergo_format_byte("attempted expansion amount"),
                 expand_bytes, aligned_expand_bytes);
 
-  if (_g1_storage.uncommitted_size() == 0) {
+  if (is_maximal_no_gc()) {
     ergo_verbose0(ErgoHeapSizing,
                       "did not expand the heap",
                       ergo_format_reason("heap already fully expanded"));
     return false;
   }
 
-  // First commit the memory.
-  HeapWord* old_end = (HeapWord*) _g1_storage.high();
-  bool successful = _g1_storage.expand_by(aligned_expand_bytes);
-  if (successful) {
-    // Then propagate this update to the necessary data structures.
-    HeapWord* new_end = (HeapWord*) _g1_storage.high();
-    update_committed_space(old_end, new_end);
-
-    FreeRegionList expansion_list("Local Expansion List");
-    MemRegion mr = _hrs.expand_by(old_end, new_end, &expansion_list);
-    assert(mr.start() == old_end, "post-condition");
-    // mr might be a smaller region than what was requested if
-    // expand_by() was unable to allocate the HeapRegion instances
-    assert(mr.end() <= new_end, "post-condition");
-
-    size_t actual_expand_bytes = mr.byte_size();
+  uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
+  assert(regions_to_expand > 0, "Must expand by at least one region");
+
+  uint expanded_by = _hrs.expand_by(regions_to_expand);
+
+  if (expanded_by > 0) {
+    size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;
     assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
-    assert(actual_expand_bytes == expansion_list.total_capacity_bytes(),
-           "post-condition");
-    if (actual_expand_bytes < aligned_expand_bytes) {
-      // We could not expand _hrs to the desired size. In this case we
-      // need to shrink the committed space accordingly.
-      assert(mr.end() < new_end, "invariant");
-
-      size_t diff_bytes = aligned_expand_bytes - actual_expand_bytes;
-      // First uncommit the memory.
-      _g1_storage.shrink_by(diff_bytes);
-      // Then propagate this update to the necessary data structures.
-      update_committed_space(new_end, mr.end());
-    }
-    _free_list.add_as_tail(&expansion_list);
-
-    if (_hr_printer.is_active()) {
-      HeapWord* curr = mr.start();
-      while (curr < mr.end()) {
-        HeapWord* curr_end = curr + HeapRegion::GrainWords;
-        _hr_printer.commit(curr, curr_end);
-        curr = curr_end;
-      }
-      assert(curr == mr.end(), "post-condition");
-    }
-    g1_policy()->record_new_heap_size(n_regions());
+    g1_policy()->record_new_heap_size(num_regions());
   } else {
     ergo_verbose0(ErgoHeapSizing,
                   "did not expand the heap",
@@ -1830,12 +1775,12 @@
     // The expansion of the virtual storage space was unsuccessful.
     // Let's see if it was because we ran out of swap.
     if (G1ExitOnExpansionFailure &&
-        _g1_storage.uncommitted_size() >= aligned_expand_bytes) {
+        _hrs.available() >= regions_to_expand) {
       // We had head room...
       vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
     }
   }
-  return successful;
+  return expanded_by > 0;
 }
 
 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
@@ -1846,7 +1791,6 @@
   uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
 
   uint num_regions_removed = _hrs.shrink_by(num_regions_to_remove);
-  HeapWord* old_end = (HeapWord*) _g1_storage.high();
   size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
 
   ergo_verbose3(ErgoHeapSizing,
@@ -1856,22 +1800,7 @@
                 ergo_format_byte("attempted shrinking amount"),
                 shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
   if (num_regions_removed > 0) {
-    _g1_storage.shrink_by(shrunk_bytes);
-    HeapWord* new_end = (HeapWord*) _g1_storage.high();
-
-    if (_hr_printer.is_active()) {
-      HeapWord* curr = old_end;
-      while (curr > new_end) {
-        HeapWord* curr_end = curr;
-        curr -= HeapRegion::GrainWords;
-        _hr_printer.uncommit(curr, curr_end);
-      }
-    }
-
-    _expansion_regions += num_regions_removed;
-    update_committed_space(old_end, new_end);
-    HeapRegionRemSet::shrink_heap(n_regions());
-    g1_policy()->record_new_heap_size(n_regions());
+    g1_policy()->record_new_heap_size(num_regions());
   } else {
     ergo_verbose0(ErgoHeapSizing,
                   "did not shrink the heap",
@@ -1922,7 +1851,6 @@
   _g1mm(NULL),
   _refine_cte_cl(NULL),
   _full_collection(false),
-  _free_list("Master Free List", new MasterFreeRegionListMtSafeChecker()),
   _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
   _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
   _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
@@ -2036,8 +1964,6 @@
   _reserved.set_start((HeapWord*)heap_rs.base());
   _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));
 
-  _expansion_regions = (uint) (max_byte_size / HeapRegion::GrainBytes);
-
   // Create the gen rem set (and barrier set) for the entire reserved region.
   _rem_set = collector_policy()->create_rem_set(_reserved, 2);
   set_barrier_set(rem_set()->bs());
@@ -2051,20 +1977,65 @@
 
   // Carve out the G1 part of the heap.
 
-  ReservedSpace g1_rs   = heap_rs.first_part(max_byte_size);
-  _g1_reserved = MemRegion((HeapWord*)g1_rs.base(),
-                           g1_rs.size()/HeapWordSize);
-
-  _g1_storage.initialize(g1_rs, 0);
-  _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0);
-  _hrs.initialize((HeapWord*) _g1_reserved.start(),
-                  (HeapWord*) _g1_reserved.end());
-  assert(_hrs.max_length() == _expansion_regions,
-         err_msg("max length: %u expansion regions: %u",
-                 _hrs.max_length(), _expansion_regions));
-
-  // Do later initialization work for concurrent refinement.
-  _cg1r->init();
+  ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
+  G1RegionToSpaceMapper* heap_storage =
+    G1RegionToSpaceMapper::create_mapper(g1_rs,
+                                         UseLargePages ? os::large_page_size() : os::vm_page_size(),
+                                         HeapRegion::GrainBytes,
+                                         1,
+                                         mtJavaHeap);
+  heap_storage->set_mapping_changed_listener(&_listener);
+
+  // Reserve space for the block offset table (BOT). We do not support automatic
+  // uncommit for the BOT at this time.
+  ReservedSpace bot_rs(G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize));
+  G1RegionToSpaceMapper* bot_storage =
+    G1RegionToSpaceMapper::create_mapper(bot_rs,
+                                         os::vm_page_size(),
+                                         HeapRegion::GrainBytes,
+                                         G1BlockOffsetSharedArray::N_bytes,
+                                         mtGC);
+
+  ReservedSpace cardtable_rs(G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize));
+  G1RegionToSpaceMapper* cardtable_storage =
+    G1RegionToSpaceMapper::create_mapper(cardtable_rs,
+                                         os::vm_page_size(),
+                                         HeapRegion::GrainBytes,
+                                         G1BlockOffsetSharedArray::N_bytes,
+                                         mtGC);
+
+  // Reserve space for the card counts table.
+  ReservedSpace card_counts_rs(G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize));
+  G1RegionToSpaceMapper* card_counts_storage =
+    G1RegionToSpaceMapper::create_mapper(card_counts_rs,
+                                         os::vm_page_size(),
+                                         HeapRegion::GrainBytes,
+                                         G1BlockOffsetSharedArray::N_bytes,
+                                         mtGC);
+
+  // Reserve space for prev and next bitmap.
+  size_t bitmap_size = CMBitMap::compute_size(g1_rs.size());
+
+  ReservedSpace prev_bitmap_rs(ReservedSpace::allocation_align_size_up(bitmap_size));
+  G1RegionToSpaceMapper* prev_bitmap_storage =
+    G1RegionToSpaceMapper::create_mapper(prev_bitmap_rs,
+                                         os::vm_page_size(),
+                                         HeapRegion::GrainBytes,
+                                         CMBitMap::mark_distance(),
+                                         mtGC);
+
+  ReservedSpace next_bitmap_rs(ReservedSpace::allocation_align_size_up(bitmap_size));
+  G1RegionToSpaceMapper* next_bitmap_storage =
+    G1RegionToSpaceMapper::create_mapper(next_bitmap_rs,
+                                         os::vm_page_size(),
+                                         HeapRegion::GrainBytes,
+                                         CMBitMap::mark_distance(),
+                                         mtGC);
+
+  _hrs.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
+  g1_barrier_set()->initialize(cardtable_storage);
+  // Do later initialization work for concurrent refinement.
+  _cg1r->init(card_counts_storage);
 
   // 6843694 - ensure that the maximum region index can fit
   // in the remembered set structures.
@@ -2078,17 +2049,16 @@
 
   FreeRegionList::set_unrealistically_long_length(max_regions() + 1);
 
-  _bot_shared = new G1BlockOffsetSharedArray(_reserved,
-                                             heap_word_size(init_byte_size));
+  _bot_shared = new G1BlockOffsetSharedArray(_reserved, bot_storage);
 
   _g1h = this;
 
-  _in_cset_fast_test.initialize(_g1_reserved.start(), _g1_reserved.end(), HeapRegion::GrainBytes);
-  _humongous_is_live.initialize(_g1_reserved.start(), _g1_reserved.end(), HeapRegion::GrainBytes);
+  _in_cset_fast_test.initialize(_hrs.reserved().start(), _hrs.reserved().end(), HeapRegion::GrainBytes);
+  _humongous_is_live.initialize(_hrs.reserved().start(), _hrs.reserved().end(), HeapRegion::GrainBytes);
 
   // Create the ConcurrentMark data structure and thread.
   // (Must do this late, so that "max_regions" is defined.)
-  _cm = new ConcurrentMark(this, heap_rs);
+  _cm = new ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
   if (_cm == NULL || !_cm->completed_initialization()) {
     vm_shutdown_during_initialization("Could not create/initialize ConcurrentMark");
     return JNI_ENOMEM;
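
Sizing note for the mappers created above: each auxiliary structure is committed in slices proportional to the heap regions it covers. For the BOT, one byte covers N_bytes = 512 heap bytes, so committing one region commits GrainBytes / 512 bytes of BOT storage. A worked sketch assuming a 1 MB region size (the actual size is chosen ergonomically):

    #include <cassert>
    #include <cstddef>

    int main() {
      const size_t GrainBytes = 1024 * 1024;  // assumed heap region size
      const size_t N_bytes    = 512;          // heap bytes covered per BOT byte

      // BOT backing bytes that one committed heap region requires:
      size_t bot_bytes_per_region = GrainBytes / N_bytes;
      assert(bot_bytes_per_region == 2048);

      // For a region at index idx, the mapper commits this BOT byte range:
      size_t idx = 5;
      size_t bot_start = idx * bot_bytes_per_region;        // 10240
      size_t bot_end   = bot_start + bot_bytes_per_region;  // 12288 (exclusive)
      assert(bot_start == 10240 && bot_end == 12288);
      return 0;
    }
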
@@ -2143,12 +2113,10 @@
   // counts and that mechanism.
   SpecializationStats::clear();
 
-  // Here we allocate the dummy full region that is required by the
-  // G1AllocRegion class. If we don't pass an address in the reserved
-  // space here, lots of asserts fire.
-
-  HeapRegion* dummy_region = new_heap_region(0 /* index of bottom region */,
-                                             _g1_reserved.start());
+  // Here we allocate the dummy HeapRegion that is required by the
+  // G1AllocRegion class.
+  HeapRegion* dummy_region = _hrs.get_dummy_region();
+
   // We'll re-use the same region whether the alloc region will
   // require BOT updates or not and, if it doesn't, then a non-young
   // region will complain that it cannot support allocations without
@@ -2264,7 +2232,7 @@
 }
 
 size_t G1CollectedHeap::capacity() const {
-  return _g1_committed.byte_size();
+  return _hrs.length() * HeapRegion::GrainBytes;
 }
 
 void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
@@ -2548,7 +2516,7 @@
         }
       }
     } else {
-      if (cause == GCCause::_gc_locker
+      if (cause == GCCause::_gc_locker || cause == GCCause::_wb_young_gc
           DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
 
         // Schedule a standard evacuation pause. We're setting word_size
@@ -2569,8 +2537,8 @@
 }
 
 bool G1CollectedHeap::is_in(const void* p) const {
-  if (_g1_committed.contains(p)) {
-    // Given that we know that p is in the committed space,
+  if (_hrs.reserved().contains(p)) {
+    // Given that we know that p is in the reserved space,
     // heap_region_containing_raw() should successfully
     // return the containing region.
     HeapRegion* hr = heap_region_containing_raw(p);
@@ -2580,6 +2548,18 @@
   }
 }
 
+#ifdef ASSERT
+bool G1CollectedHeap::is_in_exact(const void* p) const {
+  bool contains = reserved_region().contains(p);
+  bool available = _hrs.is_available(addr_to_region((HeapWord*)p));
+  return contains && available;
+}
+#endif
+
 // Iteration functions.
 
 // Applies an ExtendedOopClosure onto all references of objects within a HeapRegion.
@@ -2644,83 +2624,9 @@
 void
 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
                                                  uint worker_id,
-                                                 uint no_of_par_workers,
-                                                 jint claim_value) {
-  const uint regions = n_regions();
-  const uint max_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
-                             no_of_par_workers :
-                             1);
-  assert(UseDynamicNumberOfGCThreads ||
-         no_of_par_workers == workers()->total_workers(),
-         "Non dynamic should use fixed number of workers");
-  // try to spread out the starting points of the workers
-  const HeapRegion* start_hr =
-                        start_region_for_worker(worker_id, no_of_par_workers);
-  const uint start_index = start_hr->hrs_index();
-
-  // each worker will actually look at all regions
-  for (uint count = 0; count < regions; ++count) {
-    const uint index = (start_index + count) % regions;
-    assert(0 <= index && index < regions, "sanity");
-    HeapRegion* r = region_at(index);
-    // we'll ignore "continues humongous" regions (we'll process them
-    // when we come across their corresponding "start humongous"
-    // region) and regions already claimed
-    if (r->claim_value() == claim_value || r->continuesHumongous()) {
-      continue;
-    }
-    // OK, try to claim it
-    if (r->claimHeapRegion(claim_value)) {
-      // success!
-      assert(!r->continuesHumongous(), "sanity");
-      if (r->startsHumongous()) {
-        // If the region is "starts humongous" we'll iterate over its
-        // "continues humongous" first; in fact we'll do them
-        // first. The order is important. In on case, calling the
-        // closure on the "starts humongous" region might de-allocate
-        // and clear all its "continues humongous" regions and, as a
-        // result, we might end up processing them twice. So, we'll do
-        // them first (notice: most closures will ignore them anyway) and
-        // then we'll do the "starts humongous" region.
-        for (uint ch_index = index + 1; ch_index < regions; ++ch_index) {
-          HeapRegion* chr = region_at(ch_index);
-
-          // if the region has already been claimed or it's not
-          // "continues humongous" we're done
-          if (chr->claim_value() == claim_value ||
-              !chr->continuesHumongous()) {
-            break;
-          }
-
-          // No one should have claimed it directly. We can given
-          // that we claimed its "starts humongous" region.
-          assert(chr->claim_value() != claim_value, "sanity");
-          assert(chr->humongous_start_region() == r, "sanity");
-
-          if (chr->claimHeapRegion(claim_value)) {
-            // we should always be able to claim it; no one else should
-            // be trying to claim this region
-
-            bool res2 = cl->doHeapRegion(chr);
-            assert(!res2, "Should not abort");
-
-            // Right now, this holds (i.e., no closure that actually
-            // does something with "continues humongous" regions
-            // clears them). We might have to weaken it in the future,
-            // but let's leave these two asserts here for extra safety.
-            assert(chr->continuesHumongous(), "should still be the case");
-            assert(chr->humongous_start_region() == r, "sanity");
-          } else {
-            guarantee(false, "we should not reach here");
-          }
-        }
-      }
-
-      assert(!r->continuesHumongous(), "sanity");
-      bool res = cl->doHeapRegion(r);
-      assert(!res, "Should not abort");
-    }
-  }
+                                                 uint num_workers,
+                                                 jint claim_value) const {
+  _hrs.par_iterate(cl, worker_id, num_workers, claim_value);
 }
 
 class ResetClaimValuesClosure: public HeapRegionClosure {
@@ -2898,17 +2804,6 @@
   return result;
 }
 
-HeapRegion* G1CollectedHeap::start_region_for_worker(uint worker_i,
-                                                     uint no_of_par_workers) {
-  uint worker_num =
-           G1CollectedHeap::use_parallel_gc_threads() ? no_of_par_workers : 1U;
-  assert(UseDynamicNumberOfGCThreads ||
-         no_of_par_workers == workers()->total_workers(),
-         "Non dynamic should use fixed number of workers");
-  const uint start_index = n_regions() * worker_i / worker_num;
-  return region_at(start_index);
-}
-
 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
   HeapRegion* r = g1_policy()->collection_set();
   while (r != NULL) {
@@ -2951,15 +2846,11 @@
 }
 
 HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const {
-  // We're not using an iterator given that it will wrap around when
-  // it reaches the last region and this is not what we want here.
-  for (uint index = from->hrs_index() + 1; index < n_regions(); index++) {
-    HeapRegion* hr = region_at(index);
-    if (!hr->isHumongous()) {
-      return hr;
-    }
-  }
-  return NULL;
+  HeapRegion* result = _hrs.next_region_in_heap(from);
+  while (result != NULL && result->isHumongous()) {
+    result = _hrs.next_region_in_heap(result);
+  }
+  return result;
 }
 
 Space* G1CollectedHeap::space_containing(const void* addr) const {
@@ -3017,7 +2908,7 @@
 }
 
 size_t G1CollectedHeap::max_capacity() const {
-  return _g1_reserved.byte_size();
+  return _hrs.reserved().byte_size();
 }
 
 jlong G1CollectedHeap::millis_since_last_gc() {
@@ -3546,9 +3437,9 @@
   st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
             capacity()/K, used_unlocked()/K);
   st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
-            _g1_storage.low_boundary(),
-            _g1_storage.high(),
-            _g1_storage.high_boundary());
+            _hrs.reserved().start(),
+            _hrs.reserved().start() + _hrs.length() * HeapRegion::GrainWords,
+            _hrs.reserved().end());
   st->cr();
   st->print("  region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
   uint young_regions = _young_list->length();
@@ -4239,10 +4130,7 @@
             // No need for an ergo verbose message here,
             // expansion_amount() does this when it returns a value > 0.
             if (!expand(expand_bytes)) {
-              // We failed to expand the heap so let's verify that
-              // committed/uncommitted amount match the backing store
-              assert(capacity() == _g1_storage.committed_size(), "committed size mismatch");
-              assert(max_capacity() == _g1_storage.reserved_size(), "reserved size mismatch");
+              // We failed to expand the heap. Cannot do anything about it.
             }
           }
         }
@@ -4302,10 +4190,6 @@
       // RETIRE events are generated before the end GC event.
       _hr_printer.end_gc(false /* full */, (size_t) total_collections());
 
-      if (mark_in_progress()) {
-        concurrent_mark()->update_g1_committed();
-      }
-
 #ifdef TRACESPINNING
       ParallelTaskTerminator::print_termination_counts();
 #endif
@@ -6140,6 +6024,7 @@
                                   bool locked) {
   assert(!hr->isHumongous(), "this is only for non-humongous regions");
   assert(!hr->is_empty(), "the region should not be empty");
+  assert(_hrs.is_available(hr->hrs_index()), "region should be committed");
   assert(free_list != NULL, "pre-condition");
 
   if (G1VerifyBitmaps) {
@@ -6194,7 +6079,7 @@
   assert(list != NULL, "list can't be null");
   if (!list->is_empty()) {
     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
-    _free_list.add_ordered(list);
+    _hrs.insert_list_into_free_list(list);
   }
 }
 
@@ -6802,22 +6687,22 @@
     // this is that during a full GC string deduplication needs to know if
     // a collected region was young or old when the full GC was initiated.
   }
-  _free_list.remove_all();
+  _hrs.remove_all_free_regions();
 }
 
 class RebuildRegionSetsClosure : public HeapRegionClosure {
 private:
   bool            _free_list_only;
   HeapRegionSet*   _old_set;
-  FreeRegionList* _free_list;
+  HeapRegionSeq*   _hrs;
   size_t          _total_used;
 
 public:
   RebuildRegionSetsClosure(bool free_list_only,
-                           HeapRegionSet* old_set, FreeRegionList* free_list) :
+                           HeapRegionSet* old_set, HeapRegionSeq* hrs) :
     _free_list_only(free_list_only),
-    _old_set(old_set), _free_list(free_list), _total_used(0) {
-    assert(_free_list->is_empty(), "pre-condition");
+    _old_set(old_set), _hrs(hrs), _total_used(0) {
+    assert(_hrs->num_free_regions() == 0, "pre-condition");
     if (!free_list_only) {
       assert(_old_set->is_empty(), "pre-condition");
     }
@@ -6830,7 +6715,7 @@
 
     if (r->is_empty()) {
       // Add free regions to the free list
-      _free_list->add_as_tail(r);
+      _hrs->insert_into_free_list(r);
     } else if (!_free_list_only) {
       assert(!r->is_young(), "we should not come across young regions");
 
@@ -6858,7 +6743,7 @@
     _young_list->empty_list();
   }
 
-  RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_free_list);
+  RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrs);
   heap_region_iterate(&cl);
 
   if (!free_list_only) {
@@ -7013,13 +6898,42 @@
   _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
                                GCAllocForTenured);
 }
+
+HeapRegion* OldGCAllocRegion::release() {
+  HeapRegion* cur = get();
+  if (cur != NULL) {
+    // Determine how far we are from the next card boundary. If it is smaller than
+    // the minimum object size we can allocate into, expand into the next card.
+    HeapWord* top = cur->top();
+    HeapWord* aligned_top = (HeapWord*)align_ptr_up(top, G1BlockOffsetSharedArray::N_bytes);
+
+    size_t to_allocate_words = pointer_delta(aligned_top, top, HeapWordSize);
+
+    if (to_allocate_words != 0) {
+      // We are not at a card boundary. Fill up, possibly into the next, taking the
+      // end of the region and the minimum object size into account.
+      to_allocate_words = MIN2(pointer_delta(cur->end(), cur->top(), HeapWordSize),
+                               MAX2(to_allocate_words, G1CollectedHeap::min_fill_size()));
+
+      // Skip allocation if there is not enough space to allocate even the smallest
+      // possible object. In this case this region will not be retained, so the
+      // original problem cannot occur.
+      if (to_allocate_words >= G1CollectedHeap::min_fill_size()) {
+        HeapWord* dummy = attempt_allocation(to_allocate_words, true /* bot_updates */);
+        CollectedHeap::fill_with_object(dummy, to_allocate_words);
+      }
+    }
+  }
+  return G1AllocRegion::release();
+}
+
 // Heap region set verification
 
 class VerifyRegionListsClosure : public HeapRegionClosure {
 private:
   HeapRegionSet*   _old_set;
   HeapRegionSet*   _humongous_set;
-  FreeRegionList*  _free_list;
+  HeapRegionSeq*   _hrs;
 
 public:
   HeapRegionSetCount _old_count;
@@ -7028,8 +6942,8 @@
 
   VerifyRegionListsClosure(HeapRegionSet* old_set,
                            HeapRegionSet* humongous_set,
-                           FreeRegionList* free_list) :
-    _old_set(old_set), _humongous_set(humongous_set), _free_list(free_list),
+                           HeapRegionSeq* hrs) :
+    _old_set(old_set), _humongous_set(humongous_set), _hrs(hrs),
     _old_count(), _humongous_count(), _free_count(){ }
 
   bool doHeapRegion(HeapRegion* hr) {
@@ -7043,7 +6957,7 @@
       assert(hr->containing_set() == _humongous_set, err_msg("Heap region %u is starts humongous but not in humongous set.", hr->hrs_index()));
       _humongous_count.increment(1u, hr->capacity());
     } else if (hr->is_empty()) {
-      assert(hr->containing_set() == _free_list, err_msg("Heap region %u is empty but not on the free list.", hr->hrs_index()));
+      assert(_hrs->is_free(hr), err_msg("Heap region %u is empty but not on the free list.", hr->hrs_index()));
       _free_count.increment(1u, hr->capacity());
     } else {
       assert(hr->containing_set() == _old_set, err_msg("Heap region %u is old but not in the old set.", hr->hrs_index()));
@@ -7052,7 +6966,7 @@
     return false;
   }
 
-  void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, FreeRegionList* free_list) {
+  void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, HeapRegionSeq* free_list) {
     guarantee(old_set->length() == _old_count.length(), err_msg("Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count.length()));
     guarantee(old_set->total_capacity_bytes() == _old_count.capacity(), err_msg("Old set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
         old_set->total_capacity_bytes(), _old_count.capacity()));
@@ -7061,26 +6975,17 @@
     guarantee(humongous_set->total_capacity_bytes() == _humongous_count.capacity(), err_msg("Hum set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
         humongous_set->total_capacity_bytes(), _humongous_count.capacity()));
 
-    guarantee(free_list->length() == _free_count.length(), err_msg("Free list count mismatch. Expected %u, actual %u.", free_list->length(), _free_count.length()));
+    guarantee(free_list->num_free_regions() == _free_count.length(), err_msg("Free list count mismatch. Expected %u, actual %u.", free_list->num_free_regions(), _free_count.length()));
     guarantee(free_list->total_capacity_bytes() == _free_count.capacity(), err_msg("Free list capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
         free_list->total_capacity_bytes(), _free_count.capacity()));
   }
 };
 
-HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
-                                             HeapWord* bottom) {
-  HeapWord* end = bottom + HeapRegion::GrainWords;
-  MemRegion mr(bottom, end);
-  assert(_g1_reserved.contains(mr), "invariant");
-  // This might return NULL if the allocation fails
-  return new HeapRegion(hrs_index, _bot_shared, mr);
-}
-
 void G1CollectedHeap::verify_region_sets() {
   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
 
   // First, check the explicit lists.
-  _free_list.verify_list();
+  _hrs.verify();
   {
     // Given that a concurrent operation might be adding regions to
     // the secondary free list we have to take the lock before
@@ -7111,9 +7016,9 @@
   // Finally, make sure that the region accounting in the lists is
   // consistent with what we see in the heap.
 
-  VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_free_list);
+  VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_hrs);
   heap_region_iterate(&cl);
-  cl.verify_counts(&_old_set, &_humongous_set, &_free_list);
+  cl.verify_counts(&_old_set, &_humongous_set, &_hrs);
 }
 
 // Optimized nmethod scanning
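For illustration, the dummy-fill clamping that release() performs above can be modeled standalone. Everything below is a sketch: the word counts are made-up example values and the helper name words_to_fill is hypothetical, not part of the VM.

    #include <algorithm>
    #include <cassert>
    #include <cstddef>

    // Model of the clamp in OldGCAllocRegion::release(): never fill past the
    // end of the region, but round the fill request up to at least the
    // minimum object size so the filler is a valid heap object.
    size_t words_to_fill(size_t words_left_in_region, size_t requested_words,
                         size_t min_fill_size) {
      return std::min(words_left_in_region,
                      std::max(requested_words, min_fill_size));
    }

    int main() {
      assert(words_to_fill(100, 24, 2) == 24);  // plenty of room: fill as requested
      assert(words_to_fill(100, 1, 2) == 2);    // request below minimum: round up
      // Region nearly full: the result drops below min_fill_size, so the caller
      // skips the allocation; the region is not retained and no race can occur.
      assert(words_to_fill(1, 24, 2) == 1);
      return 0;
    }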
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -183,6 +183,13 @@
 public:
   OldGCAllocRegion()
   : G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { }
+
+  // This specialization of release() makes sure that the last card that has been
+  // allocated into has been completely filled by a dummy object.
+  // This avoids races between remembered set scanning, which wants to update the
+  // BOT of the last card in the retained old gc alloc region, and allocation
+  // threads allocating into that card at the same time.
+  virtual HeapRegion* release();
 };
 
 // The G1 STW is alive closure.
@@ -199,6 +206,13 @@
 
 class RefineCardTableEntryClosure;
 
+class G1RegionMappingChangedListener : public G1MappingChangedListener {
+ private:
+  void reset_from_card_cache(uint start_idx, size_t num_regions);
+ public:
+  virtual void on_commit(uint start_idx, size_t num_regions);
+};
+
 class G1CollectedHeap : public SharedHeap {
   friend class VM_CollectForMetadataAllocation;
   friend class VM_G1CollectForAllocation;
@@ -237,19 +251,9 @@
 
   static size_t _humongous_object_threshold_in_words;
 
-  // Storage for the G1 heap.
-  VirtualSpace _g1_storage;
-  MemRegion    _g1_reserved;
-
-  // The part of _g1_storage that is currently committed.
-  MemRegion _g1_committed;
-
-  // The master free list. It will satisfy all new region allocations.
-  FreeRegionList _free_list;
-
   // The secondary free list which contains regions that have been
-  // freed up during the cleanup process. This will be appended to the
-  // master free list when appropriate.
+  // freed up during the cleanup process. This will be appended to
+  // the master free list when appropriate.
   FreeRegionList _secondary_free_list;
 
   // It keeps track of the old regions.
@@ -283,6 +287,9 @@
   // after heap shrinking (free_list_only == true).
   void rebuild_region_sets(bool free_list_only);
 
+  // Callback for region mapping changed events.
+  G1RegionMappingChangedListener _listener;
+
   // The sequence of all heap regions in the heap.
   HeapRegionSeq _hrs;
 
@@ -513,14 +520,6 @@
   // humongous object, set is_old to true. If not, to false.
   HeapRegion* new_region(size_t word_size, bool is_old, bool do_expand);
 
-  // Attempt to satisfy a humongous allocation request of the given
-  // size by finding a contiguous set of free regions of num_regions
-  // length and remove them from the master free list. Return the
-  // index of the first region or G1_NULL_HRS_INDEX if the search
-  // was unsuccessful.
-  uint humongous_obj_allocate_find_first(uint num_regions,
-                                         size_t word_size);
-
   // Initialize a contiguous set of free regions of length num_regions
   // and starting at index first so that they appear as a single
   // humongous region.
@@ -862,11 +861,6 @@
                         CodeBlobClosure* scan_strong_code,
                         uint worker_i);
 
-  // Notifies all the necessary spaces that the committed space has
-  // been updated (either expanded or shrunk). It should be called
-  // after _g1_storage is updated.
-  void update_committed_space(HeapWord* old_end, HeapWord* new_end);
-
   // The concurrent marker (and the thread it runs in.)
   ConcurrentMark* _cm;
   ConcurrentMarkThread* _cmThread;
@@ -1177,27 +1171,20 @@
   // But G1CollectedHeap doesn't yet support this.
 
   virtual bool is_maximal_no_gc() const {
-    return _g1_storage.uncommitted_size() == 0;
+    return _hrs.available() == 0;
   }
 
-  // The total number of regions in the heap.
-  uint n_regions() const { return _hrs.length(); }
+  // The current number of regions in the heap.
+  uint num_regions() const { return _hrs.length(); }
 
   // The max number of regions in the heap.
   uint max_regions() const { return _hrs.max_length(); }
 
   // The number of regions that are completely free.
-  uint free_regions() const { return _free_list.length(); }
+  uint num_free_regions() const { return _hrs.num_free_regions(); }
 
   // The number of regions that are not completely free.
-  uint used_regions() const { return n_regions() - free_regions(); }
-
-  // The number of regions available for "regular" expansion.
-  uint expansion_regions() const { return _expansion_regions; }
-
-  // Factory method for HeapRegion instances. It will return NULL if
-  // the allocation fails.
-  HeapRegion* new_heap_region(uint hrs_index, HeapWord* bottom);
+  uint num_used_regions() const { return num_regions() - num_free_regions(); }
 
   void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
   void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
@@ -1246,7 +1233,7 @@
 
 #ifdef ASSERT
   bool is_on_master_free_list(HeapRegion* hr) {
-    return hr->containing_set() == &_free_list;
+    return _hrs.is_free(hr);
   }
 #endif // ASSERT
 
@@ -1258,7 +1245,7 @@
   }
 
   void append_secondary_free_list() {
-    _free_list.add_ordered(&_secondary_free_list);
+    _hrs.insert_list_into_free_list(&_secondary_free_list);
   }
 
   void append_secondary_free_list_if_not_empty_with_lock() {
@@ -1304,6 +1291,11 @@
 
   // Returns "TRUE" iff "p" points into the committed areas of the heap.
   virtual bool is_in(const void* p) const;
+#ifdef ASSERT
+  // Returns whether p is in one of the available areas of the heap. Slow but
+  // exact version.
+  bool is_in_exact(const void* p) const;
+#endif
 
   // Return "TRUE" iff the given object address is within the collection
   // set. Slow implementation.
@@ -1364,25 +1356,19 @@
   // Return "TRUE" iff the given object address is in the reserved
   // region of g1.
   bool is_in_g1_reserved(const void* p) const {
-    return _g1_reserved.contains(p);
+    return _hrs.reserved().contains(p);
   }
 
   // Returns a MemRegion that corresponds to the space that has been
   // reserved for the heap
-  MemRegion g1_reserved() {
-    return _g1_reserved;
-  }
-
-  // Returns a MemRegion that corresponds to the space that has been
-  // committed in the heap
-  MemRegion g1_committed() {
-    return _g1_committed;
+  MemRegion g1_reserved() const {
+    return _hrs.reserved();
   }
 
   virtual bool is_in_closed_subset(const void* p) const;
 
-  G1SATBCardTableModRefBS* g1_barrier_set() {
-    return (G1SATBCardTableModRefBS*) barrier_set();
+  G1SATBCardTableLoggingModRefBS* g1_barrier_set() {
+    return (G1SATBCardTableLoggingModRefBS*) barrier_set();
   }
 
   // This resets the card table to all zeros.  It is used after
@@ -1416,6 +1402,8 @@
   // within the heap.
   inline uint addr_to_region(HeapWord* addr) const;
 
+  inline HeapWord* bottom_addr_for_region(uint index) const;
+
   // Divide the heap region sequence into "chunks" of some size (the number
   // of regions divided by the number of parallel threads times some
   // overpartition factor, currently 4).  Assumes that this will be called
@@ -1429,10 +1417,10 @@
   // setting the claim value of the second and subsequent regions of the
   // chunk.)  For now requires that "doHeapRegion" always returns "false",
   // i.e., that a closure never attempt to abort a traversal.
-  void heap_region_par_iterate_chunked(HeapRegionClosure* blk,
-                                       uint worker,
-                                       uint no_of_par_workers,
-                                       jint claim_value);
+  void heap_region_par_iterate_chunked(HeapRegionClosure* cl,
+                                       uint worker_id,
+                                       uint num_workers,
+                                       jint claim_value) const;
 
   // It resets all the region claim values to the default.
   void reset_heap_region_claim_values();
@@ -1457,11 +1445,6 @@
   // starting region for iterating over the current collection set.
   HeapRegion* start_cset_region_for_worker(uint worker_i);
 
-  // This is a convenience method that is used by the
-  // HeapRegionIterator classes to calculate the starting region for
-  // each worker so that they do not all start from the same region.
-  HeapRegion* start_region_for_worker(uint worker_i, uint no_of_par_workers);
-
   // Iterate over the regions (if any) in the current collection set.
   void collection_set_iterate(HeapRegionClosure* blk);
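The overpartitioning described above (regions divided by workers times a factor of four) is simple to sketch standalone; the region and worker counts below are made-up examples.

    #include <cstdio>

    // Chunk size for chunked parallel region iteration: regions divided by
    // (workers * overpartition factor), per the comment above.
    unsigned chunk_size(unsigned num_regions, unsigned num_workers) {
      const unsigned OverpartitionFactor = 4;
      unsigned chunks = num_workers * OverpartitionFactor;
      unsigned size = num_regions / chunks;
      return size > 0 ? size : 1;  // at least one region per chunk
    }

    int main() {
      // 2048 regions, 8 workers -> 32 chunks of 64 regions each; faster
      // workers can claim extra chunks instead of idling at a barrier.
      printf("%u\n", chunk_size(2048, 8));  // prints 64
      return 0;
    }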
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -47,19 +47,21 @@
   return (uint)(pointer_delta(addr, _reserved.start(), sizeof(uint8_t)) >> HeapRegion::LogOfHRGrainBytes);
 }
 
+inline HeapWord* G1CollectedHeap::bottom_addr_for_region(uint index) const {
+  return _hrs.reserved().start() + index * HeapRegion::GrainWords;
+}
+
 template <class T>
-inline HeapRegion*
-G1CollectedHeap::heap_region_containing_raw(const T addr) const {
+inline HeapRegion* G1CollectedHeap::heap_region_containing_raw(const T addr) const {
   assert(addr != NULL, "invariant");
-  assert(_g1_reserved.contains((const void*) addr),
+  assert(is_in_g1_reserved((const void*) addr),
       err_msg("Address "PTR_FORMAT" is outside of the heap ranging from ["PTR_FORMAT" to "PTR_FORMAT")",
-          p2i((void*)addr), p2i(_g1_reserved.start()), p2i(_g1_reserved.end())));
+          p2i((void*)addr), p2i(g1_reserved().start()), p2i(g1_reserved().end())));
   return _hrs.addr_to_region((HeapWord*) addr);
 }
 
 template <class T>
-inline HeapRegion*
-G1CollectedHeap::heap_region_containing(const T addr) const {
+inline HeapRegion* G1CollectedHeap::heap_region_containing(const T addr) const {
   HeapRegion* hr = heap_region_containing_raw(addr);
   if (hr->continuesHumongous()) {
     return hr->humongous_start_region();
@@ -89,10 +91,9 @@
   return r != NULL && r->in_collection_set();
 }
 
-inline HeapWord*
-G1CollectedHeap::attempt_allocation(size_t word_size,
-                                    unsigned int* gc_count_before_ret,
-                                    int* gclocker_retry_count_ret) {
+inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
+                                                     unsigned int* gc_count_before_ret,
+                                                     int* gclocker_retry_count_ret) {
   assert_heap_not_locked_and_not_at_safepoint();
   assert(!isHumongous(word_size), "attempt_allocation() should not "
          "be called for humongous allocation requests");
@@ -252,8 +253,7 @@
   }
 }
 
-inline bool
-G1CollectedHeap::evacuation_should_fail() {
+inline bool G1CollectedHeap::evacuation_should_fail() {
   if (!G1EvacuationFailureALot || !_evacuation_failure_alot_for_current_gc) {
     return false;
   }
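The new bottom_addr_for_region()/addr_to_region() pair is plain linear arithmetic over a contiguous reservation. A standalone model, assuming a 1 MB region grain and a made-up base address:

    #include <cassert>
    #include <cstdint>

    // Regions are laid out contiguously from the reserved base, so index and
    // address convert by a multiply or a divide. Grain size and base address
    // are illustrative assumptions, not values read from a real heap.
    const uint64_t kGrainBytes   = 1 << 20;        // assume 1 MB regions
    const uint64_t kReservedBase = 0x100000000ULL; // hypothetical heap base

    uint64_t bottom_addr_for_region(unsigned index) {
      return kReservedBase + (uint64_t)index * kGrainBytes;
    }

    unsigned addr_to_region(uint64_t addr) {
      return (unsigned)((addr - kReservedBase) / kGrainBytes);
    }

    int main() {
      uint64_t bottom = bottom_addr_for_region(5);
      assert(addr_to_region(bottom) == 5);
      assert(addr_to_region(bottom + kGrainBytes - 1) == 5);  // last byte of region 5
      return 0;
    }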
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -456,7 +456,7 @@
   } else {
     _young_list_fixed_length = _young_gen_sizer->min_desired_young_length();
   }
-  _free_regions_at_end_of_collection = _g1->free_regions();
+  _free_regions_at_end_of_collection = _g1->num_free_regions();
   update_young_list_target_length();
 
   // We may immediately start allocating regions and placing them on the
@@ -829,7 +829,7 @@
 
   record_survivor_regions(0, NULL, NULL);
 
-  _free_regions_at_end_of_collection = _g1->free_regions();
+  _free_regions_at_end_of_collection = _g1->num_free_regions();
   // Reset survivors SurvRateGroup.
   _survivor_surv_rate_group->reset();
   update_young_list_target_length();
@@ -1181,7 +1181,7 @@
 
   _in_marking_window = new_in_marking_window;
   _in_marking_window_im = new_in_marking_window_im;
-  _free_regions_at_end_of_collection = _g1->free_regions();
+  _free_regions_at_end_of_collection = _g1->num_free_regions();
   update_young_list_target_length();
 
   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
@@ -1203,7 +1203,7 @@
   _survivor_used_bytes_before_gc = young_list->survivor_used_bytes();
   _heap_capacity_bytes_before_gc = _g1->capacity();
   _heap_used_bytes_before_gc = _g1->used();
-  _cur_collection_pause_used_regions_at_start = _g1->used_regions();
+  _cur_collection_pause_used_regions_at_start = _g1->num_used_regions();
 
   _eden_capacity_bytes_before_gc =
          (_young_list_target_length * HeapRegion::GrainBytes) - _survivor_used_bytes_before_gc;
@@ -1618,7 +1618,7 @@
 G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) {
   _collectionSetChooser->clear();
 
-  uint region_num = _g1->n_regions();
+  uint region_num = _g1->num_regions();
   if (G1CollectedHeap::use_parallel_gc_threads()) {
     const uint OverpartitionFactor = 4;
     uint WorkUnit;
@@ -1639,7 +1639,7 @@
         MAX2(region_num / (uint) (ParallelGCThreads * OverpartitionFactor),
              MinWorkUnit);
     }
-    _collectionSetChooser->prepare_for_par_region_addition(_g1->n_regions(),
+    _collectionSetChooser->prepare_for_par_region_addition(_g1->num_regions(),
                                                            WorkUnit);
     ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
                                             (int) WorkUnit);
@@ -1936,7 +1936,7 @@
   // of them are available.
 
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  const size_t region_num = g1h->n_regions();
+  const size_t region_num = g1h->num_regions();
   const size_t perc = (size_t) G1OldCSetRegionThresholdPercent;
   size_t result = region_num * perc / 100;
   // emulate ceiling
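The hunk is cut off right after the "emulate ceiling" comment; one common way to finish that computation in integer arithmetic is sketched below. This is a guess at the intent, with example values, not the verbatim continuation.

    #include <cassert>
    #include <cstddef>

    // Integer ceiling of region_num * perc / 100: bump the truncated
    // quotient whenever the division had a remainder.
    size_t old_cset_region_limit(size_t region_num, size_t perc) {
      size_t result = region_num * perc / 100;
      if (100 * result < region_num * perc) {
        result += 1;  // emulate ceiling
      }
      return result;
    }

    int main() {
      assert(old_cset_region_limit(2048, 10) == 205);  // 204.8 rounds up to 205
      assert(old_cset_region_limit(2000, 10) == 200);  // exact division, no bump
      return 0;
    }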
--- a/hotspot/src/share/vm/gc_implementation/g1/g1HotCardCache.cpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1HotCardCache.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -33,7 +33,7 @@
 G1HotCardCache::G1HotCardCache(G1CollectedHeap *g1h):
   _g1h(g1h), _hot_cache(NULL), _use_cache(false), _card_counts(g1h) {}
 
-void G1HotCardCache::initialize() {
+void G1HotCardCache::initialize(G1RegionToSpaceMapper* card_counts_storage) {
   if (default_use_cache()) {
     _use_cache = true;
 
@@ -49,7 +49,7 @@
     _hot_cache_par_chunk_size = MAX2(1, _hot_cache_size / (int)n_workers);
     _hot_cache_par_claimed_idx = 0;
 
-    _card_counts.initialize();
+    _card_counts.initialize(card_counts_storage);
   }
 }
 
@@ -135,11 +135,8 @@
   // above, are discarded prior to re-enabling the cache near the end of the GC.
 }
 
-void G1HotCardCache::resize_card_counts(size_t heap_capacity) {
-  _card_counts.resize(heap_capacity);
-}
-
 void G1HotCardCache::reset_card_counts(HeapRegion* hr) {
+  assert(!hr->isHumongous(), "Should have been cleared");
   _card_counts.clear_region(hr);
 }
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1HotCardCache.hpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1HotCardCache.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -78,7 +78,7 @@
   G1HotCardCache(G1CollectedHeap* g1h);
   ~G1HotCardCache();
 
-  void initialize();
+  void initialize(G1RegionToSpaceMapper* card_counts_storage);
 
   bool use_cache() { return _use_cache; }
 
@@ -115,9 +115,6 @@
 
   bool hot_cache_is_empty() { return _n_hot == 0; }
 
-  // Resizes the card counts table to match the given capacity
-  void resize_card_counts(size_t heap_capacity);
-
   // Zeros the values in the card counts table for entire committed heap
   void reset_card_counts();
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/g1PageBasedVirtualSpace.hpp"
+#include "oops/markOop.hpp"
+#include "oops/oop.inline.hpp"
+#include "services/memTracker.hpp"
+#ifdef TARGET_OS_FAMILY_linux
+# include "os_linux.inline.hpp"
+#endif
+#ifdef TARGET_OS_FAMILY_solaris
+# include "os_solaris.inline.hpp"
+#endif
+#ifdef TARGET_OS_FAMILY_windows
+# include "os_windows.inline.hpp"
+#endif
+#ifdef TARGET_OS_FAMILY_aix
+# include "os_aix.inline.hpp"
+#endif
+#ifdef TARGET_OS_FAMILY_bsd
+# include "os_bsd.inline.hpp"
+#endif
+#include "utilities/bitMap.inline.hpp"
+
+G1PageBasedVirtualSpace::G1PageBasedVirtualSpace() : _low_boundary(NULL),
+  _high_boundary(NULL), _committed(), _page_size(0), _special(false), _executable(false) {
+}
+
+bool G1PageBasedVirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t page_size) {
+  if (!rs.is_reserved()) {
+    return false;  // Allocation failed.
+  }
+  assert(_low_boundary == NULL, "VirtualSpace already initialized");
+  assert(page_size > 0, "Granularity must be non-zero.");
+
+  _low_boundary  = rs.base();
+  _high_boundary = _low_boundary + rs.size();
+
+  _special = rs.special();
+  _executable = rs.executable();
+
+  _page_size = page_size;
+
+  assert(_committed.size() == 0, "virtual space initialized more than once");
+  uintx size_in_bits = rs.size() / page_size;
+  _committed.resize(size_in_bits, /* in_resource_area */ false);
+
+  return true;
+}
+
+
+G1PageBasedVirtualSpace::~G1PageBasedVirtualSpace() {
+  release();
+}
+
+void G1PageBasedVirtualSpace::release() {
+  // This does not release the memory, since this class never reserved it.
+  // The caller must release it via rs.release();
+  _low_boundary           = NULL;
+  _high_boundary          = NULL;
+  _special                = false;
+  _executable             = false;
+  _page_size              = 0;
+  _committed.resize(0, false);
+}
+
+size_t G1PageBasedVirtualSpace::committed_size() const {
+  return _committed.count_one_bits() * _page_size;
+}
+
+size_t G1PageBasedVirtualSpace::reserved_size() const {
+  return pointer_delta(_high_boundary, _low_boundary, sizeof(char));
+}
+
+size_t G1PageBasedVirtualSpace::uncommitted_size()  const {
+  return reserved_size() - committed_size();
+}
+
+uintptr_t G1PageBasedVirtualSpace::addr_to_page_index(char* addr) const {
+  return (addr - _low_boundary) / _page_size;
+}
+
+bool G1PageBasedVirtualSpace::is_area_committed(uintptr_t start, size_t size_in_pages) const {
+  uintptr_t end = start + size_in_pages;
+  return _committed.get_next_zero_offset(start, end) >= end;
+}
+
+bool G1PageBasedVirtualSpace::is_area_uncommitted(uintptr_t start, size_t size_in_pages) const {
+  uintptr_t end = start + size_in_pages;
+  return _committed.get_next_one_offset(start, end) >= end;
+}
+
+char* G1PageBasedVirtualSpace::page_start(uintptr_t index) {
+  return _low_boundary + index * _page_size;
+}
+
+size_t G1PageBasedVirtualSpace::byte_size_for_pages(size_t num) {
+  return num * _page_size;
+}
+
+MemRegion G1PageBasedVirtualSpace::commit(uintptr_t start, size_t size_in_pages) {
+  // We need to make sure to commit all pages covered by the given area.
+  guarantee(is_area_uncommitted(start, size_in_pages), "Specified area is not uncommitted");
+
+  if (!_special) {
+    os::commit_memory_or_exit(page_start(start), byte_size_for_pages(size_in_pages), _executable,
+                              err_msg("Failed to commit pages from "SIZE_FORMAT" of length "SIZE_FORMAT, start, size_in_pages));
+  }
+  _committed.set_range(start, start + size_in_pages);
+
+  MemRegion result((HeapWord*)page_start(start), byte_size_for_pages(size_in_pages) / HeapWordSize);
+  return result;
+}
+
+MemRegion G1PageBasedVirtualSpace::uncommit(uintptr_t start, size_t size_in_pages) {
+  guarantee(is_area_committed(start, size_in_pages), "checking");
+
+  if (!_special) {
+    os::uncommit_memory(page_start(start), byte_size_for_pages(size_in_pages));
+  }
+
+  _committed.clear_range(start, start + size_in_pages);
+
+  MemRegion result((HeapWord*)page_start(start), byte_size_for_pages(size_in_pages) / HeapWordSize);
+  return result;
+}
+
+bool G1PageBasedVirtualSpace::contains(const void* p) const {
+  return _low_boundary <= (const char*) p && (const char*) p < _high_boundary;
+}
+
+#ifndef PRODUCT
+void G1PageBasedVirtualSpace::print_on(outputStream* out) {
+  out->print   ("Virtual space:");
+  if (special()) out->print(" (pinned in memory)");
+  out->cr();
+  out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
+  out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
+  out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(_low_boundary), p2i(_high_boundary));
+}
+
+void G1PageBasedVirtualSpace::print() {
+  print_on(tty);
+}
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1PAGEBASEDVIRTUALSPACE_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_G1PAGEBASEDVIRTUALSPACE_HPP
+
+#include "memory/allocation.hpp"
+#include "memory/memRegion.hpp"
+#include "runtime/virtualspace.hpp"
+#include "utilities/bitMap.hpp"
+
+// Virtual space management helper for a virtual space with an OS page allocation
+// granularity.
+// (De-)Allocation requests are always OS page aligned by passing a page index
+// and multiples of pages.
+// The implementation gives an error when trying to commit or uncommit pages that
+// have already been committed or uncommitted.
+class G1PageBasedVirtualSpace VALUE_OBJ_CLASS_SPEC {
+  friend class VMStructs;
+ private:
+  // Reserved area addresses.
+  char* _low_boundary;
+  char* _high_boundary;
+
+  // The commit/uncommit granularity in bytes.
+  size_t _page_size;
+
+  // Bitmap used for verification of commit/uncommit operations.
+  BitMap _committed;
+
+  // Indicates that the entire space has been committed and pinned in memory,
+  // so os::commit_memory() and os::uncommit_memory() have no effect.
+  bool _special;
+
+  // Indicates whether the committed space should be executable.
+  bool _executable;
+
+  // Returns the index of the page which contains the given address.
+  uintptr_t  addr_to_page_index(char* addr) const;
+  // Returns the address of the given page index.
+  char*  page_start(uintptr_t index);
+  // Returns the byte size of the given number of pages.
+  size_t byte_size_for_pages(size_t num);
+
+  // Returns true if the entire area is backed by committed memory.
+  bool is_area_committed(uintptr_t start, size_t size_in_pages) const;
+  // Returns true if the entire area is not backed by committed memory.
+  bool is_area_uncommitted(uintptr_t start, size_t size_in_pages) const;
+
+ public:
+
+  // Commit the given area of pages starting at start being size_in_pages large.
+  MemRegion commit(uintptr_t start, size_t size_in_pages);
+
+  // Uncommit the given area of pages starting at start being size_in_pages large.
+  MemRegion uncommit(uintptr_t start, size_t size_in_pages);
+
+  bool special() const { return _special; }
+
+  // Initialization
+  G1PageBasedVirtualSpace();
+  bool initialize_with_granularity(ReservedSpace rs, size_t page_size);
+
+  // Destruction
+  ~G1PageBasedVirtualSpace();
+
+  // Amount of reserved memory.
+  size_t reserved_size() const;
+  // Memory used in this virtual space.
+  size_t committed_size() const;
+  // Memory left to use/expand in this virtual space.
+  size_t uncommitted_size() const;
+
+  bool contains(const void* p) const;
+
+  MemRegion reserved() {
+    MemRegion x((HeapWord*)_low_boundary, reserved_size() / HeapWordSize);
+    return x;
+  }
+
+  void release();
+
+  void check_for_contiguity() PRODUCT_RETURN;
+
+  // Debugging
+  void print_on(outputStream* out) PRODUCT_RETURN;
+  void print();
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1PAGEBASEDVIRTUALSPACE_HPP
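The commit/uncommit bookkeeping this class describes can be modeled without any OS interaction: one bit per page, with each operation requiring the whole range to be in the opposite state first. The sketch below omits ReservedSpace, the _special path and the real os::commit_memory() calls.

    #include <cassert>
    #include <cstddef>
    #include <vector>

    // Standalone model of the G1PageBasedVirtualSpace commit bitmap.
    class PageCommitModel {
      std::vector<bool> _committed;  // one bit per page, as in _committed
     public:
      explicit PageCommitModel(size_t num_pages) : _committed(num_pages, false) {}

      bool is_area_committed(size_t start, size_t n) const {
        for (size_t i = start; i < start + n; i++)
          if (!_committed[i]) return false;
        return true;
      }
      bool is_area_uncommitted(size_t start, size_t n) const {
        for (size_t i = start; i < start + n; i++)
          if (_committed[i]) return false;
        return true;
      }
      void commit(size_t start, size_t n) {
        assert(is_area_uncommitted(start, n));  // mirrors the guarantee() in commit()
        for (size_t i = start; i < start + n; i++) _committed[i] = true;
      }
      void uncommit(size_t start, size_t n) {
        assert(is_area_committed(start, n));    // mirrors the guarantee() in uncommit()
        for (size_t i = start; i < start + n; i++) _committed[i] = false;
      }
    };

    int main() {
      PageCommitModel vs(16);  // a 16-page reservation, example size
      vs.commit(0, 4);         // commit pages [0, 4)
      assert(vs.is_area_committed(0, 4));
      vs.uncommit(1, 2);       // pages 1 and 2 go back to uncommitted
      assert(vs.is_area_uncommitted(1, 2));
      return 0;
    }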
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RegionToSpaceMapper.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/g1BiasedArray.hpp"
+#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
+#include "memory/allocation.inline.hpp"
+#include "runtime/virtualspace.hpp"
+#include "services/memTracker.hpp"
+#include "utilities/bitMap.inline.hpp"
+
+G1RegionToSpaceMapper::G1RegionToSpaceMapper(ReservedSpace rs,
+                                             size_t commit_granularity,
+                                             size_t region_granularity,
+                                             MemoryType type) :
+  _storage(),
+  _commit_granularity(commit_granularity),
+  _region_granularity(region_granularity),
+  _listener(NULL),
+  _commit_map() {
+  guarantee(is_power_of_2(commit_granularity), "must be");
+  guarantee(is_power_of_2(region_granularity), "must be");
+  _storage.initialize_with_granularity(rs, commit_granularity);
+
+  MemTracker::record_virtual_memory_type((address)rs.base(), type);
+}
+
+// G1RegionToSpaceMapper implementation where the region granularity is larger than
+// or the same as the commit granularity.
+// Basically, the space corresponding to one region spans several OS pages.
+class G1RegionsLargerThanCommitSizeMapper : public G1RegionToSpaceMapper {
+ private:
+  size_t _pages_per_region;
+
+ public:
+  G1RegionsLargerThanCommitSizeMapper(ReservedSpace rs,
+                                      size_t os_commit_granularity,
+                                      size_t alloc_granularity,
+                                      size_t commit_factor,
+                                      MemoryType type) :
+     G1RegionToSpaceMapper(rs, os_commit_granularity, alloc_granularity, type),
+    _pages_per_region(alloc_granularity / (os_commit_granularity * commit_factor)) {
+
+    guarantee(alloc_granularity >= os_commit_granularity, "allocation granularity smaller than commit granularity");
+    _commit_map.resize(rs.size() * commit_factor / alloc_granularity, /* in_resource_area */ false);
+  }
+
+  virtual void commit_regions(uintptr_t start_idx, size_t num_regions) {
+    _storage.commit(start_idx * _pages_per_region, num_regions * _pages_per_region);
+    _commit_map.set_range(start_idx, start_idx + num_regions);
+    fire_on_commit(start_idx, num_regions);
+  }
+
+  virtual void uncommit_regions(uintptr_t start_idx, size_t num_regions) {
+    _storage.uncommit(start_idx * _pages_per_region, num_regions * _pages_per_region);
+    _commit_map.clear_range(start_idx, start_idx + num_regions);
+  }
+};
+
+// G1RegionToSpaceMapper implementation where the region granularity is smaller
+// than the commit granularity.
+// Basically, the contents of one OS page span several regions.
+class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {
+ private:
+  class CommitRefcountArray : public G1BiasedMappedArray<uint> {
+   protected:
+     virtual uint default_value() const { return 0; }
+  };
+
+  size_t _regions_per_page;
+
+  CommitRefcountArray _refcounts;
+
+  uintptr_t region_idx_to_page_idx(uint region) const {
+    return region / _regions_per_page;
+  }
+
+ public:
+  G1RegionsSmallerThanCommitSizeMapper(ReservedSpace rs,
+                                       size_t os_commit_granularity,
+                                       size_t alloc_granularity,
+                                       size_t commit_factor,
+                                       MemoryType type) :
+     G1RegionToSpaceMapper(rs, os_commit_granularity, alloc_granularity, type),
+    _regions_per_page((os_commit_granularity * commit_factor) / alloc_granularity), _refcounts() {
+
+    guarantee((os_commit_granularity * commit_factor) >= alloc_granularity, "commit granularity smaller than allocation granularity");
+    _refcounts.initialize((HeapWord*)rs.base(), (HeapWord*)(rs.base() + rs.size()), os_commit_granularity);
+    _commit_map.resize(rs.size() * commit_factor / alloc_granularity, /* in_resource_area */ false);
+  }
+
+  virtual void commit_regions(uintptr_t start_idx, size_t num_regions) {
+    for (uintptr_t i = start_idx; i < start_idx + num_regions; i++) {
+      assert(!_commit_map.at(i), err_msg("Trying to commit storage at region "INTPTR_FORMAT" that is already committed", i));
+      uintptr_t idx = region_idx_to_page_idx(i);
+      uint old_refcount = _refcounts.get_by_index(idx);
+      if (old_refcount == 0) {
+        _storage.commit(idx, 1);
+      }
+      _refcounts.set_by_index(idx, old_refcount + 1);
+      _commit_map.set_bit(i);
+      fire_on_commit(i, 1);
+    }
+  }
+
+  virtual void uncommit_regions(uintptr_t start_idx, size_t num_regions) {
+    for (uintptr_t i = start_idx; i < start_idx + num_regions; i++) {
+      assert(_commit_map.at(i), err_msg("Trying to uncommit storage at region "INTPTR_FORMAT" that is not committed", i));
+      uintptr_t idx = region_idx_to_page_idx(i);
+      uint old_refcount = _refcounts.get_by_index(idx);
+      assert(old_refcount > 0, "must be");
+      if (old_refcount == 1) {
+        _storage.uncommit(idx, 1);
+      }
+      _refcounts.set_by_index(idx, old_refcount - 1);
+      _commit_map.clear_bit(i);
+    }
+  }
+};
+
+void G1RegionToSpaceMapper::fire_on_commit(uint start_idx, size_t num_regions) {
+  if (_listener != NULL) {
+    _listener->on_commit(start_idx, num_regions);
+  }
+}
+
+G1RegionToSpaceMapper* G1RegionToSpaceMapper::create_mapper(ReservedSpace rs,
+                                                            size_t os_commit_granularity,
+                                                            size_t region_granularity,
+                                                            size_t commit_factor,
+                                                            MemoryType type) {
+
+  if (region_granularity >= (os_commit_granularity * commit_factor)) {
+    return new G1RegionsLargerThanCommitSizeMapper(rs, os_commit_granularity, region_granularity, commit_factor, type);
+  } else {
+    return new G1RegionsSmallerThanCommitSizeMapper(rs, os_commit_granularity, region_granularity, commit_factor, type);
+  }
+}
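The refcounting in G1RegionsSmallerThanCommitSizeMapper is the subtle part: a shared page is committed by its first user and uncommitted by its last. A standalone trace, assuming a made-up geometry of two regions per page:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    static const size_t regions_per_page = 2;       // assumed example geometry
    static std::vector<unsigned> refcounts(4, 0);   // one counter per page
    static std::vector<bool> page_committed(4, false);

    static void commit_region(size_t region) {
      size_t page = region / regions_per_page;      // region_idx_to_page_idx()
      if (refcounts[page]++ == 0) page_committed[page] = true;  // first user commits
    }

    static void uncommit_region(size_t region) {
      size_t page = region / regions_per_page;
      assert(refcounts[page] > 0);
      if (--refcounts[page] == 0) page_committed[page] = false; // last user uncommits
    }

    int main() {
      commit_region(0);    // page 0 committed on behalf of region 0
      commit_region(1);    // region 1 shares page 0, refcount goes to 2
      uncommit_region(0);  // page 0 must stay committed for region 1
      assert(page_committed[0]);
      uncommit_region(1);  // last user gone, page released
      assert(!page_committed[0]);
      return 0;
    }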
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RegionToSpaceMapper.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1REGIONTOSPACEMAPPER_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_G1REGIONTOSPACEMAPPER_HPP
+
+#include "gc_implementation/g1/g1PageBasedVirtualSpace.hpp"
+#include "memory/allocation.hpp"
+#include "utilities/debug.hpp"
+
+class G1MappingChangedListener VALUE_OBJ_CLASS_SPEC {
+ public:
+  // Fired after commit of the memory, i.e. the memory this listener is registered
+  // for can be accessed.
+  virtual void on_commit(uint start_idx, size_t num_regions) = 0;
+};
+
+// Maps region based commit/uncommit requests to the underlying page sized virtual
+// space.
+class G1RegionToSpaceMapper : public CHeapObj<mtGC> {
+ private:
+  G1MappingChangedListener* _listener;
+ protected:
+  // Backing storage.
+  G1PageBasedVirtualSpace _storage;
+  size_t _commit_granularity;
+  size_t _region_granularity;
+  // Mapping management
+  BitMap _commit_map;
+
+  G1RegionToSpaceMapper(ReservedSpace rs, size_t commit_granularity, size_t region_granularity, MemoryType type);
+
+  void fire_on_commit(uint start_idx, size_t num_regions);
+ public:
+  MemRegion reserved() { return _storage.reserved(); }
+
+  void set_mapping_changed_listener(G1MappingChangedListener* listener) { _listener = listener; }
+
+  virtual ~G1RegionToSpaceMapper() {
+    _commit_map.resize(0, /* in_resource_area */ false);
+  }
+
+  bool is_committed(uintptr_t idx) const {
+    return _commit_map.at(idx);
+  }
+
+  virtual void commit_regions(uintptr_t start_idx, size_t num_regions = 1) = 0;
+  virtual void uncommit_regions(uintptr_t start_idx, size_t num_regions = 1) = 0;
+
+  // Creates an appropriate G1RegionToSpaceMapper for the given parameters.
+  // The byte_translation_factor defines how many bytes in a region correspond to
+  // a single byte in the data structure this mapper is for.
+  // E.g. for the card table, this value equals the amount of heap memory that
+  // a single card table entry covers.
+  static G1RegionToSpaceMapper* create_mapper(ReservedSpace rs,
+                                              size_t os_commit_granularity,
+                                              size_t region_granularity,
+                                              size_t byte_translation_factor,
+                                              MemoryType type);
+};
+
+#endif /* SHARE_VM_GC_IMPLEMENTATION_G1_G1REGIONTOSPACEMAPPER_HPP */
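Which of the two mapper flavors create_mapper() returns follows from the granularities alone. The geometries below are assumptions for illustration: 1 MB regions, 4 KB commit pages, a translation factor of 1 for the heap itself and 512 for the card table (one card table byte per 512 heap bytes).

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t page = 4 * 1024, region = 1024 * 1024;

      // Heap storage: region (1 MB) >= page * 1, so the "larger than commit
      // size" mapper is chosen; each region commit touches 256 pages.
      printf("heap: pages per region  = %zu\n", region / (page * 1));

      // Card table: region (1 MB) < page * 512 (2 MB), so the "smaller than
      // commit size" mapper is chosen; one committed page covers the cards
      // of two regions and is shared via refcounts.
      printf("cards: regions per page = %zu\n", (page * 512) / region);
      return 0;
    }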
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -540,6 +540,12 @@
 
 bool G1RemSet::refine_card(jbyte* card_ptr, uint worker_i,
                            bool check_for_refs_into_cset) {
+  assert(_g1->is_in_exact(_ct_bs->addr_for(card_ptr)),
+         err_msg("Card at "PTR_FORMAT" index "SIZE_FORMAT" representing heap at "PTR_FORMAT" (%u) must be in committed heap",
+                 p2i(card_ptr),
+                 _ct_bs->index_for(_ct_bs->addr_for(card_ptr)),
+                 _ct_bs->addr_for(card_ptr),
+                 _g1->addr_to_region(_ct_bs->addr_for(card_ptr))));
 
   // If the card is no longer dirty, nothing to do.
   if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
--- a/hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc_implementation/g1/heapRegion.hpp"
 #include "gc_implementation/g1/satbQueue.hpp"
@@ -38,7 +39,6 @@
   _kind = G1SATBCT;
 }
 
-
 void G1SATBCardTableModRefBS::enqueue(oop pre_val) {
   // Nulls should have been already filtered.
   assert(pre_val->is_oop(true), "Error");
@@ -125,13 +125,52 @@
 }
 #endif
 
+void G1SATBCardTableLoggingModRefBSChangedListener::on_commit(uint start_idx, size_t num_regions) {
+  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_idx), num_regions * HeapRegion::GrainWords);
+  _card_table->clear(mr);
+}
+
 G1SATBCardTableLoggingModRefBS::
 G1SATBCardTableLoggingModRefBS(MemRegion whole_heap,
                                int max_covered_regions) :
   G1SATBCardTableModRefBS(whole_heap, max_covered_regions),
-  _dcqs(JavaThread::dirty_card_queue_set())
+  _dcqs(JavaThread::dirty_card_queue_set()),
+  _listener()
 {
   _kind = G1SATBCTLogging;
+  _listener.set_card_table(this);
+}
+
+void G1SATBCardTableLoggingModRefBS::initialize(G1RegionToSpaceMapper* mapper) {
+  mapper->set_mapping_changed_listener(&_listener);
+
+  _byte_map_size = mapper->reserved().byte_size();
+
+  _guard_index = cards_required(_whole_heap.word_size()) - 1;
+  _last_valid_index = _guard_index - 1;
+
+  HeapWord* low_bound  = _whole_heap.start();
+  HeapWord* high_bound = _whole_heap.end();
+
+  _cur_covered_regions = 1;
+  _covered[0] = _whole_heap;
+
+  _byte_map = (jbyte*) mapper->reserved().start();
+  byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
+  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
+  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");
+
+  if (TraceCardTableModRefBS) {
+    gclog_or_tty->print_cr("G1SATBCardTableModRefBS::G1SATBCardTableModRefBS: ");
+    gclog_or_tty->print_cr("  "
+                  "  &_byte_map[0]: " INTPTR_FORMAT
+                  "  &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
+                  p2i(&_byte_map[0]),
+                  p2i(&_byte_map[_last_valid_index]));
+    gclog_or_tty->print_cr("  "
+                  "  byte_map_base: " INTPTR_FORMAT,
+                  p2i(byte_map_base));
+  }
 }
 
 void
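The listener wiring added in this file is a small observer pattern: the mapper fires on_commit() once memory is accessible, and the registered card table clears the fresh cards. A minimal standalone sketch follows; all names below are illustrative stand-ins, not the VM classes.

    #include <cstddef>
    #include <cstdio>

    struct MappingChangedListener {
      virtual ~MappingChangedListener() {}
      // Fired after commit, i.e. once the memory can be accessed.
      virtual void on_commit(unsigned start_idx, size_t num_regions) = 0;
    };

    struct CardClearingListener : MappingChangedListener {
      virtual void on_commit(unsigned start_idx, size_t num_regions) {
        // The VM builds a MemRegion from the region indices and calls
        // _card_table->clear(mr); printing stands in for that here.
        printf("clearing cards for regions [%u, %zu)\n",
               start_idx, start_idx + num_regions);
      }
    };

    struct Mapper {
      MappingChangedListener* _listener;
      Mapper() : _listener(NULL) {}
      void set_mapping_changed_listener(MappingChangedListener* l) { _listener = l; }
      void commit_regions(unsigned start, size_t n) {
        // ... commit the backing pages first, then notify, as fire_on_commit does.
        if (_listener != NULL) _listener->on_commit(start, n);
      }
    };

    int main() {
      CardClearingListener listener;
      Mapper mapper;
      mapper.set_mapping_changed_listener(&listener);
      mapper.commit_regions(3, 2);  // prints: clearing cards for regions [3, 5)
      return 0;
    }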
--- a/hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1SATBCARDTABLEMODREFBS_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1SATBCARDTABLEMODREFBS_HPP
 
+#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
 #include "memory/cardTableModRefBS.hpp"
 #include "memory/memRegion.hpp"
 #include "oops/oop.inline.hpp"
@@ -33,6 +34,7 @@
 #if INCLUDE_ALL_GCS
 
 class DirtyCardQueueSet;
+class G1SATBCardTableLoggingModRefBS;
 
 // This barrier is specialized to use a logging barrier to support
 // snapshot-at-the-beginning marking.
@@ -126,18 +128,40 @@
     jbyte val = _byte_map[card_index];
     return (val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val();
   }
+};
 
+class G1SATBCardTableLoggingModRefBSChangedListener : public G1MappingChangedListener {
+ private:
+  G1SATBCardTableLoggingModRefBS* _card_table;
+ public:
+  G1SATBCardTableLoggingModRefBSChangedListener() : _card_table(NULL) { }
+
+  void set_card_table(G1SATBCardTableLoggingModRefBS* card_table) { _card_table = card_table; }
+
+  virtual void on_commit(uint start_idx, size_t num_regions);
 };
 
 // Adds card-table logging to the post-barrier.
 // Usual invariant: all dirty cards are logged in the DirtyCardQueueSet.
 class G1SATBCardTableLoggingModRefBS: public G1SATBCardTableModRefBS {
+  friend class G1SATBCardTableLoggingModRefBSChangedListener;
  private:
+  G1SATBCardTableLoggingModRefBSChangedListener _listener;
   DirtyCardQueueSet& _dcqs;
  public:
+  static size_t compute_size(size_t mem_region_size_in_words) {
+    size_t number_of_slots = (mem_region_size_in_words / card_size_in_words);
+    return ReservedSpace::allocation_align_size_up(number_of_slots);
+  }
+
   G1SATBCardTableLoggingModRefBS(MemRegion whole_heap,
                                  int max_covered_regions);
 
+  virtual void initialize() { }
+  virtual void initialize(G1RegionToSpaceMapper* mapper);
+
+  virtual void resize_covered_region(MemRegion new_region) { ShouldNotReachHere(); }
+
   bool is_a(BarrierSet::Name bsn) {
     return bsn == BarrierSet::G1SATBCTLogging ||
       G1SATBCardTableModRefBS::is_a(bsn);
@@ -154,8 +178,6 @@
 
   void write_region_work(MemRegion mr)    { invalidate(mr); }
   void write_ref_array_work(MemRegion mr) { invalidate(mr); }
-
-
 };
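compute_size() above sizes the card table at one byte per card. A worked example with assumed geometry (8-byte HeapWords and 512-byte cards, i.e. 64 words per card): a 1 GB heap has 2^27 words and therefore 2^21 cards, a 2 MB card table before allocation alignment.

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t heap_bytes         = 1024UL * 1024 * 1024;  // 1 GB example heap
      const size_t heap_words         = heap_bytes / 8;        // assume 8-byte words
      const size_t card_size_in_words = 512 / 8;               // assume 512-byte cards
      size_t number_of_slots = heap_words / card_size_in_words;
      printf("card table bytes: %zu\n", number_of_slots);      // 2097152 == 2 MB
      return 0;
    }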
 
 
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -345,11 +345,6 @@
   return low;
 }
 
-#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
-#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
-#endif // _MSC_VER
-
-
 HeapRegion::HeapRegion(uint hrs_index,
                        G1BlockOffsetSharedArray* sharedOffsetArray,
                        MemRegion mr) :
@@ -361,7 +356,7 @@
     _claimed(InitialClaimValue), _evacuation_failed(false),
     _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
     _young_type(NotYoung), _next_young_region(NULL),
-    _next_dirty_cards_region(NULL), _next(NULL), _prev(NULL), _pending_removal(false),
+    _next_dirty_cards_region(NULL), _next(NULL), _prev(NULL),
 #ifdef ASSERT
     _containing_set(NULL),
 #endif // ASSERT
@@ -370,14 +365,20 @@
     _predicted_bytes_to_copy(0)
 {
   _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);
+  assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
+
+  initialize(mr);
+}
+
+void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
+  assert(_rem_set->is_empty(), "Remembered set must be empty");
+
+  G1OffsetTableContigSpace::initialize(mr, clear_space, mangle_space);
+
   _orig_end = mr.end();
-  // Note that initialize() will set the start of the unmarked area of the
-  // region.
   hr_clear(false /*par*/, false /*clear_space*/);
   set_top(bottom());
   record_top_and_timestamp();
-
-  assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
 }
 
 CompactibleSpace* HeapRegion::next_compaction_space() const {
@@ -905,7 +906,7 @@
     }
 
     // If it returns false, verify_for_object() will output the
-    // appropriate messasge.
+    // appropriate message.
     if (do_bot_verify &&
         !g1->is_obj_dead(obj, this) &&
         !_offsets.verify_for_object(p, obj_size)) {
@@ -1036,8 +1037,7 @@
   set_top(bottom());
   set_saved_mark_word(bottom());
   CompactibleSpace::clear(mangle_space);
-  _offsets.zero_bottom_entry();
-  _offsets.initialize_threshold();
+  reset_bot();
 }
 
 void G1OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
@@ -1127,9 +1127,11 @@
   _gc_time_stamp(0)
 {
   _offsets.set_space(this);
-  // false ==> we'll do the clearing if there's clearing to be done.
-  CompactibleSpace::initialize(mr, false, SpaceDecorator::Mangle);
+}
+
+void G1OffsetTableContigSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
+  CompactibleSpace::initialize(mr, clear_space, mangle_space);
   _top = bottom();
-  _offsets.zero_bottom_entry();
-  _offsets.initialize_threshold();
+  reset_bot();
 }
+
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -62,7 +62,7 @@
                 p2i((_hr_)->bottom()), p2i((_hr_)->top()), p2i((_hr_)->end())
 
 // sentinel value for hrs_index
-#define G1_NULL_HRS_INDEX ((uint) -1)
+#define G1_NO_HRS_INDEX ((uint) -1)
 
 // A dirty card to oop closure for heap regions. It
 // knows how to get the G1 heap and how to use the bitmap
@@ -146,6 +146,9 @@
   HeapWord* top() const { return _top; }
 
  protected:
+  // Reset the G1OffsetTableContigSpace.
+  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
+
   HeapWord** top_addr() { return &_top; }
   // Allocation helpers (return NULL if full).
   inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value);
@@ -200,8 +203,7 @@
   virtual void print() const;
 
   void reset_bot() {
-    _offsets.zero_bottom_entry();
-    _offsets.initialize_threshold();
+    _offsets.reset_bot();
   }
 
   void update_bot_for_object(HeapWord* start, size_t word_size) {
@@ -264,7 +266,6 @@
 #ifdef ASSERT
   HeapRegionSetBase* _containing_set;
 #endif // ASSERT
-  bool _pending_removal;
 
   // For parallel heapRegion traversal.
   jint _claimed;
@@ -333,6 +334,12 @@
              G1BlockOffsetSharedArray* sharedOffsetArray,
              MemRegion mr);
 
+  // Initializing the HeapRegion not only resets the data structure, but also
+  // resets the BOT for that heap region.
+  // The default value for clear_space means that we will do the clearing
+  // ourselves if there is clearing to be done. We also always mangle the space.
+  virtual void initialize(MemRegion mr, bool clear_space = false, bool mangle_space = SpaceDecorator::Mangle);
+
   static int    LogOfHRGrainBytes;
   static int    LogOfHRGrainWords;
 
@@ -553,26 +560,6 @@
   // to provide a dummy version of it.
 #endif // ASSERT
 
-  // If we want to remove regions from a list in bulk we can simply tag
-  // them with the pending_removal tag and call the
-  // remove_all_pending() method on the list.
-
-  bool pending_removal() { return _pending_removal; }
-
-  void set_pending_removal(bool pending_removal) {
-    if (pending_removal) {
-      assert(!_pending_removal && containing_set() != NULL,
-             "can only set pending removal to true if it's false and "
-             "the region belongs to a region set");
-    } else {
-      assert( _pending_removal && containing_set() == NULL,
-              "can only set pending removal to false if it's true and "
-              "the region does not belong to a region set");
-    }
-
-    _pending_removal = pending_removal;
-  }
-
   HeapRegion* get_next_young_region() { return _next_young_region; }
   void set_next_young_region(HeapRegion* hr) {
     _next_young_region = hr;
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -373,17 +373,17 @@
                                                        _max_regions,
                                                        &_static_mem_size);
 
-  for (uint i = 0; i < n_par_rs; i++) {
-    for (uint j = 0; j < _max_regions; j++) {
-      set(i, j, InvalidCard);
-    }
-  }
+  invalidate(0, _max_regions);
 }
 
-void FromCardCache::shrink(uint new_num_regions) {
+void FromCardCache::invalidate(uint start_idx, size_t new_num_regions) {
+  guarantee((size_t)start_idx + new_num_regions <= max_uintx,
+            err_msg("Trying to invalidate beyond maximum region, from %u size "SIZE_FORMAT,
+                    start_idx, new_num_regions));
   for (uint i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
-    assert(new_num_regions <= _max_regions, "Must be within max.");
-    for (uint j = new_num_regions; j < _max_regions; j++) {
+    uint end_idx = (start_idx + (uint)new_num_regions);
+    assert(end_idx <= _max_regions, "Must be within max.");
+    for (uint j = start_idx; j < end_idx; j++) {
       set(i, j, InvalidCard);
     }
   }
@@ -407,12 +407,12 @@
   }
 }
 
-void OtherRegionsTable::init_from_card_cache(uint max_regions) {
+void OtherRegionsTable::initialize(uint max_regions) {
   FromCardCache::initialize(HeapRegionRemSet::num_par_rem_sets(), max_regions);
 }
 
-void OtherRegionsTable::shrink_from_card_cache(uint new_num_regions) {
-  FromCardCache::shrink(new_num_regions);
+void OtherRegionsTable::invalidate(uint start_idx, size_t num_regions) {
+  FromCardCache::invalidate(start_idx, num_regions);
 }
 
 void OtherRegionsTable::print_from_card_cache() {
@@ -841,7 +841,7 @@
                                    HeapRegion* hr)
   : _bosa(bosa),
     _m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #%u", hr->hrs_index()), true),
-    _code_roots(), _other_regions(hr, &_m) {
+    _code_roots(), _other_regions(hr, &_m), _iter_state(Unclaimed), _iter_claimed(0) {
   reset_for_par_iteration();
 }
 
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -84,7 +84,7 @@
 
   static void initialize(uint n_par_rs, uint max_num_regions);
 
-  static void shrink(uint new_num_regions);
+  static void invalidate(uint start_idx, size_t num_regions);
 
   static void print(outputStream* out = gclog_or_tty) PRODUCT_RETURN;
 
@@ -213,11 +213,11 @@
 
   // Declare the heap size (in # of regions) to the OtherRegionsTable.
   // (Uses it to initialize from_card_cache).
-  static void init_from_card_cache(uint max_regions);
+  static void initialize(uint max_regions);
 
-  // Declares that only regions i s.t. 0 <= i < new_n_regs are in use.
-  // Make sure any entries for higher regions are invalid.
-  static void shrink_from_card_cache(uint new_num_regions);
+  // Declares that regions i, with start_idx <= i < start_idx + num_regions, are
+  // not in use. Make sure that any entries for these regions are invalid.
+  static void invalidate(uint start_idx, size_t num_regions);
 
   static void print_from_card_cache();
 };
@@ -400,12 +400,11 @@
   // Declare the heap size (in # of regions) to the HeapRegionRemSet(s).
   // (Uses it to initialize from_card_cache).
   static void init_heap(uint max_regions) {
-    OtherRegionsTable::init_from_card_cache(max_regions);
+    OtherRegionsTable::initialize(max_regions);
   }
 
-  // Declares that only regions i s.t. 0 <= i < new_n_regs are in use.
-  static void shrink_heap(uint new_n_regs) {
-    OtherRegionsTable::shrink_from_card_cache(new_n_regs);
+  static void invalidate(uint start_idx, uint num_regions) {
+    OtherRegionsTable::invalidate(start_idx, num_regions);
   }
 
 #ifndef PRODUCT
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -25,163 +25,204 @@
 #include "precompiled.hpp"
 #include "gc_implementation/g1/heapRegion.hpp"
 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
-#include "gc_implementation/g1/heapRegionSet.hpp"
+#include "gc_implementation/g1/heapRegionSet.inline.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
+#include "gc_implementation/g1/concurrentG1Refine.hpp"
 #include "memory/allocation.hpp"
 
-// Private
+void HeapRegionSeq::initialize(G1RegionToSpaceMapper* heap_storage,
+                               G1RegionToSpaceMapper* prev_bitmap,
+                               G1RegionToSpaceMapper* next_bitmap,
+                               G1RegionToSpaceMapper* bot,
+                               G1RegionToSpaceMapper* cardtable,
+                               G1RegionToSpaceMapper* card_counts) {
+  _allocated_heapregions_length = 0;
+
+  _heap_mapper = heap_storage;
+
+  _prev_bitmap_mapper = prev_bitmap;
+  _next_bitmap_mapper = next_bitmap;
+
+  _bot_mapper = bot;
+  _cardtable_mapper = cardtable;
+
+  _card_counts_mapper = card_counts;
+
+  MemRegion reserved = heap_storage->reserved();
+  _regions.initialize(reserved.start(), reserved.end(), HeapRegion::GrainBytes);
+
+  _available_map.resize(_regions.length(), false);
+  _available_map.clear();
+}
 
-uint HeapRegionSeq::find_contiguous_from(uint from, uint num) {
-  uint len = length();
-  assert(num > 1, "use this only for sequences of length 2 or greater");
-  assert(from <= len,
-         err_msg("from: %u should be valid and <= than %u", from, len));
+bool HeapRegionSeq::is_available(uint region) const {
+  return _available_map.at(region);
+}
+
+#ifdef ASSERT
+bool HeapRegionSeq::is_free(HeapRegion* hr) const {
+  return _free_list.contains(hr);
+}
+#endif
+
+HeapRegion* HeapRegionSeq::new_heap_region(uint hrs_index) {
+  HeapWord* bottom = G1CollectedHeap::heap()->bottom_addr_for_region(hrs_index);
+  MemRegion mr(bottom, bottom + HeapRegion::GrainWords);
+  assert(reserved().contains(mr), "invariant");
+  return new HeapRegion(hrs_index, G1CollectedHeap::heap()->bot_shared(), mr);
+}
+
+void HeapRegionSeq::commit_regions(uint index, size_t num_regions) {
+  guarantee(num_regions > 0, "Must commit more than zero regions");
+  guarantee(_num_committed + num_regions <= max_length(), "Cannot commit more than the maximum number of regions");
+
+  _num_committed += (uint)num_regions;
+
+  _heap_mapper->commit_regions(index, num_regions);
+
+  // Also commit auxiliary data
+  _prev_bitmap_mapper->commit_regions(index, num_regions);
+  _next_bitmap_mapper->commit_regions(index, num_regions);
 
-  uint curr = from;
-  uint first = G1_NULL_HRS_INDEX;
-  uint num_so_far = 0;
-  while (curr < len && num_so_far < num) {
-    if (at(curr)->is_empty()) {
-      if (first == G1_NULL_HRS_INDEX) {
-        first = curr;
-        num_so_far = 1;
-      } else {
-        num_so_far += 1;
-      }
-    } else {
-      first = G1_NULL_HRS_INDEX;
-      num_so_far = 0;
+  _bot_mapper->commit_regions(index, num_regions);
+  _cardtable_mapper->commit_regions(index, num_regions);
+
+  _card_counts_mapper->commit_regions(index, num_regions);
+}
+
+void HeapRegionSeq::uncommit_regions(uint start, size_t num_regions) {
+  guarantee(num_regions >= 1, err_msg("Need to specify at least one region to uncommit, tried to uncommit zero regions at %u", start));
+  guarantee(_num_committed >= num_regions, "pre-condition");
+
+  // Print before uncommitting.
+  if (G1CollectedHeap::heap()->hr_printer()->is_active()) {
+    for (uint i = start; i < start + num_regions; i++) {
+      HeapRegion* hr = at(i);
+      G1CollectedHeap::heap()->hr_printer()->uncommit(hr->bottom(), hr->end());
     }
-    curr += 1;
   }
-  assert(num_so_far <= num, "post-condition");
-  if (num_so_far == num) {
-    // we found enough space for the humongous object
-    assert(from <= first && first < len, "post-condition");
-    assert(first < curr && (curr - first) == num, "post-condition");
-    for (uint i = first; i < first + num; ++i) {
-      assert(at(i)->is_empty(), "post-condition");
+
+  _num_committed -= (uint)num_regions;
+
+  _available_map.par_clear_range(start, start + num_regions, BitMap::unknown_range);
+  _heap_mapper->uncommit_regions(start, num_regions);
+
+  // Also uncommit auxiliary data
+  _prev_bitmap_mapper->uncommit_regions(start, num_regions);
+  _next_bitmap_mapper->uncommit_regions(start, num_regions);
+
+  _bot_mapper->uncommit_regions(start, num_regions);
+  _cardtable_mapper->uncommit_regions(start, num_regions);
+
+  _card_counts_mapper->uncommit_regions(start, num_regions);
+}
+
+void HeapRegionSeq::make_regions_available(uint start, uint num_regions) {
+  guarantee(num_regions > 0, "No point in calling this for zero regions");
+  commit_regions(start, num_regions);
+  for (uint i = start; i < start + num_regions; i++) {
+    if (_regions.get_by_index(i) == NULL) {
+      HeapRegion* new_hr = new_heap_region(i);
+      _regions.set_by_index(i, new_hr);
+      _allocated_heapregions_length = MAX2(_allocated_heapregions_length, i + 1);
     }
-    return first;
-  } else {
-    // we failed to find enough space for the humongous object
-    return G1_NULL_HRS_INDEX;
+  }
+
+  _available_map.par_set_range(start, start + num_regions, BitMap::unknown_range);
+
+  for (uint i = start; i < start + num_regions; i++) {
+    assert(is_available(i), err_msg("Just made region %u available but it is apparently not.", i));
+    HeapRegion* hr = at(i);
+    if (G1CollectedHeap::heap()->hr_printer()->is_active()) {
+      G1CollectedHeap::heap()->hr_printer()->commit(hr->bottom(), hr->end());
+    }
+    HeapWord* bottom = G1CollectedHeap::heap()->bottom_addr_for_region(i);
+    MemRegion mr(bottom, bottom + HeapRegion::GrainWords);
+
+    hr->initialize(mr);
+    insert_into_free_list(at(i));
   }
 }
 
-// Public
+uint HeapRegionSeq::expand_by(uint num_regions) {
+  return expand_at(0, num_regions);
+}
+
+uint HeapRegionSeq::expand_at(uint start, uint num_regions) {
+  if (num_regions == 0) {
+    return 0;
+  }
+
+  uint cur = start;
+  uint idx_last_found = 0;
+  uint num_last_found = 0;
 
-void HeapRegionSeq::initialize(HeapWord* bottom, HeapWord* end) {
-  assert((uintptr_t) bottom % HeapRegion::GrainBytes == 0,
-         "bottom should be heap region aligned");
-  assert((uintptr_t) end % HeapRegion::GrainBytes == 0,
-         "end should be heap region aligned");
+  uint expanded = 0;
 
-  _next_search_index = 0;
-  _allocated_length = 0;
+  while (expanded < num_regions &&
+         (num_last_found = find_unavailable_from_idx(cur, &idx_last_found)) > 0) {
+    uint to_expand = MIN2(num_regions - expanded, num_last_found);
+    make_regions_available(idx_last_found, to_expand);
+    expanded += to_expand;
+    cur = idx_last_found + num_last_found + 1;
+  }
 
-  _regions.initialize(bottom, end, HeapRegion::GrainBytes);
+  verify_optional();
+  return expanded;
 }
 
-MemRegion HeapRegionSeq::expand_by(HeapWord* old_end,
-                                   HeapWord* new_end,
-                                   FreeRegionList* list) {
-  assert(old_end < new_end, "don't call it otherwise");
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-
-  HeapWord* next_bottom = old_end;
-  assert(heap_bottom() <= next_bottom, "invariant");
-  while (next_bottom < new_end) {
-    assert(next_bottom < heap_end(), "invariant");
-    uint index = length();
-
-    assert(index < max_length(), "otherwise we cannot expand further");
-    if (index == 0) {
-      // We have not allocated any regions so far
-      assert(next_bottom == heap_bottom(), "invariant");
-    } else {
-      // next_bottom should match the end of the last/previous region
-      assert(next_bottom == at(index - 1)->end(), "invariant");
-    }
+uint HeapRegionSeq::find_contiguous(size_t num, bool empty_only) {
+  uint found = 0;
+  size_t length_found = 0;
+  uint cur = 0;
 
-    if (index == _allocated_length) {
-      // We have to allocate a new HeapRegion.
-      HeapRegion* new_hr = g1h->new_heap_region(index, next_bottom);
-      if (new_hr == NULL) {
-        // allocation failed, we bail out and return what we have done so far
-        return MemRegion(old_end, next_bottom);
-      }
-      assert(_regions.get_by_index(index) == NULL, "invariant");
-      _regions.set_by_index(index, new_hr);
-      increment_allocated_length();
+  while (length_found < num && cur < max_length()) {
+    HeapRegion* hr = _regions.get_by_index(cur);
+    if ((!empty_only && !is_available(cur)) || (is_available(cur) && hr != NULL && hr->is_empty())) {
+      // This region is a potential candidate for allocation into.
+      length_found++;
+    } else {
+      // This region is not a candidate. Restart the search at the next region.
+      found = cur + 1;
+      length_found = 0;
     }
-    // Have to increment the length first, otherwise we will get an
-    // assert failure at(index) below.
-    increment_length();
-    HeapRegion* hr = at(index);
-    list->add_as_tail(hr);
+    cur++;
+  }
 
-    next_bottom = hr->end();
+  if (length_found == num) {
+    for (uint i = found; i < (found + num); i++) {
+      HeapRegion* hr = _regions.get_by_index(i);
+      // sanity check
+      guarantee((!empty_only && !is_available(i)) || (is_available(i) && hr != NULL && hr->is_empty()),
+                err_msg("Found region sequence starting at " UINT32_FORMAT ", length " SIZE_FORMAT
+                        " that is not empty at " UINT32_FORMAT ". Hr is " PTR_FORMAT, found, num, i, p2i(hr)));
+    }
+    return found;
+  } else {
+    return G1_NO_HRS_INDEX;
   }
-  assert(next_bottom == new_end, "post-condition");
-  return MemRegion(old_end, next_bottom);
 }
 
-uint HeapRegionSeq::free_suffix() {
-  uint res = 0;
-  uint index = length();
-  while (index > 0) {
-    index -= 1;
-    if (!at(index)->is_empty()) {
-      break;
+HeapRegion* HeapRegionSeq::next_region_in_heap(const HeapRegion* r) const {
+  guarantee(r != NULL, "Start region must be a valid region");
+  guarantee(is_available(r->hrs_index()), err_msg("Trying to iterate starting from region %u which is not in the heap", r->hrs_index()));
+  for (uint i = r->hrs_index() + 1; i < _allocated_heapregions_length; i++) {
+    HeapRegion* hr = _regions.get_by_index(i);
+    if (is_available(i)) {
+      return hr;
     }
-    res += 1;
   }
-  return res;
-}
-
-uint HeapRegionSeq::find_contiguous(uint num) {
-  assert(num > 1, "use this only for sequences of length 2 or greater");
-  assert(_next_search_index <= length(),
-         err_msg("_next_search_index: %u should be valid and <= than %u",
-                 _next_search_index, length()));
-
-  uint start = _next_search_index;
-  uint res = find_contiguous_from(start, num);
-  if (res == G1_NULL_HRS_INDEX && start > 0) {
-    // Try starting from the beginning. If _next_search_index was 0,
-    // no point in doing this again.
-    res = find_contiguous_from(0, num);
-  }
-  if (res != G1_NULL_HRS_INDEX) {
-    assert(res < length(), err_msg("res: %u should be valid", res));
-    _next_search_index = res + num;
-    assert(_next_search_index <= length(),
-           err_msg("_next_search_index: %u should be valid and <= than %u",
-                   _next_search_index, length()));
-  }
-  return res;
+  return NULL;
 }
 
 void HeapRegionSeq::iterate(HeapRegionClosure* blk) const {
-  iterate_from((HeapRegion*) NULL, blk);
-}
-
-void HeapRegionSeq::iterate_from(HeapRegion* hr, HeapRegionClosure* blk) const {
-  uint hr_index = 0;
-  if (hr != NULL) {
-    hr_index = hr->hrs_index();
-  }
+  uint len = max_length();
 
-  uint len = length();
-  for (uint i = hr_index; i < len; i += 1) {
-    bool res = blk->doHeapRegion(at(i));
-    if (res) {
-      blk->incomplete();
-      return;
+  for (uint i = 0; i < len; i++) {
+    if (!is_available(i)) {
+      continue;
     }
-  }
-  for (uint i = 0; i < hr_index; i += 1) {
+    guarantee(at(i) != NULL, err_msg("Tried to access region %u that has a NULL HeapRegion*", i));
     bool res = blk->doHeapRegion(at(i));
     if (res) {
       blk->incomplete();
@@ -190,71 +231,220 @@
   }
 }
 
+uint HeapRegionSeq::find_unavailable_from_idx(uint start_idx, uint* res_idx) const {
+  guarantee(res_idx != NULL, "checking");
+  guarantee(start_idx <= (max_length() + 1), "checking");
+
+  uint num_regions = 0;
+
+  uint cur = start_idx;
+  while (cur < max_length() && is_available(cur)) {
+    cur++;
+  }
+  if (cur == max_length()) {
+    return num_regions;
+  }
+  *res_idx = cur;
+  while (cur < max_length() && !is_available(cur)) {
+    cur++;
+  }
+  num_regions = cur - *res_idx;
+#ifdef ASSERT
+  for (uint i = *res_idx; i < (*res_idx + num_regions); i++) {
+    assert(!is_available(i), "just checking");
+  }
+  assert(cur == max_length() || num_regions == 0 || is_available(cur),
+         err_msg("The region at the current position %u must be available or at the end of the heap.", cur));
+#endif
+  return num_regions;
+}
+
+uint HeapRegionSeq::start_region_for_worker(uint worker_i, uint num_workers, uint num_regions) const {
+  return num_regions * worker_i / num_workers;
+}
+
+void HeapRegionSeq::par_iterate(HeapRegionClosure* blk, uint worker_id, uint num_workers, jint claim_value) const {
+  const uint start_index = start_region_for_worker(worker_id, num_workers, _allocated_heapregions_length);
+
+  // Every worker will actually look at all regions, skipping over regions that
+  // are currently not committed.
+  // This also (potentially) iterates over regions newly allocated during GC. This
+  // is no problem except for some extra work.
+  for (uint count = 0; count < _allocated_heapregions_length; count++) {
+    const uint index = (start_index + count) % _allocated_heapregions_length;
+    assert(0 <= index && index < _allocated_heapregions_length, "sanity");
+    // Skip over unavailable regions
+    if (!is_available(index)) {
+      continue;
+    }
+    HeapRegion* r = _regions.get_by_index(index);
+    // We'll ignore "continues humongous" regions (we'll process them
+    // when we come across their corresponding "start humongous"
+    // region) and regions already claimed.
+    if (r->claim_value() == claim_value || r->continuesHumongous()) {
+      continue;
+    }
+    // OK, try to claim it
+    if (!r->claimHeapRegion(claim_value)) {
+      continue;
+    }
+    // Success!
+    if (r->startsHumongous()) {
+      // If the region is "starts humongous" we'll iterate over its
+      // "continues humongous" first; in fact we'll do them
+      // first. The order is important. In one case, calling the
+      // closure on the "starts humongous" region might de-allocate
+      // and clear all its "continues humongous" regions and, as a
+      // result, we might end up processing them twice. So, we'll do
+      // them first (note: most closures will ignore them anyway) and
+      // then we'll do the "starts humongous" region.
+      for (uint ch_index = index + 1; ch_index < index + r->region_num(); ch_index++) {
+        HeapRegion* chr = _regions.get_by_index(ch_index);
+
+        assert(chr->continuesHumongous(), "Must be humongous region");
+        assert(chr->humongous_start_region() == r,
+               err_msg("Must work on humongous continuation of the original start region "
+                       PTR_FORMAT ", but is " PTR_FORMAT, p2i(r), p2i(chr)));
+        assert(chr->claim_value() != claim_value,
+               "Must not have been claimed yet because claiming of humongous continuation first claims the start region");
+
+        bool claim_result = chr->claimHeapRegion(claim_value);
+        // We should always be able to claim it; no one else should
+        // be trying to claim this region.
+        guarantee(claim_result, "We should always be able to claim the continuesHumongous part of the humongous object");
+
+        bool res2 = blk->doHeapRegion(chr);
+        if (res2) {
+          return;
+        }
+
+        // Right now, this holds (i.e., no closure that actually
+        // does something with "continues humongous" regions
+        // clears them). We might have to weaken it in the future,
+        // but let's leave these two asserts here for extra safety.
+        assert(chr->continuesHumongous(), "should still be the case");
+        assert(chr->humongous_start_region() == r, "sanity");
+      }
+    }
+
+    bool res = blk->doHeapRegion(r);
+    if (res) {
+      return;
+    }
+  }
+}
+
 uint HeapRegionSeq::shrink_by(uint num_regions_to_remove) {
-  // Reset this in case it's currently pointing into the regions that
-  // we just removed.
-  _next_search_index = 0;
-
   assert(length() > 0, "the region sequence should not be empty");
-  assert(length() <= _allocated_length, "invariant");
-  assert(_allocated_length > 0, "we should have at least one region committed");
+  assert(length() <= _allocated_heapregions_length, "invariant");
+  assert(_allocated_heapregions_length > 0, "we should have at least one region committed");
   assert(num_regions_to_remove < length(), "We should never remove all regions");
 
-  uint i = 0;
-  for (; i < num_regions_to_remove; i++) {
-    HeapRegion* cur = at(length() - 1);
+  if (num_regions_to_remove == 0) {
+    return 0;
+  }
+
+  uint removed = 0;
+  uint cur = _allocated_heapregions_length - 1;
+  uint idx_last_found = 0;
+  uint num_last_found = 0;
 
-    if (!cur->is_empty()) {
-      // We have to give up if the region can not be moved
-      break;
+  while ((removed < num_regions_to_remove) &&
+      (num_last_found = find_empty_from_idx_reverse(cur, &idx_last_found)) > 0) {
+    // Only allow uncommit from the end of the heap.
+    if ((idx_last_found + num_last_found) != _allocated_heapregions_length) {
+      return 0;
+    }
+    uint to_remove = MIN2(num_regions_to_remove - removed, num_last_found);
+
+    uncommit_regions(idx_last_found + num_last_found - to_remove, to_remove);
+
+    cur -= num_last_found;
+    removed += to_remove;
   }
-    assert(!cur->isHumongous(), "Humongous regions should not be empty");
 
-    decrement_length();
-  }
-  return i;
+  verify_optional();
+
+  return removed;
 }
 
-#ifndef PRODUCT
-void HeapRegionSeq::verify_optional() {
-  guarantee(length() <= _allocated_length,
+uint HeapRegionSeq::find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const {
+  guarantee(start_idx < _allocated_heapregions_length, "checking");
+  guarantee(res_idx != NULL, "checking");
+
+  uint num_regions_found = 0;
+
+  jlong cur = start_idx;
+  while (cur != -1 && !(is_available(cur) && at(cur)->is_empty())) {
+    cur--;
+  }
+  if (cur == -1) {
+    return num_regions_found;
+  }
+  jlong old_cur = cur;
+  // cur indexes the first empty region
+  while (cur != -1 && is_available(cur) && at(cur)->is_empty()) {
+    cur--;
+  }
+  *res_idx = cur + 1;
+  num_regions_found = old_cur - cur;
+
+#ifdef ASSERT
+  for (uint i = *res_idx; i < (*res_idx + num_regions_found); i++) {
+    assert(at(i)->is_empty(), "just checking");
+  }
+#endif
+  return num_regions_found;
+}
+
+void HeapRegionSeq::verify() {
+  guarantee(length() <= _allocated_heapregions_length,
             err_msg("invariant: _length: %u _allocated_length: %u",
-                    length(), _allocated_length));
-  guarantee(_allocated_length <= max_length(),
+                    length(), _allocated_heapregions_length));
+  guarantee(_allocated_heapregions_length <= max_length(),
             err_msg("invariant: _allocated_length: %u _max_length: %u",
-                    _allocated_length, max_length()));
-  guarantee(_next_search_index <= length(),
-            err_msg("invariant: _next_search_index: %u _length: %u",
-                    _next_search_index, length()));
+                    _allocated_heapregions_length, max_length()));
 
+  bool prev_committed = true;
+  uint num_committed = 0;
   HeapWord* prev_end = heap_bottom();
-  for (uint i = 0; i < _allocated_length; i += 1) {
+  for (uint i = 0; i < _allocated_heapregions_length; i++) {
+    if (!is_available(i)) {
+      prev_committed = false;
+      continue;
+    }
+    num_committed++;
     HeapRegion* hr = _regions.get_by_index(i);
     guarantee(hr != NULL, err_msg("invariant: i: %u", i));
-    guarantee(hr->bottom() == prev_end,
+    guarantee(!prev_committed || hr->bottom() == prev_end,
               err_msg("invariant i: %u "HR_FORMAT" prev_end: "PTR_FORMAT,
                       i, HR_FORMAT_PARAMS(hr), p2i(prev_end)));
     guarantee(hr->hrs_index() == i,
               err_msg("invariant: i: %u hrs_index(): %u", i, hr->hrs_index()));
-    if (i < length()) {
-      // Asserts will fire if i is >= _length
-      HeapWord* addr = hr->bottom();
-      guarantee(addr_to_region(addr) == hr, "sanity");
-    } else {
-      guarantee(hr->is_empty(), "sanity");
-      guarantee(!hr->isHumongous(), "sanity");
-      // using assert instead of guarantee here since containing_set()
-      // is only available in non-product builds.
-      assert(hr->containing_set() == NULL, "sanity");
-    }
+    // Asserts will fire if i is >= _length
+    HeapWord* addr = hr->bottom();
+    guarantee(addr_to_region(addr) == hr, "sanity");
+    // We cannot check whether the region is part of a particular set: at the time
+    // this method may be called, we have only completed allocation of the regions,
+    // but not put into a region set.
+    prev_committed = true;
     if (hr->startsHumongous()) {
       prev_end = hr->orig_end();
     } else {
       prev_end = hr->end();
     }
   }
-  for (uint i = _allocated_length; i < max_length(); i += 1) {
+  for (uint i = _allocated_heapregions_length; i < max_length(); i++) {
     guarantee(_regions.get_by_index(i) == NULL, err_msg("invariant i: %u", i));
   }
+
+  guarantee(num_committed == _num_committed, err_msg("Found %u committed regions, but should be %u", num_committed, _num_committed));
+  _free_list.verify();
+}
+
+#ifndef PRODUCT
+void HeapRegionSeq::verify_optional() {
+  verify();
 }
 #endif // PRODUCT
+
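
The rewritten expand_at() above satisfies a grow request by scanning the availability
bitmap for maximal runs of uncommitted regions and committing them run by run until
enough regions are available. A minimal standalone sketch of that loop structure,
using a plain bool vector in place of the bitmap (hypothetical names, not the HotSpot
code itself):

    #include <cstdio>
    #include <vector>

    // Find the next run of unavailable slots starting at 'start'; return the
    // run length and store its first index in *res_idx (0 means no run found).
    static unsigned find_unavailable_from(const std::vector<bool>& available,
                                          unsigned start, unsigned* res_idx) {
      unsigned cur = start;
      while (cur < available.size() && available[cur]) cur++;   // skip available
      if (cur == available.size()) return 0;                    // nothing left
      *res_idx = cur;
      while (cur < available.size() && !available[cur]) cur++;  // measure the run
      return cur - *res_idx;
    }

    // Sketch of the expand loop: commit up to 'num' slots, run by run.
    static unsigned expand_at(std::vector<bool>& available, unsigned start, unsigned num) {
      unsigned expanded = 0, cur = start, idx = 0, found = 0;
      while (expanded < num &&
             (found = find_unavailable_from(available, cur, &idx)) > 0) {
        unsigned to_expand = (num - expanded < found) ? num - expanded : found;
        for (unsigned i = idx; i < idx + to_expand; i++) available[i] = true; // "commit"
        expanded += to_expand;
        cur = idx + found;  // continue the search after this run
      }
      return expanded;
    }

    int main() {
      std::vector<bool> available = {true, false, false, true, false};
      printf("expanded %u regions\n", expand_at(available, 0, 3)); // prints 3
    }
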
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -26,6 +26,8 @@
 #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
 
 #include "gc_implementation/g1/g1BiasedArray.hpp"
+#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
+#include "gc_implementation/g1/heapRegionSet.hpp"
 
 class HeapRegion;
 class HeapRegionClosure;
@@ -33,16 +35,20 @@
 
 class G1HeapRegionTable : public G1BiasedMappedArray<HeapRegion*> {
  protected:
-   virtual HeapRegion* default_value() const { return NULL; }
+  virtual HeapRegion* default_value() const { return NULL; }
 };
 
-// This class keeps track of the region metadata (i.e., HeapRegion
-// instances). They are kept in the _regions array in address
-// order. A region's index in the array corresponds to its index in
-// the heap (i.e., 0 is the region at the bottom of the heap, 1 is
-// the one after it, etc.). Two regions that are consecutive in the
-// array should also be adjacent in the address space (i.e.,
-// region(i).end() == region(i+1).bottom().
+// This class keeps track of the actual heap memory, auxiliary data
+// and its metadata (i.e., HeapRegion instances) and the list of free regions.
+//
+// This allows maximum flexibility for deciding what to commit or uncommit given
+// a request from outside.
+//
+// HeapRegions are kept in the _regions array in address order. A region's
+// index in the array corresponds to its index in the heap (i.e., 0 is the
+// region at the bottom of the heap, 1 is the one after it, etc.). Two
+// regions that are consecutive in the array should also be adjacent in the
+// address space (i.e., region(i).end() == region(i+1).bottom()).
 //
 // We create a HeapRegion when we commit the region's address space
 // for the first time. When we uncommit the address space of a
@@ -51,56 +57,94 @@
 //
 // We keep track of three lengths:
 //
-// * _committed_length (returned by length()) is the number of currently
-//   committed regions.
-// * _allocated_length (not exposed outside this class) is the
-//   number of regions for which we have HeapRegions.
+// * _num_committed (returned by length()) is the number of currently
+//   committed regions. These may not be contiguous.
+// * _allocated_heapregions_length (not exposed outside this class) is the
+//   highest region index, plus one, for which a HeapRegion has been allocated.
 // * max_length() returns the maximum number of regions the heap can have.
 //
-// and maintain that: _committed_length <= _allocated_length <= max_length()
 
 class HeapRegionSeq: public CHeapObj<mtGC> {
   friend class VMStructs;
 
   G1HeapRegionTable _regions;
 
-  // The number of regions committed in the heap.
-  uint _committed_length;
-
-  // A hint for which index to start searching from for humongous
-  // allocations.
-  uint _next_search_index;
+  G1RegionToSpaceMapper* _heap_mapper;
+  G1RegionToSpaceMapper* _prev_bitmap_mapper;
+  G1RegionToSpaceMapper* _next_bitmap_mapper;
+  G1RegionToSpaceMapper* _bot_mapper;
+  G1RegionToSpaceMapper* _cardtable_mapper;
+  G1RegionToSpaceMapper* _card_counts_mapper;
 
-  // The number of regions for which we have allocated HeapRegions for.
-  uint _allocated_length;
-
-  // Find a contiguous set of empty regions of length num, starting
-  // from the given index.
-  uint find_contiguous_from(uint from, uint num);
+  FreeRegionList _free_list;
 
-  void increment_allocated_length() {
-    assert(_allocated_length < max_length(), "pre-condition");
-    _allocated_length++;
-  }
+  // Each bit in this bitmap indicates that the corresponding region is available
+  // for allocation.
+  BitMap _available_map;
 
-  void increment_length() {
-    assert(length() < max_length(), "pre-condition");
-    _committed_length++;
-  }
+  // The number of regions committed in the heap.
+  uint _num_committed;
 
-  void decrement_length() {
-    assert(length() > 0, "pre-condition");
-    _committed_length--;
-  }
+  // Internal only. One past the highest region index with an allocated HeapRegion instance.
+  uint _allocated_heapregions_length;
 
   HeapWord* heap_bottom() const { return _regions.bottom_address_mapped(); }
   HeapWord* heap_end() const {return _regions.end_address_mapped(); }
 
+  void make_regions_available(uint index, uint num_regions = 1);
+
+  // Pass down commit calls to the VirtualSpace.
+  void commit_regions(uint index, size_t num_regions = 1);
+  void uncommit_regions(uint index, size_t num_regions = 1);
+
+  // Notify other data structures about change in the heap layout.
+  void update_committed_space(HeapWord* old_end, HeapWord* new_end);
+  // Calculate the starting region for each worker during parallel iteration so
+  // that they do not all start from the same region.
+  uint start_region_for_worker(uint worker_i, uint num_workers, uint num_regions) const;
+
+  // Find a contiguous set of empty or uncommitted regions of length num and return
+  // the index of the first region or G1_NO_HRS_INDEX if the search was unsuccessful.
+  // If only_empty is true, only empty regions are considered.
+  // Searches from bottom to top of the heap, doing a first-fit.
+  uint find_contiguous(size_t num, bool only_empty);
+  // Finds the next sequence of unavailable regions starting from start_idx. Returns the
+  // length of the sequence found. If this result is zero, no such sequence could be found,
+  // otherwise res_idx indicates the start index of these regions.
+  uint find_unavailable_from_idx(uint start_idx, uint* res_idx) const;
+  // Finds the next sequence of empty regions starting from start_idx, going backwards in
+  // the heap. Returns the length of the sequence found. If this value is zero, no
+  // sequence could be found, otherwise res_idx contains the start index of this range.
+  uint find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const;
+  // Allocate a new HeapRegion for the given index.
+  HeapRegion* new_heap_region(uint hrs_index);
+#ifdef ASSERT
+public:
+  bool is_free(HeapRegion* hr) const;
+#endif
+  // Returns whether the given region is available for allocation.
+  bool is_available(uint region) const;
+
  public:
   // Empty constructor, we'll initialize it with the initialize() method.
-  HeapRegionSeq() : _regions(), _committed_length(0), _next_search_index(0), _allocated_length(0) { }
+  HeapRegionSeq() : _regions(), _heap_mapper(NULL), _prev_bitmap_mapper(NULL),
+                    _next_bitmap_mapper(NULL), _bot_mapper(NULL), _cardtable_mapper(NULL),
+                    _card_counts_mapper(NULL), _free_list("Free list", new MasterFreeRegionListMtSafeChecker()),
+                    _available_map(), _num_committed(0), _allocated_heapregions_length(0)
+  { }
 
-  void initialize(HeapWord* bottom, HeapWord* end);
+  void initialize(G1RegionToSpaceMapper* heap_storage,
+                  G1RegionToSpaceMapper* prev_bitmap,
+                  G1RegionToSpaceMapper* next_bitmap,
+                  G1RegionToSpaceMapper* bot,
+                  G1RegionToSpaceMapper* cardtable,
+                  G1RegionToSpaceMapper* card_counts);
+
+  // Return the "dummy" region used for G1AllocRegion. This is currently a hardwired
+  // new HeapRegion that owns HeapRegion at index 0. Since at the moment we commit
+  // the heap from the lowest address, this region (and its associated data
+  // structures) are available and we do not need to check further.
+  HeapRegion* get_dummy_region() { return new_heap_region(0); }
 
   // Return the HeapRegion at the given index. Assume that the index
   // is valid.
@@ -110,45 +154,86 @@
   // HeapRegion, otherwise return NULL.
   inline HeapRegion* addr_to_region(HeapWord* addr) const;
 
+  // Insert the given region into the free region list.
+  inline void insert_into_free_list(HeapRegion* hr);
+
+  // Insert the given region list into the global free region list.
+  void insert_list_into_free_list(FreeRegionList* list) {
+    _free_list.add_ordered(list);
+  }
+
+  HeapRegion* allocate_free_region(bool is_old) {
+    HeapRegion* hr = _free_list.remove_region(is_old);
+
+    if (hr != NULL) {
+      assert(hr->next() == NULL, "Single region should not have next");
+      assert(is_available(hr->hrs_index()), "Must be committed");
+    }
+    return hr;
+  }
+
+  inline void allocate_free_regions_starting_at(uint first, uint num_regions);
+
+  // Remove all regions from the free list.
+  void remove_all_free_regions() {
+    _free_list.remove_all();
+  }
+
+  // Return the number of committed free regions in the heap.
+  uint num_free_regions() const {
+    return _free_list.length();
+  }
+
+  size_t total_capacity_bytes() const {
+    return num_free_regions() * HeapRegion::GrainBytes;
+  }
+
+  // Return the number of available (uncommitted) regions.
+  uint available() const { return max_length() - length(); }
+
   // Return the number of regions that have been committed in the heap.
-  uint length() const { return _committed_length; }
+  uint length() const { return _num_committed; }
 
   // Return the maximum number of regions in the heap.
   uint max_length() const { return (uint)_regions.length(); }
 
-  // Expand the sequence to reflect that the heap has grown from
-  // old_end to new_end. Either create new HeapRegions, or re-use
-  // existing ones, and return them in the given list. Returns the
-  // memory region that covers the newly-created regions. If a
-  // HeapRegion allocation fails, the result memory region might be
-  // smaller than the desired one.
-  MemRegion expand_by(HeapWord* old_end, HeapWord* new_end,
-                      FreeRegionList* list);
+  MemRegion reserved() const { return MemRegion(heap_bottom(), heap_end()); }
+
+  // Expand the sequence to reflect that the heap has grown. Either create new
+  // HeapRegions, or re-use existing ones. Returns the number of regions the
+  // sequence was expanded by. If a HeapRegion allocation fails, the resulting
+  // number of regions might be smaller than what's desired.
+  uint expand_by(uint num_regions);
 
-  // Return the number of contiguous regions at the end of the sequence
-  // that are available for allocation.
-  uint free_suffix();
+  // Makes sure that the regions from start to start+num_regions-1 are available
+  // for allocation. Returns the number of regions that were committed to achieve
+  // this.
+  uint expand_at(uint start, uint num_regions);
 
-  // Find a contiguous set of empty regions of length num and return
-  // the index of the first region or G1_NULL_HRS_INDEX if the
-  // search was unsuccessful.
-  uint find_contiguous(uint num);
+  // Find a contiguous set of empty regions of length num. Returns the start index of
+  // that set, or G1_NO_HRS_INDEX.
+  uint find_contiguous_only_empty(size_t num) { return find_contiguous(num, true); }
+  // Find a contiguous set of empty or unavailable regions of length num. Returns the
+  // start index of that set, or G1_NO_HRS_INDEX.
+  uint find_contiguous_empty_or_unavailable(size_t num) { return find_contiguous(num, false); }
+
+  HeapRegion* next_region_in_heap(const HeapRegion* r) const;
 
   // Apply blk->doHeapRegion() on all committed regions in address order,
   // terminating the iteration early if doHeapRegion() returns true.
   void iterate(HeapRegionClosure* blk) const;
 
-  // As above, but start the iteration from hr and loop around. If hr
-  // is NULL, we start from the first region in the heap.
-  void iterate_from(HeapRegion* hr, HeapRegionClosure* blk) const;
+  void par_iterate(HeapRegionClosure* blk, uint worker_id, uint no_of_par_workers, jint claim_value) const;
 
-  // Tag as uncommitted as many regions that are completely free as
-  // possible, up to num_regions_to_remove, from the suffix of the committed
-  // sequence. Return the actual number of removed regions.
+  // Uncommit up to num_regions_to_remove regions that are completely free.
+  // Return the actual number of uncommitted regions.
   uint shrink_by(uint num_regions_to_remove);
 
+  void verify();
+
   // Do some sanity checking.
   void verify_optional() PRODUCT_RETURN;
 };
 
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
+
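
The par_iterate()/start_region_for_worker() pair declared above staggers each
worker's starting index so threads do not all contend on the same regions; every
worker still visits all regions by wrapping modulo the allocated length. A small
self-contained sketch of that distribution, using the same arithmetic (hypothetical
driver, not HotSpot code):

    #include <cstdio>

    // Same arithmetic as start_region_for_worker() above.
    static unsigned start_region_for_worker(unsigned worker, unsigned num_workers,
                                            unsigned num_regions) {
      return num_regions * worker / num_workers;
    }

    int main() {
      const unsigned num_regions = 10, num_workers = 4;
      for (unsigned w = 0; w < num_workers; w++) {
        unsigned start = start_region_for_worker(w, num_workers, num_regions);
        printf("worker %u starts at region %u, visits ", w, start);
        for (unsigned count = 0; count < num_regions; count++)
          printf("%u ", (start + count) % num_regions);  // wrap-around order
        printf("\n");
      }
      // Starting points come out as 0, 2, 5, 7: spread evenly over 10 regions.
    }
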
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -27,6 +27,7 @@
 
 #include "gc_implementation/g1/heapRegion.hpp"
 #include "gc_implementation/g1/heapRegionSeq.hpp"
+#include "gc_implementation/g1/heapRegionSet.inline.hpp"
 
 inline HeapRegion* HeapRegionSeq::addr_to_region(HeapWord* addr) const {
   assert(addr < heap_end(),
@@ -35,16 +36,23 @@
         err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT, p2i(addr), p2i(heap_bottom())));
 
   HeapRegion* hr = _regions.get_by_address(addr);
-  assert(hr != NULL, "invariant");
   return hr;
 }
 
 inline HeapRegion* HeapRegionSeq::at(uint index) const {
-  assert(index < length(), "pre-condition");
+  assert(is_available(index), "pre-condition");
   HeapRegion* hr = _regions.get_by_index(index);
   assert(hr != NULL, "sanity");
   assert(hr->hrs_index() == index, "sanity");
   return hr;
 }
 
+inline void HeapRegionSeq::insert_into_free_list(HeapRegion* hr) {
+  _free_list.add_ordered(hr);
+}
+
+inline void HeapRegionSeq::allocate_free_regions_starting_at(uint first, uint num_regions) {
+  _free_list.remove_starting_at(at(first), num_regions);
+}
+
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_INLINE_HPP
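
addr_to_region() above resolves an address through the G1BiasedMappedArray, which in
essence divides the offset from the heap bottom by the region size (the real class
additionally biases the array base so the division becomes a shift with no explicit
subtraction). A simplified stand-in, with hypothetical names:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Sketch: map an address to its region index by offset / region_size.
    struct RegionTable {
      uintptr_t bottom;        // heap bottom address
      uintptr_t region_bytes;  // HeapRegion::GrainBytes equivalent
      size_t    length;        // maximum number of regions

      size_t addr_to_index(uintptr_t addr) const {
        assert(addr >= bottom && "address below heap bottom");
        size_t index = (addr - bottom) / region_bytes;
        assert(index < length && "address past heap end");
        return index;          // may name a region that is not committed
      }
    };

    int main() {
      RegionTable t = {0x100000, 0x1000, 4};
      assert(t.addr_to_index(0x102abc) == 2);
      return 0;
    }
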
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.cpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
 #include "gc_implementation/g1/heapRegionSet.inline.hpp"
 
@@ -67,7 +68,7 @@
   // Do the basic verification first before we do the checks over the regions.
   HeapRegionSetBase::verify();
 
-  _verify_in_progress        = true;
+  _verify_in_progress = true;
 }
 
 void HeapRegionSetBase::verify_end() {
@@ -103,62 +104,7 @@
 }
 
 void FreeRegionList::fill_in_ext_msg_extra(hrs_ext_msg* msg) {
-  msg->append(" hd: "PTR_FORMAT" tl: "PTR_FORMAT, head(), tail());
-}
-
-void FreeRegionList::add_as_head_or_tail(FreeRegionList* from_list, bool as_head) {
-  check_mt_safety();
-  from_list->check_mt_safety();
-
-  verify_optional();
-  from_list->verify_optional();
-
-  if (from_list->is_empty()) {
-    return;
-  }
-
-#ifdef ASSERT
-  FreeRegionListIterator iter(from_list);
-  while (iter.more_available()) {
-    HeapRegion* hr = iter.get_next();
-    // In set_containing_set() we check that we either set the value
-    // from NULL to non-NULL or vice versa to catch bugs. So, we have
-    // to NULL it first before setting it to the value.
-    hr->set_containing_set(NULL);
-    hr->set_containing_set(this);
-  }
-#endif // ASSERT
-
-  if (_head == NULL) {
-    assert(length() == 0 && _tail == NULL, hrs_ext_msg(this, "invariant"));
-    _head = from_list->_head;
-    _tail = from_list->_tail;
-  } else {
-    assert(length() > 0 && _tail != NULL, hrs_ext_msg(this, "invariant"));
-    if (as_head) {
-      from_list->_tail->set_next(_head);
-      _head->set_prev(from_list->_tail);
-      _head = from_list->_head;
-    } else {
-      _tail->set_next(from_list->_head);
-      from_list->_head->set_prev(_tail);
-      _tail = from_list->_tail;
-    }
-  }
-
-  _count.increment(from_list->length(), from_list->total_capacity_bytes());
-  from_list->clear();
-
-  verify_optional();
-  from_list->verify_optional();
-}
-
-void FreeRegionList::add_as_head(FreeRegionList* from_list) {
-  add_as_head_or_tail(from_list, true /* as_head */);
-}
-
-void FreeRegionList::add_as_tail(FreeRegionList* from_list) {
-  add_as_head_or_tail(from_list, false /* as_head */);
+  msg->append(" hd: "PTR_FORMAT" tl: "PTR_FORMAT, _head, _tail);
 }
 
 void FreeRegionList::remove_all() {
@@ -191,11 +137,6 @@
     return;
   }
 
-  if (is_empty()) {
-    add_as_head(from_list);
-    return;
-  }
-
   #ifdef ASSERT
   FreeRegionListIterator iter(from_list);
   while (iter.more_available()) {
@@ -208,39 +149,45 @@
   }
   #endif // ASSERT
 
-  HeapRegion* curr_to = _head;
-  HeapRegion* curr_from = from_list->_head;
+  if (is_empty()) {
+    assert(length() == 0 && _tail == NULL, hrs_ext_msg(this, "invariant"));
+    _head = from_list->_head;
+    _tail = from_list->_tail;
+  } else {
+    HeapRegion* curr_to = _head;
+    HeapRegion* curr_from = from_list->_head;
+
+    while (curr_from != NULL) {
+      while (curr_to != NULL && curr_to->hrs_index() < curr_from->hrs_index()) {
+        curr_to = curr_to->next();
+      }
 
-  while (curr_from != NULL) {
-    while (curr_to != NULL && curr_to->hrs_index() < curr_from->hrs_index()) {
-      curr_to = curr_to->next();
+      if (curr_to == NULL) {
+        // The rest of the from list should be added as tail
+        _tail->set_next(curr_from);
+        curr_from->set_prev(_tail);
+        curr_from = NULL;
+      } else {
+        HeapRegion* next_from = curr_from->next();
+
+        curr_from->set_next(curr_to);
+        curr_from->set_prev(curr_to->prev());
+        if (curr_to->prev() == NULL) {
+          _head = curr_from;
+        } else {
+          curr_to->prev()->set_next(curr_from);
+        }
+        curr_to->set_prev(curr_from);
+
+        curr_from = next_from;
+      }
     }
 
-    if (curr_to == NULL) {
-      // The rest of the from list should be added as tail
-      _tail->set_next(curr_from);
-      curr_from->set_prev(_tail);
-      curr_from = NULL;
-    } else {
-      HeapRegion* next_from = curr_from->next();
-
-      curr_from->set_next(curr_to);
-      curr_from->set_prev(curr_to->prev());
-      if (curr_to->prev() == NULL) {
-        _head = curr_from;
-      } else {
-        curr_to->prev()->set_next(curr_from);
-      }
-      curr_to->set_prev(curr_from);
-
-      curr_from = next_from;
+    if (_tail->hrs_index() < from_list->_tail->hrs_index()) {
+      _tail = from_list->_tail;
     }
   }
 
-  if (_tail->hrs_index() < from_list->_tail->hrs_index()) {
-    _tail = from_list->_tail;
-  }
-
   _count.increment(from_list->length(), from_list->total_capacity_bytes());
   from_list->clear();
 
@@ -248,68 +195,59 @@
   from_list->verify_optional();
 }
 
-void FreeRegionList::remove_all_pending(uint target_count) {
+void FreeRegionList::remove_starting_at(HeapRegion* first, uint num_regions) {
   check_mt_safety();
-  assert(target_count > 1, hrs_ext_msg(this, "pre-condition"));
+  assert(num_regions >= 1, hrs_ext_msg(this, "pre-condition"));
   assert(!is_empty(), hrs_ext_msg(this, "pre-condition"));
 
   verify_optional();
   DEBUG_ONLY(uint old_length = length();)
 
-  HeapRegion* curr = _head;
+  HeapRegion* curr = first;
   uint count = 0;
-  while (curr != NULL) {
+  while (count < num_regions) {
     verify_region(curr);
     HeapRegion* next = curr->next();
     HeapRegion* prev = curr->prev();
 
-    if (curr->pending_removal()) {
-      assert(count < target_count,
-             hrs_err_msg("[%s] should not come across more regions "
-                         "pending for removal than target_count: %u",
-                         name(), target_count));
+    assert(count < num_regions,
+           hrs_err_msg("[%s] should not come across more regions "
+                       "pending for removal than num_regions: %u",
+                       name(), num_regions));
 
-      if (prev == NULL) {
-        assert(_head == curr, hrs_ext_msg(this, "invariant"));
-        _head = next;
-      } else {
-        assert(_head != curr, hrs_ext_msg(this, "invariant"));
-        prev->set_next(next);
-      }
-      if (next == NULL) {
-        assert(_tail == curr, hrs_ext_msg(this, "invariant"));
-        _tail = prev;
-      } else {
-        assert(_tail != curr, hrs_ext_msg(this, "invariant"));
-        next->set_prev(prev);
-      }
-      if (_last = curr) {
-        _last = NULL;
-      }
+    if (prev == NULL) {
+      assert(_head == curr, hrs_ext_msg(this, "invariant"));
+      _head = next;
+    } else {
+      assert(_head != curr, hrs_ext_msg(this, "invariant"));
+      prev->set_next(next);
+    }
+    if (next == NULL) {
+      assert(_tail == curr, hrs_ext_msg(this, "invariant"));
+      _tail = prev;
+    } else {
+      assert(_tail != curr, hrs_ext_msg(this, "invariant"));
+      next->set_prev(prev);
+    }
+    if (_last == curr) {
+      _last = NULL;
+    }
 
-      curr->set_next(NULL);
-      curr->set_prev(NULL);
-      remove(curr);
-      curr->set_pending_removal(false);
-
-      count += 1;
+    curr->set_next(NULL);
+    curr->set_prev(NULL);
+    remove(curr);
 
-      // If we have come across the target number of regions we can
-      // just bail out. However, for debugging purposes, we can just
-      // carry on iterating to make sure there are not more regions
-      // tagged with pending removal.
-      DEBUG_ONLY(if (count == target_count) break;)
-    }
+    count++;
     curr = next;
   }
 
-  assert(count == target_count,
-         hrs_err_msg("[%s] count: %u should be == target_count: %u",
-                     name(), count, target_count));
-  assert(length() + target_count == old_length,
+  assert(count == num_regions,
+         hrs_err_msg("[%s] count: %u should be == num_regions: %u",
+                     name(), count, num_regions));
+  assert(length() + num_regions == old_length,
          hrs_err_msg("[%s] new length should be consistent "
-                     "new length: %u old length: %u target_count: %u",
-                     name(), length(), old_length, target_count));
+                     "new length: %u old length: %u num_regions: %u",
+                     name(), length(), old_length, num_regions));
 
   verify_optional();
 }
@@ -348,10 +286,12 @@
       hr->print_on(out);
     }
   }
+
+  out->cr();
 }
 
 void FreeRegionList::verify_list() {
-  HeapRegion* curr = head();
+  HeapRegion* curr = _head;
   HeapRegion* prev1 = NULL;
   HeapRegion* prev0 = NULL;
   uint count = 0;
@@ -379,7 +319,7 @@
     curr = curr->next();
   }
 
-  guarantee(tail() == prev0, err_msg("Expected %s to end with %u but it ended with %u.", name(), tail()->hrs_index(), prev0->hrs_index()));
+  guarantee(_tail == prev0, err_msg("Expected %s to end with %u but it ended with %u.", name(), _tail->hrs_index(), prev0->hrs_index()));
   guarantee(_tail == NULL || _tail->next() == NULL, "_tail should not have a next");
   guarantee(length() == count, err_msg("%s count mismatch. Expected %u, actual %u.", name(), length(), count));
   guarantee(total_capacity_bytes() == capacity, err_msg("%s capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
@@ -463,3 +403,41 @@
               "master humongous set MT safety protocol outside a safepoint");
   }
 }
+
+void FreeRegionList_test() {
+  FreeRegionList l("test");
+
+  const uint num_regions_in_test = 5;
+  // Create a fake heap. It does not need to be valid, as the HeapRegion constructor
+  // does not access it.
+  MemRegion heap(NULL, num_regions_in_test * HeapRegion::GrainWords);
+  // Allocate a fake BOT because the HeapRegion constructor initializes
+  // the BOT.
+  size_t bot_size = G1BlockOffsetSharedArray::compute_size(heap.word_size());
+  HeapWord* bot_data = NEW_C_HEAP_ARRAY(HeapWord, bot_size, mtGC);
+  ReservedSpace bot_rs(G1BlockOffsetSharedArray::compute_size(heap.word_size()));
+  G1RegionToSpaceMapper* bot_storage =
+    G1RegionToSpaceMapper::create_mapper(bot_rs,
+                                         os::vm_page_size(),
+                                         HeapRegion::GrainBytes,
+                                         G1BlockOffsetSharedArray::N_bytes,
+                                         mtGC);
+  G1BlockOffsetSharedArray oa(heap, bot_storage);
+  bot_storage->commit_regions(0, num_regions_in_test);
+  HeapRegion hr0(0, &oa, heap);
+  HeapRegion hr1(1, &oa, heap);
+  HeapRegion hr2(2, &oa, heap);
+  HeapRegion hr3(3, &oa, heap);
+  HeapRegion hr4(4, &oa, heap);
+  l.add_ordered(&hr1);
+  l.add_ordered(&hr0);
+  l.add_ordered(&hr3);
+  l.add_ordered(&hr4);
+  l.add_ordered(&hr2);
+  assert(l.length() == num_regions_in_test, "wrong length");
+  l.verify_list();
+
+  bot_storage->uncommit_regions(0, num_regions_in_test);
+  delete bot_storage;
+  FREE_C_HEAP_ARRAY(HeapWord, bot_data, mtGC);
+}
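
The reworked add_ordered(FreeRegionList*) above is a merge of two index-sorted
doubly-linked lists: for each incoming node it advances in the destination until it
finds the first node with a larger index, splices in front of it, and appends the
remainder once the destination is exhausted. A self-contained sketch of the same
splice logic on a minimal node type (hypothetical, not the HotSpot classes):

    #include <cassert>

    struct Node { unsigned index; Node* prev; Node* next; };

    struct List {
      Node* head = nullptr;
      Node* tail = nullptr;

      // Splice all nodes of 'from' (sorted by index) into this sorted list.
      void add_ordered(List* from) {
        if (from->head == nullptr) return;
        if (head == nullptr) {              // destination empty: take everything
          head = from->head; tail = from->tail;
        } else {
          Node* to = head;
          Node* cur = from->head;
          while (cur != nullptr) {
            while (to != nullptr && to->index < cur->index) to = to->next;
            if (to == nullptr) {            // rest of 'from' goes after tail
              tail->next = cur; cur->prev = tail;
              cur = nullptr;
            } else {                        // splice 'cur' right before 'to'
              Node* next = cur->next;
              cur->next = to; cur->prev = to->prev;
              if (to->prev == nullptr) head = cur; else to->prev->next = cur;
              to->prev = cur;
              cur = next;
            }
          }
          if (tail->index < from->tail->index) tail = from->tail;
        }
        from->head = from->tail = nullptr;  // 'from' is emptied, as in HotSpot
      }
    };

    int main() {
      Node a{1}, b{4}, c{2}, d{3};
      List dst; dst.head = &a; dst.tail = &b; a.next = &b; b.prev = &a;
      List src; src.head = &c; src.tail = &d; c.next = &d; d.prev = &c;
      dst.add_ordered(&src);
      unsigned want = 1;
      for (Node* n = dst.head; n != nullptr; n = n->next) assert(n->index == want++);
    }
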
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.hpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -162,7 +162,7 @@
 // diagnosing failures.
 class hrs_ext_msg : public hrs_err_msg {
 public:
-  hrs_ext_msg(HeapRegionSetBase* set, const char* message) : hrs_err_msg("%s","") {
+  hrs_ext_msg(HeapRegionSetBase* set, const char* message) : hrs_err_msg("%s", "") {
     set->fill_in_ext_msg(this, message);
   }
 };
@@ -192,13 +192,9 @@
 };
 
 // A set that links all the regions added to it in a doubly-linked
-// list. We should try to avoid doing operations that iterate over
+// sorted list. We should try to avoid doing operations that iterate over
 // such lists in performance critical paths. Typically we should
-// add / remove one region at a time or concatenate two lists. There are
-// two ways to treat your lists, ordered and un-ordered. All un-ordered
-// operations are done in constant time. To keep a list ordered only use
-// add_ordered() to add elements to the list. If a list is not ordered
-// from start, there is no way to sort it later.
+// add / remove one region at a time or concatenate two lists.
 
 class FreeRegionListIterator;
 
@@ -210,13 +206,13 @@
   HeapRegion* _tail;
 
   // _last is used to keep track of where we added an element the last
-  // time in ordered lists. It helps to improve performance when adding
-  // several ordered items in a row.
+  // time. It helps to improve performance when adding several ordered items in a row.
   HeapRegion* _last;
 
   static uint _unrealistically_long_length;
 
-  void add_as_head_or_tail(FreeRegionList* from_list, bool as_head);
+  inline HeapRegion* remove_from_head_impl();
+  inline HeapRegion* remove_from_tail_impl();
 
 protected:
   virtual void fill_in_ext_msg_extra(hrs_ext_msg* msg);
@@ -232,8 +228,11 @@
 
   void verify_list();
 
-  HeapRegion* head() { return _head; }
-  HeapRegion* tail() { return _tail; }
+#ifdef ASSERT
+  bool contains(HeapRegion* hr) const {
+    return hr->containing_set() == this;
+  }
+#endif
 
   static void set_unrealistically_long_length(uint len);
 
@@ -242,55 +241,20 @@
   // is determined by hrs_index.
   inline void add_ordered(HeapRegion* hr);
 
-  // It adds hr to the list as the new head. The region should not be
-  // a member of another set.
-  inline void add_as_head(HeapRegion* hr);
-
-  // It adds hr to the list as the new tail. The region should not be
-  // a member of another set.
-  inline void add_as_tail(HeapRegion* hr);
-
-  // It removes and returns the head of the list. It assumes that the
-  // list is not empty so it will return a non-NULL value.
-  inline HeapRegion* remove_head();
-
-  // Convenience method.
-  inline HeapRegion* remove_head_or_null();
-
-  // Removes and returns the last element (_tail) of the list. It assumes
-  // that the list isn't empty so that it can return a non-NULL value.
-  inline HeapRegion* remove_tail();
-
-  // Convenience method
-  inline HeapRegion* remove_tail_or_null();
-
   // Removes from head or tail based on the given argument.
-  inline HeapRegion* remove_region(bool from_head);
+  HeapRegion* remove_region(bool from_head);
 
   // Merge two ordered lists. The result is also ordered. The order is
   // determined by hrs_index.
   void add_ordered(FreeRegionList* from_list);
 
-  // It moves the regions from from_list to this list and empties
-  // from_list. The new regions will appear in the same order as they
-  // were in from_list and be linked in the beginning of this list.
-  void add_as_head(FreeRegionList* from_list);
-
-  // It moves the regions from from_list to this list and empties
-  // from_list. The new regions will appear in the same order as they
-  // were in from_list and be linked in the end of this list.
-  void add_as_tail(FreeRegionList* from_list);
-
   // It empties the list by removing all regions from it.
   void remove_all();
 
-  // It removes all regions in the list that are pending for removal
-  // (i.e., they have been tagged with "pending_removal"). The list
-  // must not be empty, target_count should reflect the exact number
-  // of regions that are pending for removal in the list, and
-  // target_count should be > 1 (currently, we never need to remove a
-  // single region using this).
-  void remove_all_pending(uint target_count);
+  // Remove all (contiguous) regions from first to first + num_regions - 1 from
+  // this list.
+  // num_regions must be >= 1.
+  void remove_starting_at(HeapRegion* first, uint num_regions);
 
   virtual void verify();
 
@@ -298,7 +262,7 @@
 };
 
 // Iterator class that provides a convenient way to iterate over the
-// regions of a HeapRegionLinkedList instance.
+// regions of a FreeRegionList.
 
 class FreeRegionListIterator : public StackObj {
 private:
@@ -324,7 +288,7 @@
   }
 
   FreeRegionListIterator(FreeRegionList* list) : _curr(NULL), _list(list) {
-    _curr = list->head();
+    _curr = list->_head;
   }
 };
 
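The debug-only contains() introduced above avoids walking the list: each region
carries a containing_set back-pointer, so membership is a single pointer comparison.
A minimal sketch of that back-pointer protocol, including the NULL-first transition
rule the asserts enforce (hypothetical names, not HotSpot code):

    #include <cassert>

    struct Set;

    struct Region {
      Set* containing_set = nullptr;
      void set_containing_set(Set* s) {
        // Catch double-insertion bugs: only NULL->set or set->NULL transitions.
        assert((containing_set == nullptr) != (s == nullptr));
        containing_set = s;
      }
    };

    struct Set {
      void add(Region* r)    { r->set_containing_set(this); }
      void remove(Region* r) { r->set_containing_set(nullptr); }
      // O(1) membership test instead of an O(n) list walk.
      bool contains(const Region* r) const { return r->containing_set == this; }
    };

    int main() {
      Set free_list, old_set;
      Region r;
      free_list.add(&r);
      assert(free_list.contains(&r) && !old_set.contains(&r));
      free_list.remove(&r);
      assert(!free_list.contains(&r));
    }
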
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.inline.hpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.inline.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -30,7 +30,8 @@
 inline void HeapRegionSetBase::add(HeapRegion* hr) {
   check_mt_safety();
   assert(hr->containing_set() == NULL, hrs_ext_msg(this, "should not already have a containing set %u"));
-  assert(hr->next() == NULL && hr->prev() == NULL, hrs_ext_msg(this, "should not already be linked"));
+  assert(hr->next() == NULL, hrs_ext_msg(this, "should not already be linked"));
+  assert(hr->prev() == NULL, hrs_ext_msg(this, "should not already be linked"));
 
   _count.increment(1u, hr->capacity());
   hr->set_containing_set(this);
@@ -40,7 +41,8 @@
 inline void HeapRegionSetBase::remove(HeapRegion* hr) {
   check_mt_safety();
   verify_region(hr);
-  assert(hr->next() == NULL && hr->prev() == NULL, hrs_ext_msg(this, "should already be unlinked"));
+  assert(hr->next() == NULL, hrs_ext_msg(this, "should already be unlinked"));
+  assert(hr->prev() == NULL, hrs_ext_msg(this, "should already be unlinked"));
 
   hr->set_containing_set(NULL);
   assert(_count.length() > 0, hrs_ext_msg(this, "pre-condition"));
@@ -48,8 +50,7 @@
 }
 
 inline void FreeRegionList::add_ordered(HeapRegion* hr) {
-  check_mt_safety();
-  assert((length() == 0 && _head == NULL && _tail == NULL) ||
+  assert((length() == 0 && _head == NULL && _tail == NULL && _last == NULL) ||
          (length() >  0 && _head != NULL && _tail != NULL),
          hrs_ext_msg(this, "invariant"));
   // add() will verify the region and check mt safety.
@@ -95,55 +96,48 @@
   _last = hr;
 }
 
-inline void FreeRegionList::add_as_head(HeapRegion* hr) {
-  assert((length() == 0 && _head == NULL && _tail == NULL) ||
-         (length() >  0 && _head != NULL && _tail != NULL),
-         hrs_ext_msg(this, "invariant"));
-  // add() will verify the region and check mt safety.
-  add(hr);
-
-  // Now link the region.
-  if (_head != NULL) {
-    hr->set_next(_head);
-    _head->set_prev(hr);
-  } else {
-    _tail = hr;
-  }
-  _head = hr;
-}
-
-inline void FreeRegionList::add_as_tail(HeapRegion* hr) {
-  check_mt_safety();
-  assert((length() == 0 && _head == NULL && _tail == NULL) ||
-         (length() >  0 && _head != NULL && _tail != NULL),
-         hrs_ext_msg(this, "invariant"));
-  // add() will verify the region and check mt safety.
-  add(hr);
-
-  // Now link the region.
-  if (_tail != NULL) {
-    _tail->set_next(hr);
-    hr->set_prev(_tail);
-  } else {
-    _head = hr;
-  }
-  _tail = hr;
-}
-
-inline HeapRegion* FreeRegionList::remove_head() {
-  assert(!is_empty(), hrs_ext_msg(this, "the list should not be empty"));
-  assert(length() > 0 && _head != NULL && _tail != NULL,
-         hrs_ext_msg(this, "invariant"));
-
-  // We need to unlink it first.
-  HeapRegion* hr = _head;
-  _head = hr->next();
+inline HeapRegion* FreeRegionList::remove_from_head_impl() {
+  HeapRegion* result = _head;
+  _head = result->next();
   if (_head == NULL) {
     _tail = NULL;
   } else {
     _head->set_prev(NULL);
   }
-  hr->set_next(NULL);
+  result->set_next(NULL);
+  return result;
+}
+
+inline HeapRegion* FreeRegionList::remove_from_tail_impl() {
+  HeapRegion* result = _tail;
+
+  _tail = result->prev();
+  if (_tail == NULL) {
+    _head = NULL;
+  } else {
+    _tail->set_next(NULL);
+  }
+  result->set_prev(NULL);
+  return result;
+}
+
+inline HeapRegion* FreeRegionList::remove_region(bool from_head) {
+  check_mt_safety();
+  verify_optional();
+
+  if (is_empty()) {
+    return NULL;
+  }
+  assert(length() > 0 && _head != NULL && _tail != NULL,
+         hrs_ext_msg(this, "invariant"));
+
+  HeapRegion* hr;
+
+  if (from_head) {
+    hr = remove_from_head_impl();
+  } else {
+    hr = remove_from_tail_impl();
+  }
 
   if (_last == hr) {
     _last = NULL;
@@ -154,56 +148,5 @@
   return hr;
 }
 
-inline HeapRegion* FreeRegionList::remove_head_or_null() {
-  check_mt_safety();
-  if (!is_empty()) {
-    return remove_head();
-  } else {
-    return NULL;
-  }
-}
-
-inline HeapRegion* FreeRegionList::remove_tail() {
-  assert(!is_empty(), hrs_ext_msg(this, "The list should not be empty"));
-  assert(length() > 0 && _head != NULL && _tail != NULL,
-         hrs_ext_msg(this, "invariant"));
-
-  // We need to unlink it first
-  HeapRegion* hr = _tail;
-
-  _tail = hr->prev();
-  if (_tail == NULL) {
-    _head = NULL;
-  } else {
-    _tail->set_next(NULL);
-  }
-  hr->set_prev(NULL);
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSET_INLINE_HPP
 
-  if (_last == hr) {
-    _last = NULL;
-  }
-
-  // remove() will verify the region and check mt safety.
-  remove(hr);
-  return hr;
-}
-
-inline HeapRegion* FreeRegionList::remove_tail_or_null() {
-  check_mt_safety();
-
-  if (!is_empty()) {
-    return remove_tail();
-  } else {
-    return NULL;
-  }
-}
-
-inline HeapRegion* FreeRegionList::remove_region(bool from_head) {
-  if (from_head) {
-    return remove_head_or_null();
-  } else {
-    return remove_tail_or_null();
-  }
-}
-
-#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSET_INLINE_HPP
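
The new remove_region() above folds the old remove_head_or_null()/remove_tail_or_null()
variants into one entry point: it returns NULL on an empty list and otherwise unlinks
from the chosen end. A compact sketch of the two unlink paths on a bare node type
(hypothetical, not HotSpot code):

    #include <cassert>

    struct Node { Node* prev = nullptr; Node* next = nullptr; };

    struct Deque {
      Node* head = nullptr;
      Node* tail = nullptr;

      Node* remove_region(bool from_head) {
        if (head == nullptr) return nullptr;  // empty list: no assert, just NULL
        Node* result;
        if (from_head) {
          result = head;
          head = result->next;
          if (head == nullptr) tail = nullptr; else head->prev = nullptr;
          result->next = nullptr;
        } else {
          result = tail;
          tail = result->prev;
          if (tail == nullptr) head = nullptr; else tail->next = nullptr;
          result->prev = nullptr;
        }
        return result;
      }
    };

    int main() {
      Node a, b;
      Deque d; d.head = &a; d.tail = &b; a.next = &b; b.prev = &a;
      assert(d.remove_region(true) == &a);    // from head
      assert(d.remove_region(false) == &b);   // from tail, list now empty
      assert(d.remove_region(true) == nullptr);
    }
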
--- a/hotspot/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -43,10 +43,9 @@
   nonstatic_field(G1HeapRegionTable, _shift_by,         uint)                 \
                                                                               \
   nonstatic_field(HeapRegionSeq,   _regions,            G1HeapRegionTable)    \
-  nonstatic_field(HeapRegionSeq,   _committed_length,   uint)                 \
+  nonstatic_field(HeapRegionSeq,   _num_committed,      uint)                 \
                                                                               \
   nonstatic_field(G1CollectedHeap, _hrs,                HeapRegionSeq)        \
-  nonstatic_field(G1CollectedHeap, _g1_committed,       MemRegion)            \
   nonstatic_field(G1CollectedHeap, _summary_bytes_used, size_t)               \
   nonstatic_field(G1CollectedHeap, _g1mm,               G1MonitoringSupport*) \
   nonstatic_field(G1CollectedHeap, _old_set,            HeapRegionSetBase)    \
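
The vmStructs change above renames the exposed field (_committed_length becomes
_num_committed) and drops _g1_committed; these tables exist so out-of-process tools
such as the serviceability agent can locate VM fields by type name, field name, and
byte offset. A rough, much-simplified illustration of that name/offset table idea
using offsetof (hypothetical, not the real vmStructs machinery):

    #include <cstddef>
    #include <cstdio>
    #include <cstring>

    struct HeapRegionSeq { void* _regions; unsigned _num_committed; };

    struct VMFieldEntry {
      const char* type_name;
      const char* field_name;
      size_t      offset;     // byte offset of the field within the type
    };

    // Sketch of a vmStructs-style table: one row per exposed field.
    static const VMFieldEntry g_fields[] = {
      { "HeapRegionSeq", "_regions",       offsetof(HeapRegionSeq, _regions) },
      { "HeapRegionSeq", "_num_committed", offsetof(HeapRegionSeq, _num_committed) },
    };

    // An external tool would read 'base + offset' out of the target process.
    static size_t field_offset(const char* type, const char* field) {
      for (const VMFieldEntry& e : g_fields)
        if (strcmp(e.type_name, type) == 0 && strcmp(e.field_name, field) == 0)
          return e.offset;
      return (size_t)-1;
    }

    int main() {
      printf("HeapRegionSeq::_num_committed at offset %zu\n",
             field_offset("HeapRegionSeq", "_num_committed"));
    }
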
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -78,6 +78,7 @@
                         (HeapWord*)(heap_rs.base() + heap_rs.size()));
 
   CardTableExtension* const barrier_set = new CardTableExtension(_reserved, 3);
+  barrier_set->initialize();
   _barrier_set = barrier_set;
   oopDesc::set_bs(_barrier_set);
   if (_barrier_set == NULL) {
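
The added barrier_set->initialize() call reflects the split of CardTableModRefBS
setup into a cheap constructor plus an explicit initialize() step (visible in the
cardTableModRefBS.cpp hunk further below): the constructor only records the heap
range and zeroes fields, while sizing and map allocation happen later. A generic
sketch of that two-phase construction pattern, with hypothetical names:

    #include <cassert>
    #include <cstddef>

    class CardTable {
      size_t _heap_words;
      char*  _byte_map;   // allocated in initialize(), not in the constructor
     public:
      // Phase 1: record parameters, set every field to a safe default.
      explicit CardTable(size_t heap_words)
        : _heap_words(heap_words), _byte_map(nullptr) {}

      // Phase 2: the work that may fail or depend on other subsystems.
      void initialize() {
        assert(_byte_map == nullptr && "initialize() called twice");
        _byte_map = new char[_heap_words / 512 + 1]();  // one guard card, zeroed
      }

      bool is_initialized() const { return _byte_map != nullptr; }
      ~CardTable() { delete[] _byte_map; }
    };

    int main() {
      CardTable* bs = new CardTable(1u << 20);
      // ... publish the pointer, wire up other subsystems ...
      bs->initialize();   // the step this patch adds for CardTableExtension
      assert(bs->is_initialized());
      delete bs;
    }
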
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -70,7 +70,7 @@
     "must be a ParallelScavengeHeap");
 
   GCCauseSetter gccs(heap, _gc_cause);
-  if (_gc_cause == GCCause::_gc_locker
+  if (_gc_cause == GCCause::_gc_locker || _gc_cause == GCCause::_wb_young_gc
       DEBUG_ONLY(|| _gc_cause == GCCause::_scavenge_alot)) {
     // If (and only if) the scavenge fails, this will invoke a full gc.
     heap->invoke_scavenge();
--- a/hotspot/src/share/vm/gc_interface/gcCause.cpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/gc_interface/gcCause.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -51,6 +51,9 @@
     case _heap_dump:
       return "Heap Dump Initiated GC";
 
+    case _wb_young_gc:
+      return "WhiteBox Initiated Young GC";
+
     case _no_gc:
       return "No GC";
 
--- a/hotspot/src/share/vm/gc_interface/gcCause.hpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/gc_interface/gcCause.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -46,6 +46,7 @@
     _gc_locker,
     _heap_inspection,
     _heap_dump,
+    _wb_young_gc,
 
     /* implementation independent, but reserved for GC use */
     _no_gc,
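
Adding _wb_young_gc touches both files in lockstep: the enumerator in gcCause.hpp and
its string in the switch in gcCause.cpp. A tiny sketch of that enum/to-string pairing,
with a default branch to flag enumerators whose string was forgotten (hypothetical
subset of the real causes):

    #include <cstdio>

    enum Cause {
      _java_lang_system_gc,
      _wb_young_gc,          // the enumerator this patch introduces
      _no_gc
    };

    static const char* to_string(Cause c) {
      switch (c) {
        case _java_lang_system_gc: return "System.gc()";
        case _wb_young_gc:         return "WhiteBox Initiated Young GC";
        case _no_gc:               return "No GC";
        default:                   return "unknown GCCause";  // keep in sync!
      }
    }

    int main() { printf("%s\n", to_string(_wb_young_gc)); }
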
--- a/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -3402,7 +3402,7 @@
   tty->print_cr("osr._osr_buf: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_buf);
   tty->print_cr("osr._osr_entry: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_entry);
   tty->print_cr("prev_link: " INTPTR_FORMAT, (uintptr_t) this->_prev_link);
-  tty->print_cr("native_mirror: " INTPTR_FORMAT, (uintptr_t) this->_oop_temp);
+  tty->print_cr("native_mirror: " INTPTR_FORMAT, (uintptr_t) p2i(this->_oop_temp));
   tty->print_cr("stack_base: " INTPTR_FORMAT, (uintptr_t) this->_stack_base);
   tty->print_cr("stack_limit: " INTPTR_FORMAT, (uintptr_t) this->_stack_limit);
   tty->print_cr("monitor_base: " INTPTR_FORMAT, (uintptr_t) this->_monitor_base);
--- a/hotspot/src/share/vm/memory/allocation.hpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/memory/allocation.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -265,7 +265,8 @@
   f(ConstantPool) \
   f(ConstantPoolCache) \
   f(Annotation) \
-  f(MethodCounters)
+  f(MethodCounters) \
+  f(Deallocated)
 
 #define METASPACE_OBJ_TYPE_DECLARE(name) name ## Type,
 #define METASPACE_OBJ_TYPE_NAME_CASE(name) case name ## Type: return #name;
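METASPACE_OBJ_TYPES is an X-macro list: the single f(Deallocated) addition feeds both the enum declaration and the name-lookup switch through the two helper macros above, with no further edits needed. A compact sketch of the pattern:

    #include <cstdio>

    #define OBJ_TYPES(f) \
      f(MethodCounters)  \
      f(Deallocated)

    #define DECLARE(name)   name ## Type,
    #define NAME_CASE(name) case name ## Type: return #name;

    enum Type { OBJ_TYPES(DECLARE) TypeCount };

    static const char* type_name(Type t) {
      switch (t) {
        OBJ_TYPES(NAME_CASE)          // expands to one case per list entry
        default: return "unknown";
      }
    }

    int main() { printf("%s\n", type_name(DeallocatedType)); return 0; }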
--- a/hotspot/src/share/vm/memory/cardTableModRefBS.cpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/memory/cardTableModRefBS.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -44,13 +44,6 @@
 // enumerate ref fields that have been modified (since the last
 // enumeration.)
 
-size_t CardTableModRefBS::cards_required(size_t covered_words)
-{
-  // Add one for a guard card, used to detect errors.
-  const size_t words = align_size_up(covered_words, card_size_in_words);
-  return words / card_size_in_words + 1;
-}
-
 size_t CardTableModRefBS::compute_byte_map_size()
 {
   assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
@@ -64,27 +57,50 @@
                                      int max_covered_regions):
   ModRefBarrierSet(max_covered_regions),
   _whole_heap(whole_heap),
-  _guard_index(cards_required(whole_heap.word_size()) - 1),
-  _last_valid_index(_guard_index - 1),
+  _guard_index(0),
+  _guard_region(),
+  _last_valid_index(0),
   _page_size(os::vm_page_size()),
-  _byte_map_size(compute_byte_map_size())
+  _byte_map_size(0),
+  _covered(NULL),
+  _committed(NULL),
+  _cur_covered_regions(0),
+  _byte_map(NULL),
+  byte_map_base(NULL),
+  // LNC functionality
+  _lowest_non_clean(NULL),
+  _lowest_non_clean_chunk_size(NULL),
+  _lowest_non_clean_base_chunk_index(NULL),
+  _last_LNC_resizing_collection(NULL)
 {
   _kind = BarrierSet::CardTableModRef;
 
+  assert((uintptr_t(_whole_heap.start())  & (card_size - 1))  == 0, "heap must start at card boundary");
+  assert((uintptr_t(_whole_heap.end()) & (card_size - 1))  == 0, "heap must end at card boundary");
+
+  assert(card_size <= 512, "card_size must be less than 512"); // why?
+
+  _covered   = new MemRegion[_max_covered_regions];
+  if (_covered == NULL) {
+    vm_exit_during_initialization("Could not allocate card table covered region set.");
+  }
+}
+
+void CardTableModRefBS::initialize() {
+  _guard_index = cards_required(_whole_heap.word_size()) - 1;
+  _last_valid_index = _guard_index - 1;
+
+  _byte_map_size = compute_byte_map_size();
+
   HeapWord* low_bound  = _whole_heap.start();
   HeapWord* high_bound = _whole_heap.end();
-  assert((uintptr_t(low_bound)  & (card_size - 1))  == 0, "heap must start at card boundary");
-  assert((uintptr_t(high_bound) & (card_size - 1))  == 0, "heap must end at card boundary");
 
-  assert(card_size <= 512, "card_size must be less than 512"); // why?
-
-  _covered   = new MemRegion[max_covered_regions];
-  _committed = new MemRegion[max_covered_regions];
-  if (_covered == NULL || _committed == NULL) {
-    vm_exit_during_initialization("couldn't alloc card table covered region set.");
+  _cur_covered_regions = 0;
+  _committed = new MemRegion[_max_covered_regions];
+  if (_committed == NULL) {
+    vm_exit_during_initialization("Could not allocate card table committed region set.");
   }
 
-  _cur_covered_regions = 0;
   const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
     MAX2(_page_size, (size_t) os::vm_allocation_granularity());
   ReservedSpace heap_rs(_byte_map_size, rs_align, false);
@@ -114,20 +130,20 @@
                             !ExecMem, "card table last card");
   *guard_card = last_card;
 
-   _lowest_non_clean =
-    NEW_C_HEAP_ARRAY(CardArr, max_covered_regions, mtGC);
+  _lowest_non_clean =
+    NEW_C_HEAP_ARRAY(CardArr, _max_covered_regions, mtGC);
   _lowest_non_clean_chunk_size =
-    NEW_C_HEAP_ARRAY(size_t, max_covered_regions, mtGC);
+    NEW_C_HEAP_ARRAY(size_t, _max_covered_regions, mtGC);
   _lowest_non_clean_base_chunk_index =
-    NEW_C_HEAP_ARRAY(uintptr_t, max_covered_regions, mtGC);
+    NEW_C_HEAP_ARRAY(uintptr_t, _max_covered_regions, mtGC);
   _last_LNC_resizing_collection =
-    NEW_C_HEAP_ARRAY(int, max_covered_regions, mtGC);
+    NEW_C_HEAP_ARRAY(int, _max_covered_regions, mtGC);
   if (_lowest_non_clean == NULL
       || _lowest_non_clean_chunk_size == NULL
       || _lowest_non_clean_base_chunk_index == NULL
       || _last_LNC_resizing_collection == NULL)
     vm_exit_during_initialization("couldn't allocate an LNC array.");
-  for (int i = 0; i < max_covered_regions; i++) {
+  for (int i = 0; i < _max_covered_regions; i++) {
     _lowest_non_clean[i] = NULL;
     _lowest_non_clean_chunk_size[i] = 0;
     _last_LNC_resizing_collection[i] = -1;
@@ -650,7 +666,7 @@
                                       jbyte val, bool val_equals) {
   jbyte* start    = byte_for(mr.start());
   jbyte* end      = byte_for(mr.last());
-  bool   failures = false;
+  bool failures = false;
   for (jbyte* curr = start; curr <= end; ++curr) {
     jbyte curr_val = *curr;
     bool failed = (val_equals) ? (curr_val != val) : (curr_val == val);
--- a/hotspot/src/share/vm/memory/cardTableModRefBS.hpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/memory/cardTableModRefBS.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -96,12 +96,12 @@
   // The declaration order of these const fields is important; see the
   // constructor before changing.
   const MemRegion _whole_heap;       // the region covered by the card table
-  const size_t    _guard_index;      // index of very last element in the card
+  size_t          _guard_index;      // index of very last element in the card
                                      // table; it is set to a guard value
                                      // (last_card) and should never be modified
-  const size_t    _last_valid_index; // index of the last valid element
+  size_t          _last_valid_index; // index of the last valid element
   const size_t    _page_size;        // page size used when mapping _byte_map
-  const size_t    _byte_map_size;    // in bytes
+  size_t          _byte_map_size;    // in bytes
   jbyte*          _byte_map;         // the card marking array
 
   int _cur_covered_regions;
@@ -123,7 +123,12 @@
  protected:
   // Initialization utilities; covered_words is the size of the covered region
   // in, um, words.
-  inline size_t cards_required(size_t covered_words);
+  inline size_t cards_required(size_t covered_words) {
+    // Add one for a guard card, used to detect errors.
+    const size_t words = align_size_up(covered_words, card_size_in_words);
+    return words / card_size_in_words + 1;
+  }
+
   inline size_t compute_byte_map_size();
 
   // Finds and return the index of the region, if any, to which the given
@@ -137,7 +142,7 @@
   int find_covering_region_containing(HeapWord* addr);
 
   // Resize one of the regions covered by the remembered set.
-  void resize_covered_region(MemRegion new_region);
+  virtual void resize_covered_region(MemRegion new_region);
 
   // Returns the leftmost end of a committed region corresponding to a
   // covered region before covered region "ind", or else "NULL" if "ind" is
@@ -282,6 +287,8 @@
   CardTableModRefBS(MemRegion whole_heap, int max_covered_regions);
   ~CardTableModRefBS();
 
+  virtual void initialize();
+
   // *** Barrier set functions.
 
   bool has_write_ref_pre_barrier() { return false; }
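With setup deferred to initialize(), _guard_index, _last_valid_index and _byte_map_size can no longer be const, which is what the hunk above removes. The now-inlined cards_required() rounds the covered size up to whole cards and adds one guard card. A worked example, assuming the usual 8-byte HeapWords and 512-byte cards (card_size_in_words == 64):

    #include <cstdio>
    #include <cstddef>

    static const size_t card_size_in_words = 64;

    static size_t align_size_up(size_t sz, size_t align) {
      return (sz + align - 1) & ~(align - 1);   // align must be a power of two
    }

    static size_t cards_required(size_t covered_words) {
      // Add one for a guard card, used to detect errors.
      const size_t words = align_size_up(covered_words, card_size_in_words);
      return words / card_size_in_words + 1;
    }

    int main() {
      size_t one_gib_words = (1024u * 1024 * 1024) / 8;   // 2^27 words
      printf("%zu\n", cards_required(one_gib_words));     // 2^21 + 1 = 2097153
      return 0;
    }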
--- a/hotspot/src/share/vm/memory/cardTableRS.cpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/memory/cardTableRS.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -54,6 +54,7 @@
 #else
   _ct_bs = new CardTableModRefBSForCTRS(whole_heap, max_covered_regions);
 #endif
+  _ct_bs->initialize();
   set_bs(_ct_bs);
   _last_cur_val_in_gen = NEW_C_HEAP_ARRAY3(jbyte, GenCollectedHeap::max_gens + 1,
                          mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL);
--- a/hotspot/src/share/vm/memory/filemap.cpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/memory/filemap.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -24,9 +24,14 @@
 
 #include "precompiled.hpp"
 #include "classfile/classLoader.hpp"
+#include "classfile/sharedClassUtil.hpp"
 #include "classfile/symbolTable.hpp"
+#include "classfile/systemDictionaryShared.hpp"
 #include "classfile/altHashing.hpp"
 #include "memory/filemap.hpp"
+#include "memory/metadataFactory.hpp"
+#include "memory/oopFactory.hpp"
+#include "oops/objArrayOop.hpp"
 #include "runtime/arguments.hpp"
 #include "runtime/java.hpp"
 #include "runtime/os.hpp"
@@ -42,7 +47,6 @@
 #endif
 
 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
-
 extern address JVM_FunctionAtStart();
 extern address JVM_FunctionAtEnd();
 
@@ -78,16 +82,27 @@
 void FileMapInfo::fail_continue(const char *msg, ...) {
   va_list ap;
   va_start(ap, msg);
-  if (RequireSharedSpaces) {
-    fail(msg, ap);
+  MetaspaceShared::set_archive_loading_failed();
+  if (PrintSharedArchiveAndExit && _validating_classpath_entry_table) {
+    // If we are doing PrintSharedArchiveAndExit and some of the classpath entries
+    // do not validate, we can still continue "limping" to validate the remaining
+    // entries. No need to quit.
+    tty->print("[");
+    tty->vprint(msg, ap);
+    tty->print_cr("]");
   } else {
-    if (PrintSharedSpaces) {
-      tty->print_cr("UseSharedSpaces: %s", msg);
+    if (RequireSharedSpaces) {
+      fail(msg, ap);
+    } else {
+      if (PrintSharedSpaces) {
+        tty->print_cr("UseSharedSpaces: %s", msg);
+      }
     }
   }
   va_end(ap);
   UseSharedSpaces = false;
-  close();
+  assert(current_info() != NULL, "singleton must be registered");
+  current_info()->close();
 }
 
 // Fill in the fileMapInfo structure with data about this VM instance.
@@ -122,67 +137,201 @@
   }
 }
 
+FileMapInfo::FileMapInfo() {
+  assert(_current_info == NULL, "must be singleton"); // not thread safe
+  _current_info = this;
+  memset(this, 0, sizeof(FileMapInfo));
+  _file_offset = 0;
+  _file_open = false;
+  _header = SharedClassUtil::allocate_file_map_header();
+  _header->_version = _invalid_version;
+}
+
+FileMapInfo::~FileMapInfo() {
+  assert(_current_info == this, "must be singleton"); // not thread safe
+  _current_info = NULL;
+}
+
 void FileMapInfo::populate_header(size_t alignment) {
-  _header._magic = 0xf00baba2;
-  _header._version = _current_version;
-  _header._alignment = alignment;
-  _header._obj_alignment = ObjectAlignmentInBytes;
+  _header->populate(this, alignment);
+}
+
+size_t FileMapInfo::FileMapHeader::data_size() {
+  return SharedClassUtil::file_map_header_size() - sizeof(FileMapInfo::FileMapHeaderBase);
+}
+
+void FileMapInfo::FileMapHeader::populate(FileMapInfo* mapinfo, size_t alignment) {
+  _magic = 0xf00baba2;
+  _version = _current_version;
+  _alignment = alignment;
+  _obj_alignment = ObjectAlignmentInBytes;
+  _classpath_entry_table_size = mapinfo->_classpath_entry_table_size;
+  _classpath_entry_table = mapinfo->_classpath_entry_table;
+  _classpath_entry_size = mapinfo->_classpath_entry_size;
 
   // The following fields are for sanity checks for whether this archive
   // will function correctly with this JVM and the bootclasspath it's
   // invoked with.
 
   // JVM version string ... changes on each build.
-  get_header_version(_header._jvm_ident);
+  get_header_version(_jvm_ident);
+}
+
+void FileMapInfo::allocate_classpath_entry_table() {
+  int bytes = 0;
+  int count = 0;
+  char* strptr = NULL;
+  char* strptr_max = NULL;
+  Thread* THREAD = Thread::current();
 
-  // Build checks on classpath and jar files
-  _header._num_jars = 0;
-  ClassPathEntry *cpe = ClassLoader::classpath_entry(0);
-  for ( ; cpe != NULL; cpe = cpe->next()) {
+  ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
+  size_t entry_size = SharedClassUtil::shared_class_path_entry_size();
 
-    if (cpe->is_jar_file()) {
-      if (_header._num_jars >= JVM_SHARED_JARS_MAX) {
-        fail_stop("Too many jar files to share.", NULL);
-      }
+  for (int pass=0; pass<2; pass++) {
+    ClassPathEntry *cpe = ClassLoader::classpath_entry(0);
+
+    for (int cur_entry = 0 ; cpe != NULL; cpe = cpe->next(), cur_entry++) {
+      const char *name = cpe->name();
+      int name_bytes = (int)(strlen(name) + 1);
 
-      // Jar file - record timestamp and file size.
-      struct stat st;
-      const char *path = cpe->name();
-      if (os::stat(path, &st) != 0) {
-        // If we can't access a jar file in the boot path, then we can't
-        // make assumptions about where classes get loaded from.
-        fail_stop("Unable to open jar file %s.", path);
-      }
-      _header._jar[_header._num_jars]._timestamp = st.st_mtime;
-      _header._jar[_header._num_jars]._filesize = st.st_size;
-      _header._num_jars++;
-    } else {
+      if (pass == 0) {
+        count ++;
+        bytes += (int)entry_size;
+        bytes += name_bytes;
+        if (TraceClassPaths || (TraceClassLoading && Verbose)) {
+          tty->print_cr("[Add main shared path (%s) %s]", (cpe->is_jar_file() ? "jar" : "dir"), name);
+        }
+      } else {
+        SharedClassPathEntry* ent = shared_classpath(cur_entry);
+        if (cpe->is_jar_file()) {
+          struct stat st;
+          if (os::stat(name, &st) != 0) {
+            // The file/dir must exist, or it would not have been added
+            // into ClassLoader::classpath_entry().
+            //
+            // If we can't access a jar file in the boot path, then we can't
+            // make assumptions about where classes get loaded from.
+            FileMapInfo::fail_stop("Unable to open jar file %s.", name);
+          }
 
-      // If directories appear in boot classpath, they must be empty to
-      // avoid having to verify each individual class file.
-      const char* name = ((ClassPathDirEntry*)cpe)->name();
-      if (!os::dir_is_empty(name)) {
-        fail_stop("Boot classpath directory %s is not empty.", name);
+          EXCEPTION_MARK; // The following call should never throw, but would exit VM on error.
+          SharedClassUtil::update_shared_classpath(cpe, ent, st.st_mtime, st.st_size, THREAD);
+        } else {
+          ent->_filesize  = -1;
+          if (!os::dir_is_empty(name)) {
+            ClassLoader::exit_with_path_failure("Cannot have non-empty directory in archived classpaths", name);
+          }
+        }
+        ent->_name = strptr;
+        if (strptr + name_bytes <= strptr_max) {
+          strncpy(strptr, name, (size_t)name_bytes); // name_bytes includes trailing 0.
+          strptr += name_bytes;
+        } else {
+          assert(0, "miscalculated buffer size");
+        }
       }
     }
+
+    if (pass == 0) {
+      EXCEPTION_MARK; // The following call should never throw, but would exit VM on error.
+      Array<u8>* arr = MetadataFactory::new_array<u8>(loader_data, (bytes + 7)/8, THREAD);
+      strptr = (char*)(arr->data());
+      strptr_max = strptr + bytes;
+      SharedClassPathEntry* table = (SharedClassPathEntry*)strptr;
+      strptr += entry_size * count;
+
+      _classpath_entry_table_size = count;
+      _classpath_entry_table = table;
+      _classpath_entry_size = entry_size;
+    }
   }
 }
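allocate_classpath_entry_table() above uses a classic two-pass layout: pass 0 only sizes everything (fixed-size entry structs plus a packed string pool), pass 1 fills a single contiguous allocation. A reduced sketch of the same layout outside the VM:

    #include <cstdio>
    #include <cstring>
    #include <cstdlib>

    struct Entry { const char* _name; };

    int main() {
      const char* names[] = { "rt.jar", "tools.jar" };
      const int count = 2;

      // Pass 0: size everything up front.
      size_t bytes = count * sizeof(Entry);
      for (int i = 0; i < count; i++) bytes += strlen(names[i]) + 1;

      // Pass 1: carve entries, then copy names into the tail string pool.
      char* buf = (char*) malloc(bytes);
      Entry* table = (Entry*) buf;
      char* strptr = buf + count * sizeof(Entry);
      for (int i = 0; i < count; i++) {
        size_t n = strlen(names[i]) + 1;      // includes the trailing '\0'
        memcpy(strptr, names[i], n);
        table[i]._name = strptr;
        strptr += n;
      }

      printf("%s %s\n", table[0]._name, table[1]._name);
      free(buf);
      return 0;
    }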
 
+bool FileMapInfo::validate_classpath_entry_table() {
+  _validating_classpath_entry_table = true;
+
+  int count = _header->_classpath_entry_table_size;
+
+  _classpath_entry_table = _header->_classpath_entry_table;
+  _classpath_entry_size = _header->_classpath_entry_size;
+
+  for (int i=0; i<count; i++) {
+    SharedClassPathEntry* ent = shared_classpath(i);
+    struct stat st;
+    const char* name = ent->_name;
+    bool ok = true;
+    if (TraceClassPaths || (TraceClassLoading && Verbose)) {
+      tty->print_cr("[Checking shared classpath entry: %s]", name);
+    }
+    if (os::stat(name, &st) != 0) {
+      fail_continue("Required classpath entry does not exist: %s", name);
+      ok = false;
+    } else if (ent->is_dir()) {
+      if (!os::dir_is_empty(name)) {
+        fail_continue("directory is not empty: %s", name);
+        ok = false;
+      }
+    } else {
+      if (ent->_timestamp != st.st_mtime ||
+          ent->_filesize != st.st_size) {
+        ok = false;
+        if (PrintSharedArchiveAndExit) {
+          fail_continue(ent->_timestamp != st.st_mtime ?
+                        "Timestamp mismatch" :
+                        "File size mismatch");
+        } else {
+          fail_continue("A jar file is not the one used while building"
+                        " the shared archive file: %s", name);
+        }
+      }
+    }
+    if (ok) {
+      if (TraceClassPaths || (TraceClassLoading && Verbose)) {
+        tty->print_cr("[ok]");
+      }
+    } else if (!PrintSharedArchiveAndExit) {
+      _validating_classpath_entry_table = false;
+      return false;
+    }
+  }
+
+  _classpath_entry_table_size = _header->_classpath_entry_table_size;
+  _validating_classpath_entry_table = false;
+  return true;
+}
+
 
 // Read the FileMapInfo information from the file.
 
 bool FileMapInfo::init_from_file(int fd) {
-
-  size_t n = read(fd, &_header, sizeof(struct FileMapHeader));
-  if (n != sizeof(struct FileMapHeader)) {
+  size_t sz = _header->data_size();
+  char* addr = _header->data();
+  size_t n = os::read(fd, addr, (unsigned int)sz);
+  if (n != sz) {
     fail_continue("Unable to read the file header.");
     return false;
   }
-  if (_header._version != current_version()) {
+  if (_header->_version != current_version()) {
     fail_continue("The shared archive file has the wrong version.");
     return false;
   }
   _file_offset = (long)n;
+
+  size_t info_size = _header->_paths_misc_info_size;
+  _paths_misc_info = NEW_C_HEAP_ARRAY_RETURN_NULL(char, info_size, mtClass);
+  if (_paths_misc_info == NULL) {
+    fail_continue("Unable to read the file header.");
+    return false;
+  }
+  n = os::read(fd, _paths_misc_info, (unsigned int)info_size);
+  if (n != info_size) {
+    fail_continue("Unable to read the shared path info header.");
+    FREE_C_HEAP_ARRAY(char, _paths_misc_info, mtClass);
+    _paths_misc_info = NULL;
+    return false;
+  }
+
+  _file_offset += (long)n;
   return true;
 }
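Because FileMapHeader is now polymorphic, the header can no longer be read or written as raw bytes from its first byte: data() and data_size() deliberately skip the C++ vtable pointer at the front of the object, so only the plain-old-data payload hits the file. A sketch of the trick:

    #include <cstdio>
    #include <cstring>

    struct HeaderBase { virtual bool validate() = 0; virtual ~HeaderBase() {} };

    struct Header : HeaderBase {
      int _magic;
      int _version;

      virtual bool validate() { return _magic == (int)0xf00baba2; }

      char*  data()      { return ((char*)this) + sizeof(HeaderBase); }
      size_t data_size() { return sizeof(Header) - sizeof(HeaderBase); }
    };

    int main() {
      Header out; out._magic = (int)0xf00baba2; out._version = 2;

      char file[64];                              // stand-in for the archive file
      memcpy(file, out.data(), out.data_size());  // write: vtable ptr not stored

      Header in;                                  // fresh object: live vtable
      memcpy(in.data(), file, in.data_size());    // read: vtable ptr untouched
      printf("%d %d\n", in.validate(), in._version);
      return 0;
    }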
 
@@ -237,7 +386,16 @@
 // Write the header to the file, seek to the next allocation boundary.
 
 void FileMapInfo::write_header() {
-  write_bytes_aligned(&_header, sizeof(FileMapHeader));
+  int info_size = ClassLoader::get_shared_paths_misc_info_size();
+
+  _header->_paths_misc_info_size = info_size;
+
+  align_file_position();
+  size_t sz = _header->data_size();
+  char* addr = _header->data();
+  write_bytes(addr, (int)sz); // skip the C++ vtable
+  write_bytes(ClassLoader::get_shared_paths_misc_info(), info_size);
+  align_file_position();
 }
 
 
@@ -247,7 +405,7 @@
   align_file_position();
   size_t used = space->used_bytes_slow(Metaspace::NonClassType);
   size_t capacity = space->capacity_bytes_slow(Metaspace::NonClassType);
-  struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[i];
+  struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[i];
   write_region(i, (char*)space->bottom(), used, capacity, read_only, false);
 }
 
@@ -257,7 +415,7 @@
 void FileMapInfo::write_region(int region, char* base, size_t size,
                                size_t capacity, bool read_only,
                                bool allow_exec) {
-  struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[region];
+  struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[region];
 
   if (_file_open) {
     guarantee(si->_file_offset == _file_offset, "file offset mismatch.");
@@ -339,7 +497,7 @@
 // JVM/TI RedefineClasses() support:
 // Remap the shared readonly space to shared readwrite, private.
 bool FileMapInfo::remap_shared_readonly_as_readwrite() {
-  struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[0];
+  struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[0];
   if (!si->_read_only) {
     // the space is already readwrite so we are done
     return true;
@@ -367,7 +525,7 @@
 
 // Map the whole region at once, assumed to be allocated contiguously.
 ReservedSpace FileMapInfo::reserve_shared_memory() {
-  struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[0];
+  struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[0];
   char* requested_addr = si->_base;
 
   size_t size = FileMapInfo::shared_spaces_size();
@@ -389,7 +547,7 @@
 static const char* shared_region_name[] = { "ReadOnly", "ReadWrite", "MiscData", "MiscCode"};
 
 char* FileMapInfo::map_region(int i) {
-  struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[i];
+  struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[i];
   size_t used = si->_used;
   size_t alignment = os::vm_allocation_granularity();
   size_t size = align_size_up(used, alignment);
@@ -415,7 +573,7 @@
 // Unmap a memory region in the address space.
 
 void FileMapInfo::unmap_region(int i) {
-  struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[i];
+  struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[i];
   size_t used = si->_used;
   size_t size = align_size_up(used, os::vm_allocation_granularity());
   if (!os::unmap_memory(si->_base, size)) {
@@ -432,12 +590,21 @@
 
 
 FileMapInfo* FileMapInfo::_current_info = NULL;
-
+SharedClassPathEntry* FileMapInfo::_classpath_entry_table = NULL;
+int FileMapInfo::_classpath_entry_table_size = 0;
+size_t FileMapInfo::_classpath_entry_size = 0x1234baad;
+bool FileMapInfo::_validating_classpath_entry_table = false;
 
 // Open the shared archive file, read and validate the header
 // information (version, boot classpath, etc.).  If initialization
 // fails, shared spaces are disabled and the file is closed. [See
 // fail_continue.]
+//
+// Validation of the archive is done in two steps:
+//
+// [1] validate_header() - done here. This checks the header, including _paths_misc_info.
+// [2] validate_classpath_entry_table - this is done later, because the table is in the RW
+//     region of the archive, which is not mapped yet.
 bool FileMapInfo::initialize() {
   assert(UseSharedSpaces, "UseSharedSpaces expected.");
 
@@ -451,92 +618,66 @@
   }
 
   init_from_file(_fd);
-  if (!validate()) {
+  if (!validate_header()) {
     return false;
   }
 
-  SharedReadOnlySize =  _header._space[0]._capacity;
-  SharedReadWriteSize = _header._space[1]._capacity;
-  SharedMiscDataSize =  _header._space[2]._capacity;
-  SharedMiscCodeSize =  _header._space[3]._capacity;
+  SharedReadOnlySize =  _header->_space[0]._capacity;
+  SharedReadWriteSize = _header->_space[1]._capacity;
+  SharedMiscDataSize =  _header->_space[2]._capacity;
+  SharedMiscCodeSize =  _header->_space[3]._capacity;
   return true;
 }
 
-
-bool FileMapInfo::validate() {
-  if (_header._version != current_version()) {
-    fail_continue("The shared archive file is the wrong version.");
+bool FileMapInfo::FileMapHeader::validate() {
+  if (_version != current_version()) {
+    FileMapInfo::fail_continue("The shared archive file is the wrong version.");
     return false;
   }
-  if (_header._magic != (int)0xf00baba2) {
-    fail_continue("The shared archive file has a bad magic number.");
+  if (_magic != (int)0xf00baba2) {
+    FileMapInfo::fail_continue("The shared archive file has a bad magic number.");
     return false;
   }
   char header_version[JVM_IDENT_MAX];
   get_header_version(header_version);
-  if (strncmp(_header._jvm_ident, header_version, JVM_IDENT_MAX-1) != 0) {
-    fail_continue("The shared archive file was created by a different"
-                  " version or build of HotSpot.");
-    return false;
-  }
-  if (_header._obj_alignment != ObjectAlignmentInBytes) {
-    fail_continue("The shared archive file's ObjectAlignmentInBytes of %d"
-                  " does not equal the current ObjectAlignmentInBytes of %d.",
-                  _header._obj_alignment, ObjectAlignmentInBytes);
-    return false;
-  }
-
-  // Cannot verify interpreter yet, as it can only be created after the GC
-  // heap has been initialized.
-
-  if (_header._num_jars >= JVM_SHARED_JARS_MAX) {
-    fail_continue("Too many jar files to share.");
+  if (strncmp(_jvm_ident, header_version, JVM_IDENT_MAX-1) != 0) {
+    if (TraceClassPaths) {
+      tty->print_cr("Expected: %s", header_version);
+      tty->print_cr("Actual:   %s", _jvm_ident);
+    }
+    FileMapInfo::fail_continue("The shared archive file was created by a different"
+                  " version or build of HotSpot");
     return false;
   }
-
-  // Build checks on classpath and jar files
-  int num_jars_now = 0;
-  ClassPathEntry *cpe = ClassLoader::classpath_entry(0);
-  for ( ; cpe != NULL; cpe = cpe->next()) {
-
-    if (cpe->is_jar_file()) {
-      if (num_jars_now < _header._num_jars) {
-
-        // Jar file - verify timestamp and file size.
-        struct stat st;
-        const char *path = cpe->name();
-        if (os::stat(path, &st) != 0) {
-          fail_continue("Unable to open jar file %s.", path);
-          return false;
-        }
-        if (_header._jar[num_jars_now]._timestamp != st.st_mtime ||
-            _header._jar[num_jars_now]._filesize != st.st_size) {
-          fail_continue("A jar file is not the one used while building"
-                        " the shared archive file.");
-          return false;
-        }
-      }
-      ++num_jars_now;
-    } else {
-
-      // If directories appear in boot classpath, they must be empty to
-      // avoid having to verify each individual class file.
-      const char* name = ((ClassPathDirEntry*)cpe)->name();
-      if (!os::dir_is_empty(name)) {
-        fail_continue("Boot classpath directory %s is not empty.", name);
-        return false;
-      }
-    }
-  }
-  if (num_jars_now < _header._num_jars) {
-    fail_continue("The number of jar files in the boot classpath is"
-                  " less than the number the shared archive was created with.");
+  if (_obj_alignment != ObjectAlignmentInBytes) {
+    FileMapInfo::fail_continue("The shared archive file's ObjectAlignmentInBytes of %d"
+                  " does not equal the current ObjectAlignmentInBytes of %d.",
+                  _obj_alignment, ObjectAlignmentInBytes);
     return false;
   }
 
   return true;
 }
 
+bool FileMapInfo::validate_header() {
+  bool status = _header->validate();
+
+  if (status) {
+    if (!ClassLoader::check_shared_paths_misc_info(_paths_misc_info, _header->_paths_misc_info_size)) {
+      if (!PrintSharedArchiveAndExit) {
+        fail_continue("shared class paths mismatch (hint: enable -XX:+TraceClassPaths to diagnose the failure)");
+        status = false;
+      }
+    }
+  }
+
+  if (_paths_misc_info != NULL) {
+    FREE_C_HEAP_ARRAY(char, _paths_misc_info, mtClass);
+    _paths_misc_info = NULL;
+  }
+  return status;
+}
+
 // The following method is provided to see whether a given pointer
 // falls in the mapped shared space.
 // Param:
@@ -545,8 +686,8 @@
 // True if the p is within the mapped shared space, otherwise, false.
 bool FileMapInfo::is_in_shared_space(const void* p) {
   for (int i = 0; i < MetaspaceShared::n_regions; i++) {
-    if (p >= _header._space[i]._base &&
-        p < _header._space[i]._base + _header._space[i]._used) {
+    if (p >= _header->_space[i]._base &&
+        p < _header->_space[i]._base + _header->_space[i]._used) {
       return true;
     }
   }
@@ -557,7 +698,7 @@
 void FileMapInfo::print_shared_spaces() {
   gclog_or_tty->print_cr("Shared Spaces:");
   for (int i = 0; i < MetaspaceShared::n_regions; i++) {
-    struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[i];
+    struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[i];
     gclog_or_tty->print("  %s " INTPTR_FORMAT "-" INTPTR_FORMAT,
                         shared_region_name[i],
                         si->_base, si->_base + si->_used);
@@ -570,9 +711,9 @@
   if (map_info) {
     map_info->fail_continue(msg);
     for (int i = 0; i < MetaspaceShared::n_regions; i++) {
-      if (map_info->_header._space[i]._base != NULL) {
+      if (map_info->_header->_space[i]._base != NULL) {
         map_info->unmap_region(i);
-        map_info->_header._space[i]._base = NULL;
+        map_info->_header->_space[i]._base = NULL;
       }
     }
   } else if (DumpSharedSpaces) {
--- a/hotspot/src/share/vm/memory/filemap.hpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/memory/filemap.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -37,30 +37,55 @@
 //  misc data (block offset table, string table, symbols, dictionary, etc.)
 //  tag(666)
 
-static const int JVM_SHARED_JARS_MAX = 128;
-static const int JVM_SPACENAME_MAX = 128;
 static const int JVM_IDENT_MAX = 256;
-static const int JVM_ARCH_MAX = 12;
-
 
 class Metaspace;
 
+class SharedClassPathEntry VALUE_OBJ_CLASS_SPEC {
+public:
+  const char *_name;
+  time_t _timestamp;          // jar timestamp,  0 if is directory
+  long   _filesize;           // jar file size, -1 if is directory
+  bool is_dir() {
+    return _filesize == -1;
+  }
+};
+
 class FileMapInfo : public CHeapObj<mtInternal> {
 private:
+  friend class ManifestStream;
   enum {
     _invalid_version = -1,
-    _current_version = 1
+    _current_version = 2
   };
 
   bool  _file_open;
   int   _fd;
   long  _file_offset;
 
+private:
+  static SharedClassPathEntry* _classpath_entry_table;
+  static int                   _classpath_entry_table_size;
+  static size_t                _classpath_entry_size;
+  static bool                  _validating_classpath_entry_table;
+
   // FileMapHeader describes the shared space data in the file to be
   // mapped.  This structure gets written to a file.  It is not a class, so
   // that the compilers don't add any compiler-private data to it.
 
-  struct FileMapHeader {
+public:
+  struct FileMapHeaderBase : public CHeapObj<mtClass> {
+    virtual bool validate() = 0;
+    virtual void populate(FileMapInfo* info, size_t alignment) = 0;
+  };
+  struct FileMapHeader : FileMapHeaderBase {
+    // Use data() and data_size() to memcopy to/from the FileMapHeader. We need to
+    // avoid read/writing the C++ vtable pointer.
+    static size_t data_size();
+    char* data() {
+      return ((char*)this) + sizeof(FileMapHeaderBase);
+    }
+
     int    _magic;                    // identify file type.
     int    _version;                  // (from enum, above.)
     size_t _alignment;                // how shared archive should be aligned
@@ -78,44 +103,64 @@
     // The following fields are all sanity checks for whether this archive
     // will function correctly with this JVM and the bootclasspath it's
     // invoked with.
-    char  _arch[JVM_ARCH_MAX];            // architecture
     char  _jvm_ident[JVM_IDENT_MAX];      // identifier for jvm
-    int   _num_jars;              // Number of jars in bootclasspath
 
-    // Per jar file data:  timestamp, size.
+    // The _paths_misc_info is a variable-size structure that records "miscellaneous"
+    // information during dumping. It is generated and validated by the
+    // SharedPathsMiscInfo class. See SharedPathsMiscInfo.hpp and sharedClassUtil.hpp for
+    // detailed description.
+    //
+    // The _paths_misc_info data is stored as a byte array in the archive file header,
+    // immediately after the _header field. This information is used only when
+    // checking the validity of the archive and is deallocated after the archive is loaded.
+    //
+    // Note that the _paths_misc_info does NOT include information for JAR files
+    // that existed during dump time. Their information is stored in _classpath_entry_table.
+    int _paths_misc_info_size;
 
-    struct {
-      time_t _timestamp;          // jar timestamp.
-      long   _filesize;           // jar file size.
-    } _jar[JVM_SHARED_JARS_MAX];
-  } _header;
+    // The following is a table of all the class path entries that were used
+    // during dumping. At run time, we require these files to exist and have the same
+    // size/modification time, or else the archive will refuse to load.
+    //
+    // All of these entries must be JAR files. The dumping process would fail if a non-empty
+    // directory was specified in the classpaths. If an empty directory was specified
+    // it is checked by the _paths_misc_info as described above.
+    //
+    // FIXME -- if JAR files in the tail of the list were specified but not used during dumping,
+    // they should be removed from this table, to save space and to avoid spurious
+    // loading failures during runtime.
+    int _classpath_entry_table_size;
+    size_t _classpath_entry_size;
+    SharedClassPathEntry* _classpath_entry_table;
+
+    virtual bool validate();
+    virtual void populate(FileMapInfo* info, size_t alignment);
+  };
+
+  FileMapHeader * _header;
+
   const char* _full_path;
+  char* _paths_misc_info;
 
   static FileMapInfo* _current_info;
 
   bool  init_from_file(int fd);
   void  align_file_position();
+  bool  validate_header_impl();
 
 public:
-  FileMapInfo() {
-    _file_offset = 0;
-    _file_open = false;
-    _header._version = _invalid_version;
-  }
+  FileMapInfo();
+  ~FileMapInfo();
 
   static int current_version()        { return _current_version; }
   void   populate_header(size_t alignment);
-  bool   validate();
+  bool   validate_header();
   void   invalidate();
-  int    version()                    { return _header._version; }
-  size_t alignment()                  { return _header._alignment; }
-  size_t space_capacity(int i)        { return _header._space[i]._capacity; }
-  char*  region_base(int i)           { return _header._space[i]._base; }
-  struct FileMapHeader* header()      { return &_header; }
-
-  static void set_current_info(FileMapInfo* info) {
-    CDS_ONLY(_current_info = info;)
-  }
+  int    version()                    { return _header->_version; }
+  size_t alignment()                  { return _header->_alignment; }
+  size_t space_capacity(int i)        { return _header->_space[i]._capacity; }
+  char*  region_base(int i)           { return _header->_space[i]._base; }
+  struct FileMapHeader* header()      { return _header; }
 
   static FileMapInfo* current_info() {
     CDS_ONLY(return _current_info;)
@@ -146,7 +191,7 @@
 
   // Errors.
   static void fail_stop(const char *msg, ...);
-  void fail_continue(const char *msg, ...);
+  static void fail_continue(const char *msg, ...);
 
   // Return true if given address is in the mapped shared space.
   bool is_in_shared_space(const void* p) NOT_CDS_RETURN_(false);
@@ -160,6 +205,22 @@
 
   // Stop CDS sharing and unmap CDS regions.
   static void stop_sharing_and_unmap(const char* msg);
+
+  static void allocate_classpath_entry_table();
+  bool validate_classpath_entry_table();
+
+  static SharedClassPathEntry* shared_classpath(int index) {
+    char* p = (char*)_classpath_entry_table;
+    p += _classpath_entry_size * index;
+    return (SharedClassPathEntry*)p;
+  }
+  static const char* shared_classpath_name(int index) {
+    return shared_classpath(index)->_name;
+  }
+
+  static int get_number_of_share_classpaths() {
+    return _classpath_entry_table_size;
+  }
 };
 
 #endif // SHARE_VM_MEMORY_FILEMAP_HPP
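shared_classpath() above indexes the entry table with a runtime byte stride (_classpath_entry_size) rather than sizeof(SharedClassPathEntry), because SharedClassUtil may allocate larger, subclassed entries. A sketch of byte-stride indexing (BigEntry is a hypothetical subclass):

    #include <cstdio>
    #include <cstdlib>

    struct BaseEntry { const char* _name; };
    struct BigEntry : BaseEntry { long _extra; };   // what a subclass might add

    static char*  table;
    static size_t entry_size;          // set once at dump time

    static BaseEntry* entry_at(int index) {
      char* p = table;
      p += entry_size * index;         // byte arithmetic, not pointer arithmetic
      return (BaseEntry*) p;
    }

    int main() {
      entry_size = sizeof(BigEntry);
      table = (char*) calloc(2, entry_size);
      entry_at(0)->_name = "rt.jar";
      entry_at(1)->_name = "tools.jar";
      printf("%s %s\n", entry_at(0)->_name, entry_at(1)->_name);
      free(table);
      return 0;
    }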
--- a/hotspot/src/share/vm/memory/genCollectedHeap.cpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -708,15 +708,18 @@
 #else  // INCLUDE_ALL_GCS
     ShouldNotReachHere();
 #endif // INCLUDE_ALL_GCS
+  } else if (cause == GCCause::_wb_young_gc) {
+    // minor collection for WhiteBox API
+    collect(cause, 0);
   } else {
 #ifdef ASSERT
-    if (cause == GCCause::_scavenge_alot) {
-      // minor collection only
-      collect(cause, 0);
-    } else {
-      // Stop-the-world full collection
-      collect(cause, n_gens() - 1);
-    }
+    if (cause == GCCause::_scavenge_alot) {
+      // minor collection only
+      collect(cause, 0);
+    } else {
+      // Stop-the-world full collection
+      collect(cause, n_gens() - 1);
+    }
 #else
     // Stop-the-world full collection
     collect(cause, n_gens() - 1);
--- a/hotspot/src/share/vm/memory/metadataFactory.hpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/memory/metadataFactory.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -79,6 +79,12 @@
   // Deallocation method for metadata
   template <class T>
   static void free_metadata(ClassLoaderData* loader_data, T md) {
+    if (DumpSharedSpaces) {
+      // FIXME: the freeing code is buggy, especially when PrintSharedSpaces is enabled.
+      // Disable for now -- this means if you specify bad classes in your classlist you
+      // may have wasted space inside the archive.
+      return;
+    }
     if (md != NULL) {
       assert(loader_data != NULL, "shouldn't pass null");
       int size = md->size();
--- a/hotspot/src/share/vm/memory/metaspace.cpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/memory/metaspace.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -413,6 +413,7 @@
 VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
   assert_is_size_aligned(bytes, Metaspace::reserve_alignment());
 
+#if INCLUDE_CDS
   // This allocates memory with mmap.  For DumpSharedspaces, try to reserve
   // configurable address, generally at the top of the Java heap so other
   // memory addresses don't conflict.
@@ -428,7 +429,9 @@
       _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
     }
     MetaspaceShared::set_shared_rs(&_rs);
-  } else {
+  } else
+#endif
+  {
     bool large_pages = should_commit_large_pages_when_reserving(bytes);
 
     _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
@@ -2939,11 +2942,14 @@
   // between the lower base and higher address.
   address lower_base;
   address higher_address;
+#if INCLUDE_CDS
   if (UseSharedSpaces) {
     higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
                           (address)(metaspace_base + compressed_class_space_size()));
     lower_base = MIN2(metaspace_base, cds_base);
-  } else {
+  } else
+#endif
+  {
     higher_address = metaspace_base + compressed_class_space_size();
     lower_base = metaspace_base;
 
@@ -2964,6 +2970,7 @@
   }
 }
 
+#if INCLUDE_CDS
 // Return TRUE if the specified metaspace_base and cds_base are close enough
 // to work with compressed klass pointers.
 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
@@ -2974,6 +2981,7 @@
                                 (address)(metaspace_base + compressed_class_space_size()));
   return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
 }
+#endif
 
 // Try to allocate the metaspace at the requested addr.
 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
@@ -2993,6 +3001,7 @@
                                              large_pages,
                                              requested_addr, 0);
   if (!metaspace_rs.is_reserved()) {
+#if INCLUDE_CDS
     if (UseSharedSpaces) {
       size_t increment = align_size_up(1*G, _reserve_alignment);
 
@@ -3007,7 +3016,7 @@
                                      _reserve_alignment, large_pages, addr, 0);
       }
     }
-
+#endif
     // If no successful allocation then try to allocate the space anywhere.  If
     // that fails then OOM doom.  At this point we cannot try allocating the
     // metaspace as if UseCompressedClassPointers is off because too much
@@ -3026,12 +3035,13 @@
   // If we got here then the metaspace got allocated.
   MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
 
+#if INCLUDE_CDS
   // Verify that we can use shared spaces.  Otherwise, turn off CDS.
   if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
     FileMapInfo::stop_sharing_and_unmap(
         "Could not allocate metaspace at a compatible address");
   }
-
+#endif
   set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
                                   UseSharedSpaces ? (address)cds_base : 0);
 
@@ -3115,6 +3125,7 @@
   MetaspaceShared::set_max_alignment(max_alignment);
 
   if (DumpSharedSpaces) {
+#if INCLUDE_CDS
     SharedReadOnlySize  = align_size_up(SharedReadOnlySize,  max_alignment);
     SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
     SharedMiscDataSize  = align_size_up(SharedMiscDataSize,  max_alignment);
@@ -3152,23 +3163,22 @@
     }
 
     Universe::set_narrow_klass_shift(0);
-#endif
-
+#endif // _LP64
+#endif // INCLUDE_CDS
   } else {
+#if INCLUDE_CDS
     // If using shared space, open the file that contains the shared space
     // and map in the memory before initializing the rest of metaspace (so
     // the addresses don't conflict)
     address cds_address = NULL;
     if (UseSharedSpaces) {
       FileMapInfo* mapinfo = new FileMapInfo();
-      memset(mapinfo, 0, sizeof(FileMapInfo));
 
       // Open the shared archive file, read and validate the header. If
       // initialization fails, shared spaces [UseSharedSpaces] are
       // disabled and the file is closed.
       // Map in spaces now also
       if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
-        FileMapInfo::set_current_info(mapinfo);
         cds_total = FileMapInfo::shared_spaces_size();
         cds_address = (address)mapinfo->region_base(0);
       } else {
@@ -3176,21 +3186,23 @@
                "archive file not closed or shared spaces not disabled.");
       }
     }
-
+#endif // INCLUDE_CDS
 #ifdef _LP64
     // If UseCompressedClassPointers is set then allocate the metaspace area
     // above the heap and above the CDS area (if it exists).
     if (using_class_space()) {
       if (UseSharedSpaces) {
+#if INCLUDE_CDS
         char* cds_end = (char*)(cds_address + cds_total);
         cds_end = (char *)align_ptr_up(cds_end, _reserve_alignment);
         allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
+#endif
       } else {
         char* base = (char*)align_ptr_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
         allocate_metaspace_compressed_klass_ptrs(base, 0);
       }
     }
-#endif
+#endif // _LP64
 
     // Initialize these before initializing the VirtualSpaceList
     _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
@@ -3380,6 +3392,10 @@
   assert(!SafepointSynchronize::is_at_safepoint()
          || Thread::current()->is_VM_thread(), "should be the VM thread");
 
+  if (DumpSharedSpaces && PrintSharedSpaces) {
+    record_deallocation(ptr, vsm()->get_raw_word_size(word_size));
+  }
+
   MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
 
   if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
@@ -3417,8 +3433,9 @@
     if (result == NULL) {
       report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
     }
-
-    space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));
+    if (PrintSharedSpaces) {
+      space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));
+    }
 
     // Zero initialize.
     Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0);
@@ -3517,15 +3534,55 @@
 void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
   assert(DumpSharedSpaces, "sanity");
 
-  AllocRecord *rec = new AllocRecord((address)ptr, type, (int)word_size * HeapWordSize);
+  int byte_size = (int)word_size * HeapWordSize;
+  AllocRecord *rec = new AllocRecord((address)ptr, type, byte_size);
+
   if (_alloc_record_head == NULL) {
     _alloc_record_head = _alloc_record_tail = rec;
-  } else {
+  } else if (_alloc_record_tail->_ptr + _alloc_record_tail->_byte_size == (address)ptr) {
     _alloc_record_tail->_next = rec;
     _alloc_record_tail = rec;
+  } else {
+    // slow linear search, but this doesn't happen that often, and only when dumping
+    for (AllocRecord *old = _alloc_record_head; old; old = old->_next) {
+      if (old->_ptr == ptr) {
+        assert(old->_type == MetaspaceObj::DeallocatedType, "sanity");
+        int remain_bytes = old->_byte_size - byte_size;
+        assert(remain_bytes >= 0, "sanity");
+        old->_type = type;
+
+        if (remain_bytes == 0) {
+          delete(rec);
+        } else {
+          address remain_ptr = address(ptr) + byte_size;
+          rec->_ptr = remain_ptr;
+          rec->_byte_size = remain_bytes;
+          rec->_type = MetaspaceObj::DeallocatedType;
+          rec->_next = old->_next;
+          old->_byte_size = byte_size;
+          old->_next = rec;
+        }
+        return;
+      }
+    }
+    assert(0, "reallocating a freed pointer that was not recorded");
   }
 }
 
+void Metaspace::record_deallocation(void* ptr, size_t word_size) {
+  assert(DumpSharedSpaces, "sanity");
+
+  for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
+    if (rec->_ptr == ptr) {
+      assert(rec->_byte_size == (int)word_size * HeapWordSize, "sanity");
+      rec->_type = MetaspaceObj::DeallocatedType;
+      return;
+    }
+  }
+
+  assert(0, "deallocating a pointer that was not recorded");
+}
+
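record_allocation() above now copes with allocation inside a previously freed block during dumping: it finds the matching Deallocated record, retypes its front, and, if bytes remain, splits the tail off as a fresh Deallocated record. A reduced sketch of that split:

    #include <cstdio>

    enum Type { DeallocatedType, MethodType };

    struct Rec { char* _ptr; int _byte_size; Type _type; Rec* _next; };

    static void reuse(Rec* old, int byte_size, Type type) {
      // Precondition: old is Deallocated and starts at the new allocation.
      int remain = old->_byte_size - byte_size;   // >= 0 by assumption
      old->_type = type;
      if (remain > 0) {                           // split off the unused tail
        Rec* tail = new Rec;
        tail->_ptr       = old->_ptr + byte_size;
        tail->_byte_size = remain;
        tail->_type      = DeallocatedType;
        tail->_next      = old->_next;
        old->_byte_size  = byte_size;
        old->_next       = tail;
      }
    }

    int main() {
      char block[64];
      Rec r = { block, 64, DeallocatedType, 0 };
      reuse(&r, 24, MethodType);                  // 24 bytes reused, 40 left over
      printf("%d %d\n", r._byte_size, r._next->_byte_size);  // 24 40
      return 0;
    }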
 void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) {
   assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces");
 
--- a/hotspot/src/share/vm/memory/metaspace.hpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/memory/metaspace.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -171,9 +171,10 @@
   static const MetaspaceTracer* tracer() { return _tracer; }
 
  private:
-  // This is used by DumpSharedSpaces only, where only _vsm is used. So we will
+  // These 2 methods are used by DumpSharedSpaces only, where only _vsm is used. So we will
   // maintain a single list for now.
   void record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size);
+  void record_deallocation(void* ptr, size_t word_size);
 
 #ifdef _LP64
   static void set_narrow_klass_base_and_shift(address metaspace_base, address cds_base);
--- a/hotspot/src/share/vm/memory/metaspaceShared.cpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/memory/metaspaceShared.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -26,6 +26,7 @@
 #include "classfile/dictionary.hpp"
 #include "classfile/loaderConstraints.hpp"
 #include "classfile/placeholders.hpp"
+#include "classfile/sharedClassUtil.hpp"
 #include "classfile/symbolTable.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "code/codeCache.hpp"
@@ -47,6 +48,10 @@
 
 ReservedSpace* MetaspaceShared::_shared_rs = NULL;
 
+bool MetaspaceShared::_link_classes_made_progress;
+bool MetaspaceShared::_check_classes_made_progress;
+bool MetaspaceShared::_has_error_classes;
+bool MetaspaceShared::_archive_loading_failed = false;
 // Read/write a data stream for restoring/preserving metadata pointers and
 // miscellaneous data from/to the shared archive file.
 
@@ -446,6 +451,23 @@
   SystemDictionary::classes_do(collect_classes);
 
   tty->print_cr("Number of classes %d", _global_klass_objects->length());
+  {
+    int num_type_array = 0, num_obj_array = 0, num_inst = 0;
+    for (int i = 0; i < _global_klass_objects->length(); i++) {
+      Klass* k = _global_klass_objects->at(i);
+      if (k->oop_is_instance()) {
+        num_inst ++;
+      } else if (k->oop_is_objArray()) {
+        num_obj_array ++;
+      } else {
+        assert(k->oop_is_typeArray(), "sanity");
+        num_type_array ++;
+      }
+    }
+    tty->print_cr("    instance classes   = %5d", num_inst);
+    tty->print_cr("    obj array classes  = %5d", num_obj_array);
+    tty->print_cr("    type array classes = %5d", num_type_array);
+  }
 
   // Update all the fingerprints in the shared methods.
   tty->print("Calculating fingerprints ... ");
@@ -611,38 +633,58 @@
 #undef fmt_space
 }
 
-static void link_shared_classes(Klass* obj, TRAPS) {
+
+void MetaspaceShared::link_one_shared_class(Klass* obj, TRAPS) {
   Klass* k = obj;
   if (k->oop_is_instance()) {
     InstanceKlass* ik = (InstanceKlass*) k;
     // Link the class to cause the bytecodes to be rewritten and the
-    // cpcache to be created.
-    if (ik->init_state() < InstanceKlass::linked) {
-      ik->link_class(THREAD);
-      guarantee(!HAS_PENDING_EXCEPTION, "exception in class rewriting");
+    // cpcache to be created. Class verification is done according
+    // to -Xverify setting.
+    _link_classes_made_progress |= try_link_class(ik, THREAD);
+    guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");
+  }
+}
+
+void MetaspaceShared::check_one_shared_class(Klass* k) {
+  if (k->oop_is_instance() && InstanceKlass::cast(k)->check_sharing_error_state()) {
+    _check_classes_made_progress = true;
+  }
+}
+
+void MetaspaceShared::link_and_cleanup_shared_classes(TRAPS) {
+  // We need to iterate because verification may cause additional classes
+  // to be loaded.
+  do {
+    _link_classes_made_progress = false;
+    SystemDictionary::classes_do(link_one_shared_class, THREAD);
+    guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");
+  } while (_link_classes_made_progress);
+
+  if (_has_error_classes) {
+    // Mark all classes whose super class or interfaces failed verification.
+    do {
+      // Not completely sure if we need to do this iteratively. Anyway,
+      // we should come here only if there are unverifiable classes, which
+      // shouldn't happen in normal cases. So better safe than sorry.
+      _check_classes_made_progress = false;
+      SystemDictionary::classes_do(check_one_shared_class);
+    } while (_check_classes_made_progress);
+
+    if (IgnoreUnverifiableClassesDuringDump) {
+      // This is useful when running JCK or SQE tests. You should not
+      // enable this when running real apps.
+      SystemDictionary::remove_classes_in_error_state();
+    } else {
+      tty->print_cr("Please remove the unverifiable classes from your class list and try again");
+      exit(1);
     }
   }
 }
 
-
-// Support for a simple checksum of the contents of the class list
-// file to prevent trivial tampering. The algorithm matches that in
-// the MakeClassList program used by the J2SE build process.
-#define JSUM_SEED ((jlong)CONST64(0xcafebabebabecafe))
-static jlong
-jsum(jlong start, const char *buf, const int len)
-{
-    jlong h = start;
-    char *p = (char *)buf, *e = p + len;
-    while (p < e) {
-        char c = *p++;
-        if (c <= ' ') {
-            /* Skip spaces and control characters */
-            continue;
-        }
-        h = 31 * h + c;
-    }
-    return h;
+void MetaspaceShared::prepare_for_dumping() {
+  ClassLoader::initialize_shared_path();
+  FileMapInfo::allocate_classpath_entry_table();
 }
 
 // Preload classes from a list, populate the shared spaces and dump to a
@@ -651,72 +693,112 @@
   TraceTime timer("Dump Shared Spaces", TraceStartupTime);
   ResourceMark rm;
 
+  tty->print_cr("Allocated shared space: %d bytes at " PTR_FORMAT,
+                MetaspaceShared::shared_rs()->size(),
+                MetaspaceShared::shared_rs()->base());
+
   // Preload classes to be shared.
   // Should use some os:: method rather than fopen() here. aB.
-  // Construct the path to the class list (in jre/lib)
-  // Walk up two directories from the location of the VM and
-  // optionally tack on "lib" (depending on platform)
-  char class_list_path[JVM_MAXPATHLEN];
-  os::jvm_path(class_list_path, sizeof(class_list_path));
-  for (int i = 0; i < 3; i++) {
-    char *end = strrchr(class_list_path, *os::file_separator());
-    if (end != NULL) *end = '\0';
+  const char* class_list_path;
+  if (SharedClassListFile == NULL) {
+    // Construct the path to the class list (in jre/lib)
+    // Walk up two directories from the location of the VM and
+    // optionally tack on "lib" (depending on platform)
+    char class_list_path_str[JVM_MAXPATHLEN];
+    os::jvm_path(class_list_path_str, sizeof(class_list_path_str));
+    for (int i = 0; i < 3; i++) {
+      char *end = strrchr(class_list_path_str, *os::file_separator());
+      if (end != NULL) *end = '\0';
+    }
+    int class_list_path_len = (int)strlen(class_list_path_str);
+    if (class_list_path_len >= 3) {
+      if (strcmp(class_list_path_str + class_list_path_len - 3, "lib") != 0) {
+        strcat(class_list_path_str, os::file_separator());
+        strcat(class_list_path_str, "lib");
+      }
+    }
+    strcat(class_list_path_str, os::file_separator());
+    strcat(class_list_path_str, "classlist");
+    class_list_path = class_list_path_str;
+  } else {
+    class_list_path = SharedClassListFile;
   }
-  int class_list_path_len = (int)strlen(class_list_path);
-  if (class_list_path_len >= 3) {
-    if (strcmp(class_list_path + class_list_path_len - 3, "lib") != 0) {
-      strcat(class_list_path, os::file_separator());
-      strcat(class_list_path, "lib");
-    }
+
+  int class_count = 0;
+  GrowableArray<Klass*>* class_promote_order = new GrowableArray<Klass*>();
+
+  // sun.io.Converters
+  static const char obj_array_sig[] = "[[Ljava/lang/Object;";
+  SymbolTable::new_permanent_symbol(obj_array_sig, THREAD);
+
+  // java.util.HashMap
+  static const char map_entry_array_sig[] = "[Ljava/util/Map$Entry;";
+  SymbolTable::new_permanent_symbol(map_entry_array_sig, THREAD);
+
+  tty->print_cr("Loading classes to share ...");
+  _has_error_classes = false;
+  class_count += preload_and_dump(class_list_path, class_promote_order,
+                                  THREAD);
+  if (ExtraSharedClassListFile) {
+    class_count += preload_and_dump(ExtraSharedClassListFile, class_promote_order,
+                                    THREAD);
+  }
+  tty->print_cr("Loading classes to share: done.");
+
+  if (PrintSharedSpaces) {
+    tty->print_cr("Shared spaces: preloaded %d classes", class_count);
   }
-  strcat(class_list_path, os::file_separator());
-  strcat(class_list_path, "classlist");
+
+  // Rewrite and link classes
+  tty->print_cr("Rewriting and linking classes ...");
+
+  // Link any classes which got missed. This would happen if we have loaded classes that
+  // were not explicitly specified in the classlist. E.g., if an interface implemented by class K
+  // fails verification, all other interfaces that were not specified in the classlist but
+  // are implemented by K are not verified.
+  link_and_cleanup_shared_classes(CATCH);
+  tty->print_cr("Rewriting and linking classes: done");
 
+  // Create and dump the shared spaces.   Everything so far is loaded
+  // with the null class loader.
+  ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
+  VM_PopulateDumpSharedSpace op(loader_data, class_promote_order);
+  VMThread::execute(&op);
+
+  // Since various initialization steps have been undone by this process,
+  // it is not reasonable to continue running a java process.
+  exit(0);
+}
+
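The rewritten parsing loop below tolerates '#' comment lines and a final line without a trailing '\n' (fgets only appends one when the line fits in the buffer), neither of which the old checksum-driven parser handled. A reduced, standalone sketch of that loop:

    #include <cstdio>
    #include <cstring>

    int main(int argc, char** argv) {
      FILE* file = fopen(argc > 1 ? argv[1] : "classlist", "r");
      if (file == NULL) { perror("fopen"); return 1; }

      char class_name[256];
      int class_count = 0;
      while (fgets(class_name, sizeof class_name, file) != NULL) {
        if (*class_name == '#') continue;        // comment
        size_t name_len = strlen(class_name);
        if (name_len > 0 && class_name[name_len - 1] == '\n') {
          class_name[name_len - 1] = '\0';       // last line may lack '\n'
        }
        if (class_name[0] == '\0') continue;     // ignore blank lines
        printf("would load: %s\n", class_name);
        class_count++;
      }
      fclose(file);
      printf("%d classes\n", class_count);
      return 0;
    }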
+int MetaspaceShared::preload_and_dump(const char * class_list_path,
+                                      GrowableArray<Klass*>* class_promote_order,
+                                      TRAPS) {
   FILE* file = fopen(class_list_path, "r");
+  char class_name[256];
+  int class_count = 0;
+
   if (file != NULL) {
-    jlong computed_jsum  = JSUM_SEED;
-    jlong file_jsum      = 0;
-
-    char class_name[256];
-    int class_count = 0;
-    GrowableArray<Klass*>* class_promote_order = new GrowableArray<Klass*>();
-
-    // sun.io.Converters
-    static const char obj_array_sig[] = "[[Ljava/lang/Object;";
-    SymbolTable::new_permanent_symbol(obj_array_sig, THREAD);
-
-    // java.util.HashMap
-    static const char map_entry_array_sig[] = "[Ljava/util/Map$Entry;";
-    SymbolTable::new_permanent_symbol(map_entry_array_sig, THREAD);
-
-    tty->print("Loading classes to share ... ");
     while ((fgets(class_name, sizeof class_name, file)) != NULL) {
-      if (*class_name == '#') {
-        jint fsh, fsl;
-        if (sscanf(class_name, "# %8x%8x\n", &fsh, &fsl) == 2) {
-          file_jsum = ((jlong)(fsh) << 32) | (fsl & 0xffffffff);
-        }
-
+      if (*class_name == '#') { // comment
         continue;
       }
       // Remove trailing newline
       size_t name_len = strlen(class_name);
-      class_name[name_len-1] = '\0';
-
-      computed_jsum = jsum(computed_jsum, class_name, (const int)name_len - 1);
+      if (class_name[name_len-1] == '\n') {
+        class_name[name_len-1] = '\0';
+      }
 
       // Got a class name - load it.
       TempNewSymbol class_name_symbol = SymbolTable::new_permanent_symbol(class_name, THREAD);
       guarantee(!HAS_PENDING_EXCEPTION, "Exception creating a symbol.");
       Klass* klass = SystemDictionary::resolve_or_null(class_name_symbol,
                                                          THREAD);
-      guarantee(!HAS_PENDING_EXCEPTION, "Exception resolving a class.");
+      CLEAR_PENDING_EXCEPTION;
       if (klass != NULL) {
         if (PrintSharedSpaces && Verbose && WizardMode) {
           tty->print_cr("Shared spaces preloaded: %s", class_name);
         }
 
-
         InstanceKlass* ik = InstanceKlass::cast(klass);
 
         // Should be class load order as per -XX:+TraceClassLoadingPreorder
@@ -726,52 +808,14 @@
         // cpcache to be created. The linking is done as soon as classes
         // are loaded in order that the related data structures (klass and
         // cpCache) are located together.
-
-        if (ik->init_state() < InstanceKlass::linked) {
-          ik->link_class(THREAD);
-          guarantee(!(HAS_PENDING_EXCEPTION), "exception in class rewriting");
-        }
-
-        // TODO: Resolve klasses in constant pool
-        ik->constants()->resolve_class_constants(THREAD);
+        try_link_class(ik, THREAD);
+        guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");
 
         class_count++;
       } else {
-        if (PrintSharedSpaces && Verbose && WizardMode) {
-          tty->cr();
-          tty->print_cr(" Preload failed: %s", class_name);
-        }
+        //tty->print_cr("Preload failed: %s", class_name);
       }
-      file_jsum = 0; // Checksum must be on last line of file
-    }
-    if (computed_jsum != file_jsum) {
-      tty->cr();
-      tty->print_cr("Preload failed: checksum of class list was incorrect.");
-      exit(1);
-    }
-
-    tty->print_cr("done. ");
-
-    if (PrintSharedSpaces) {
-      tty->print_cr("Shared spaces: preloaded %d classes", class_count);
     }
-
-    // Rewrite and unlink classes.
-    tty->print("Rewriting and linking classes ... ");
-
-    // Link any classes which got missed.  (It's not quite clear why
-    // they got missed.)  This iteration would be unsafe if we weren't
-    // single-threaded at this point; however we can't do it on the VM
-    // thread because it requires object allocation.
-    SystemDictionary::classes_do(link_shared_classes, CATCH);
-    tty->print_cr("done. ");
-
-    // Create and dump the shared spaces.   Everything so far is loaded
-    // with the null class loader.
-    ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
-    VM_PopulateDumpSharedSpace op(loader_data, class_promote_order);
-    VMThread::execute(&op);
-
   } else {
     char errmsg[JVM_MAXPATHLEN];
     os::lasterror(errmsg, JVM_MAXPATHLEN);
@@ -779,11 +823,39 @@
     exit(1);
   }
 
-  // Since various initialization steps have been undone by this process,
-  // it is not reasonable to continue running a java process.
-  exit(0);
+  return class_count;
 }
 
+// Returns true if the class's status has changed
+bool MetaspaceShared::try_link_class(InstanceKlass* ik, TRAPS) {
+  assert(DumpSharedSpaces, "should only be called during dumping");
+  if (ik->init_state() < InstanceKlass::linked) {
+    bool saved = BytecodeVerificationLocal;
+    if (!SharedClassUtil::is_shared_boot_class(ik)) {
+      // The verification decision is based on BytecodeVerificationRemote
+      // for non-system classes. Since we are using the NULL classloader
+      // to load non-system classes during dumping, we need to temporarily
+      // change BytecodeVerificationLocal to be the same as
+      // BytecodeVerificationRemote. Note this can cause the parent system
+      // classes also being verified. The extra overhead is acceptable during
+      // dumping.
+      BytecodeVerificationLocal = BytecodeVerificationRemote;
+    }
+    ik->link_class(THREAD);
+    if (HAS_PENDING_EXCEPTION) {
+      ResourceMark rm;
+      tty->print_cr("Preload Error: Verification failed for %s",
+                    ik->external_name());
+      CLEAR_PENDING_EXCEPTION;
+      ik->set_in_error_state();
+      _has_error_classes = true;
+    }
+    BytecodeVerificationLocal = saved;
+    return true;
+  } else {
+    return false;
+  }
+}
 
 // Closure for serializing initialization data in from a data area
 // (ptr_array) read from the shared file.
@@ -867,7 +939,8 @@
       (_rw_base = mapinfo->map_region(rw)) != NULL &&
       (_md_base = mapinfo->map_region(md)) != NULL &&
       (_mc_base = mapinfo->map_region(mc)) != NULL &&
-      (image_alignment == (size_t)max_alignment())) {
+      (image_alignment == (size_t)max_alignment()) &&
+      mapinfo->validate_classpath_entry_table()) {
     // Success (no need to do anything)
     return true;
   } else {
@@ -884,7 +957,7 @@
     // If -Xshare:on is specified, print out the error message and exit VM,
     // otherwise, set UseSharedSpaces to false and continue.
     if (RequireSharedSpaces) {
-      vm_exit_during_initialization("Unable to use shared archive.", NULL);
+      vm_exit_during_initialization("Unable to use shared archive.", "Failed map_region for using -Xshare:on.");
     } else {
       FLAG_SET_DEFAULT(UseSharedSpaces, false);
     }
@@ -984,6 +1057,20 @@
 
   // Close the mapinfo file
   mapinfo->close();
+
+  if (PrintSharedArchiveAndExit) {
+    if (PrintSharedDictionary) {
+      tty->print_cr("\nShared classes:\n");
+      SystemDictionary::print_shared(false);
+    }
+    if (_archive_loading_failed) {
+      tty->print_cr("archive is invalid");
+      vm_exit(1);
+    } else {
+      tty->print_cr("archive is valid");
+      vm_exit(0);
+    }
+  }
 }
 
 // JVM/TI RedefineClasses() support:
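For reference, the reworked class-list reader above boils down to the loop below: '#' lines are comments, the trailing newline is stripped only when present (so a final line without one is no longer corrupted), and the old jsum checksum handling is gone entirely. A minimal standalone sketch with a hypothetical helper name, not the HotSpot code itself:

#include <cstdio>
#include <cstring>

// Count class names in a CDS-style class list: one name per line,
// '#' starts a comment, and a missing newline on the last line is
// tolerated (mirrors the defensive fgets handling in the hunk above).
static int count_class_list_entries(const char* path) {
  FILE* file = fopen(path, "r");
  if (file == NULL) {
    return -1;  // caller reports the error, as preload_and_dump() does
  }
  char class_name[256];
  int class_count = 0;
  while (fgets(class_name, sizeof class_name, file) != NULL) {
    if (*class_name == '#') {  // comment line
      continue;
    }
    size_t name_len = strlen(class_name);
    if (name_len > 0 && class_name[name_len - 1] == '\n') {
      class_name[name_len - 1] = '\0';  // remove trailing newline
    }
    if (class_name[0] != '\0') {
      class_count++;
    }
  }
  fclose(file);
  return class_count;
}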
--- a/hotspot/src/share/vm/memory/metaspaceShared.hpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/memory/metaspaceShared.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -38,7 +38,10 @@
   // CDS support
   static ReservedSpace* _shared_rs;
   static int _max_alignment;
-
+  static bool _link_classes_made_progress;
+  static bool _check_classes_made_progress;
+  static bool _has_error_classes;
+  static bool _archive_loading_failed;
  public:
   enum {
     vtbl_list_size = 17, // number of entries in the shared space vtable list.
@@ -67,7 +70,11 @@
     NOT_CDS(return 0);
   }
 
+  static void prepare_for_dumping() NOT_CDS_RETURN;
   static void preload_and_dump(TRAPS) NOT_CDS_RETURN;
+  static int preload_and_dump(const char * class_list_path,
+                              GrowableArray<Klass*>* class_promote_order,
+                              TRAPS) NOT_CDS_RETURN_(0);
 
   static ReservedSpace* shared_rs() {
     CDS_ONLY(return _shared_rs);
@@ -78,6 +85,9 @@
     CDS_ONLY(_shared_rs = rs;)
   }
 
+  static void set_archive_loading_failed() {
+    _archive_loading_failed = true;
+  }
   static bool map_shared_spaces(FileMapInfo* mapinfo) NOT_CDS_RETURN_(false);
   static void initialize_shared_spaces() NOT_CDS_RETURN;
 
@@ -97,5 +107,10 @@
   static bool remap_shared_readonly_as_readwrite() NOT_CDS_RETURN_(true);
 
   static void print_shared_spaces();
+
+  static bool try_link_class(InstanceKlass* ik, TRAPS);
+  static void link_one_shared_class(Klass* obj, TRAPS);
+  static void check_one_shared_class(Klass* obj);
+  static void link_and_cleanup_shared_classes(TRAPS);
 };
 #endif // SHARE_VM_MEMORY_METASPACE_SHARED_HPP
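try_link_class() in the .cpp hunk above saves and restores BytecodeVerificationLocal by hand around link_class(). The same save/override/restore shape is often written as a scoped guard; this is a standalone sketch of the equivalent pattern (the guard type and the plain bool flags are illustrative assumptions, not HotSpot code):

// RAII guard: override a flag for one scope and restore it on exit,
// even if the guarded operation bails out early.
struct FlagOverride {
  bool& flag;
  bool  saved;
  FlagOverride(bool& f, bool value) : flag(f), saved(f) { flag = value; }
  ~FlagOverride() { flag = saved; }
};

static bool BytecodeVerificationLocal  = false;
static bool BytecodeVerificationRemote = true;

static void link_with_remote_verification() {
  // Non-system classes loaded by the NULL loader must be verified as
  // if they were remote, so mirror the remote setting for this scope.
  FlagOverride guard(BytecodeVerificationLocal, BytecodeVerificationRemote);
  // ... link the class; on failure the error is recorded, the pending
  // exception cleared, and dumping continues ...
}  // BytecodeVerificationLocal is restored here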
--- a/hotspot/src/share/vm/memory/universe.cpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/memory/universe.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -26,6 +26,9 @@
 #include "classfile/classLoader.hpp"
 #include "classfile/classLoaderData.hpp"
 #include "classfile/javaClasses.hpp"
+#if INCLUDE_CDS
+#include "classfile/sharedClassUtil.hpp"
+#endif
 #include "classfile/stringTable.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
@@ -34,6 +37,7 @@
 #include "gc_interface/collectedHeap.inline.hpp"
 #include "interpreter/interpreter.hpp"
 #include "memory/cardTableModRefBS.hpp"
+#include "memory/filemap.hpp"
 #include "memory/gcLocker.inline.hpp"
 #include "memory/genCollectedHeap.hpp"
 #include "memory/genRemSet.hpp"
@@ -239,8 +243,9 @@
 void initialize_basic_type_klass(Klass* k, TRAPS) {
   Klass* ok = SystemDictionary::Object_klass();
   if (UseSharedSpaces) {
+    ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
     assert(k->super() == ok, "u3");
-    k->restore_unshareable_info(CHECK);
+    k->restore_unshareable_info(loader_data, Handle(), CHECK);
   } else {
     k->initialize_supers(ok, CHECK);
   }
@@ -666,6 +671,10 @@
     SymbolTable::create_table();
     StringTable::create_table();
     ClassLoader::create_package_info_table();
+
+    if (DumpSharedSpaces) {
+      MetaspaceShared::prepare_for_dumping();
+    }
   }
 
   return JNI_OK;
@@ -1155,6 +1164,11 @@
   MemoryService::add_metaspace_memory_pools();
 
   MemoryService::set_universe_heap(Universe::_collectedHeap);
+#if INCLUDE_CDS
+  if (UseSharedSpaces) {
+    SharedClassUtil::initialize(CHECK_false);
+  }
+#endif
   return true;
 }
 
--- a/hotspot/src/share/vm/oops/arrayKlass.cpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/oops/arrayKlass.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -186,8 +186,9 @@
   set_component_mirror(NULL);
 }
 
-void ArrayKlass::restore_unshareable_info(TRAPS) {
-  Klass::restore_unshareable_info(CHECK);
+void ArrayKlass::restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, TRAPS) {
+  assert(loader_data == ClassLoaderData::the_null_class_loader_data(), "array classes belong to null loader");
+  Klass::restore_unshareable_info(loader_data, protection_domain, CHECK);
   // Klass recreates the component mirror also
 }
 
--- a/hotspot/src/share/vm/oops/arrayKlass.hpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/oops/arrayKlass.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -137,7 +137,7 @@
 
   // CDS support - remove and restore oops from metadata. Oops are not shared.
   virtual void remove_unshareable_info();
-  virtual void restore_unshareable_info(TRAPS);
+  virtual void restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, TRAPS);
 
   // Printing
   void print_on(outputStream* st) const;
--- a/hotspot/src/share/vm/oops/instanceKlass.cpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/oops/instanceKlass.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -2303,12 +2303,14 @@
   array_klasses_do(remove_unshareable_in_class);
 }
 
-void restore_unshareable_in_class(Klass* k, TRAPS) {
-  k->restore_unshareable_info(CHECK);
+static void restore_unshareable_in_class(Klass* k, TRAPS) {
+  // Array classes have null protection domain.
+  // --> see ArrayKlass::complete_create_array_klass()
+  k->restore_unshareable_info(ClassLoaderData::the_null_class_loader_data(), Handle(), CHECK);
 }
 
-void InstanceKlass::restore_unshareable_info(TRAPS) {
-  Klass::restore_unshareable_info(CHECK);
+void InstanceKlass::restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, TRAPS) {
+  Klass::restore_unshareable_info(loader_data, protection_domain, CHECK);
   instanceKlassHandle ik(THREAD, this);
 
   Array<Method*>* methods = ik->methods();
@@ -2334,6 +2336,38 @@
   ik->array_klasses_do(restore_unshareable_in_class, CHECK);
 }
 
+// Returns true iff is_in_error_state() has been changed as a result of this call.
+bool InstanceKlass::check_sharing_error_state() {
+  assert(DumpSharedSpaces, "should only be called during dumping");
+  bool old_state = is_in_error_state();
+
+  if (!is_in_error_state()) {
+    bool bad = false;
+    for (InstanceKlass* sup = java_super(); sup; sup = sup->java_super()) {
+      if (sup->is_in_error_state()) {
+        bad = true;
+        break;
+      }
+    }
+    if (!bad) {
+      Array<Klass*>* interfaces = transitive_interfaces();
+      for (int i = 0; i < interfaces->length(); i++) {
+        Klass* iface = interfaces->at(i);
+        if (InstanceKlass::cast(iface)->is_in_error_state()) {
+          bad = true;
+          break;
+        }
+      }
+    }
+
+    if (bad) {
+      set_in_error_state();
+    }
+  }
+
+  return (old_state != is_in_error_state());
+}
+
 static void clear_all_breakpoints(Method* m) {
   m->clear_all_breakpoints();
 }
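check_sharing_error_state() above marks a class bad when any superclass or transitive interface is already bad, and reports whether its state changed; the _check_classes_made_progress flag declared in metaspaceShared.hpp suggests the caller rescans until nothing changes. That driving loop is not part of this excerpt, so the following is only a standalone sketch of the propagation, with plain indices standing in for Klass*:

#include <vector>

// A class becomes bad if any of its supertypes (super chain or
// transitive interfaces, flattened here into one edge list) is bad.
// Repeat until a full pass makes no progress: a fixed point.
struct K { std::vector<int> supers; bool in_error; };

static void propagate_error_state(std::vector<K>& classes) {
  bool made_progress = true;
  while (made_progress) {
    made_progress = false;
    for (size_t i = 0; i < classes.size(); i++) {
      K& k = classes[i];
      if (k.in_error) continue;
      for (size_t j = 0; j < k.supers.size(); j++) {
        if (classes[k.supers[j]].in_error) {
          k.in_error = true;       // state changed...
          made_progress = true;    // ...so another pass is needed
          break;
        }
      }
    }
  }
}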
--- a/hotspot/src/share/vm/oops/instanceKlass.hpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/oops/instanceKlass.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -980,6 +980,13 @@
 
   u2 idnum_allocated_count() const      { return _idnum_allocated_count; }
 
+public:
+  void set_in_error_state() {
+    assert(DumpSharedSpaces, "only call this when dumping archive");
+    _init_state = initialization_error;
+  }
+  bool check_sharing_error_state();
+
 private:
   // initialization state
 #ifdef ASSERT
@@ -1038,7 +1045,7 @@
 public:
   // CDS support - remove and restore oops from metadata. Oops are not shared.
   virtual void remove_unshareable_info();
-  virtual void restore_unshareable_info(TRAPS);
+  virtual void restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, TRAPS);
 
   // jvm support
   jint compute_modifier_flags(TRAPS) const;
--- a/hotspot/src/share/vm/oops/klass.cpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/oops/klass.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -184,6 +184,7 @@
   // The klass doesn't have any references at this point.
   clear_modified_oops();
   clear_accumulated_modified_oops();
+  _shared_class_path_index = -1;
 }
 
 jint Klass::array_layout_helper(BasicType etype) {
@@ -500,13 +501,12 @@
   set_class_loader_data(NULL);
 }
 
-void Klass::restore_unshareable_info(TRAPS) {
+void Klass::restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, TRAPS) {
   TRACE_INIT_ID(this);
   // If an exception happened during CDS restore, some of these fields may already be
   // set.  We leave the class on the CLD list, even if incomplete so that we don't
   // modify the CLD list outside a safepoint.
   if (class_loader_data() == NULL) {
-    ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
     // Restore class_loader_data to the null class loader data
     set_class_loader_data(loader_data);
 
@@ -515,12 +515,12 @@
     loader_data->add_class(this);
   }
 
-  // Recreate the class mirror.  The protection_domain is always null for
-  // boot loader, for now.
+  // Recreate the class mirror.
   // Only recreate it if not present.  A previous attempt to restore may have
   // gotten an OOM later but keep the mirror if it was created.
   if (java_mirror() == NULL) {
-    java_lang_Class::create_mirror(this, Handle(NULL), Handle(NULL), CHECK);
+    Handle loader = loader_data->class_loader();
+    java_lang_Class::create_mirror(this, loader, protection_domain, CHECK);
   }
 }
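The null checks in Klass::restore_unshareable_info() above make the restore idempotent: a previous attempt may have failed partway (the comment mentions an OOM while creating the mirror), so each step runs only when its field is still unset and an earlier partial result is kept. A standalone sketch of the shape, with hypothetical names:

#include <cstddef>

struct Restorable {
  void* loader_data;
  void* mirror;

  // Safe to call again after a failed earlier attempt: every step is
  // guarded by "only if not already done".
  void restore(void* loader, void* (*make_mirror)()) {
    if (loader_data == NULL) {
      loader_data = loader;        // set at most once
    }
    if (mirror == NULL) {
      mirror = make_mirror();      // keep a mirror from a prior attempt
    }
  }
};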
 
--- a/hotspot/src/share/vm/oops/klass.hpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/oops/klass.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -147,6 +147,16 @@
   jbyte _modified_oops;             // Card Table Equivalent (YC/CMS support)
   jbyte _accumulated_modified_oops; // Mod Union Equivalent (CMS support)
 
+private:
+  // This is an index into FileMapHeader::_classpath_entry_table[], to
+  // associate this class with the JAR file where it's loaded from during
+  // dump time. If a class is not loaded from the shared archive, this field is
+  // -1.
+  jshort _shared_class_path_index;
+
+  friend class SharedClassUtil;
+protected:
+
   // Constructor
   Klass();
 
@@ -253,6 +263,15 @@
   void clear_accumulated_modified_oops() { _accumulated_modified_oops = 0; }
   bool has_accumulated_modified_oops()   { return _accumulated_modified_oops == 1; }
 
+  int shared_classpath_index() const {
+    return _shared_class_path_index;
+  }
+
+  void set_shared_classpath_index(int index) {
+    _shared_class_path_index = index;
+  }
+
+
  protected:                                // internal accessors
   void     set_subklass(Klass* s);
   void     set_next_sibling(Klass* s);
@@ -422,7 +441,7 @@
  public:
   // CDS support - remove and restore oops from metadata. Oops are not shared.
   virtual void remove_unshareable_info();
-  virtual void restore_unshareable_info(TRAPS);
+  virtual void restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, TRAPS);
 
  protected:
   // computes the subtype relationship
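The new _shared_class_path_index field ties a shared class back to the FileMapHeader::_classpath_entry_table slot it was loaded from at dump time, with -1 as the "not shared" sentinel; note the field is a jshort while the accessors traffic in int. A standalone sketch of the lookup (all names hypothetical):

#include <cstdint>
#include <cstddef>

struct ClasspathEntry { const char* path; };

struct SharedClass {
  int16_t shared_path_index;       // plays the role of the jshort field
};

// Resolve a class back to the classpath entry it was dumped from;
// NULL means the class was not loaded from the shared archive.
static const char* origin_of(const SharedClass& k,
                             const ClasspathEntry* table, int table_len) {
  if (k.shared_path_index < 0 || k.shared_path_index >= table_len) {
    return NULL;
  }
  return table[k.shared_path_index].path;
}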
--- a/hotspot/src/share/vm/opto/callnode.cpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/opto/callnode.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -1815,3 +1815,57 @@
   }
   return result;
 }
+
+ArrayCopyNode::ArrayCopyNode(Compile* C, bool alloc_tightly_coupled)
+  : CallNode(arraycopy_type(), NULL, TypeRawPtr::BOTTOM), _alloc_tightly_coupled(alloc_tightly_coupled), _kind(ArrayCopy) {
+  init_class_id(Class_ArrayCopy);
+  init_flags(Flag_is_macro);
+  C->add_macro_node(this);
+}
+
+uint ArrayCopyNode::size_of() const { return sizeof(*this); }
+
+ArrayCopyNode* ArrayCopyNode::make(GraphKit* kit, bool may_throw,
+                                   Node* src, Node* src_offset,
+                                   Node* dest, Node* dest_offset,
+                                   Node* length,
+                                   bool alloc_tightly_coupled,
+                                   Node* src_length, Node* dest_length,
+                                   Node* src_klass, Node* dest_klass) {
+
+  ArrayCopyNode* ac = new ArrayCopyNode(kit->C, alloc_tightly_coupled);
+  Node* prev_mem = kit->set_predefined_input_for_runtime_call(ac);
+
+  ac->init_req(ArrayCopyNode::Src, src);
+  ac->init_req(ArrayCopyNode::SrcPos, src_offset);
+  ac->init_req(ArrayCopyNode::Dest, dest);
+  ac->init_req(ArrayCopyNode::DestPos, dest_offset);
+  ac->init_req(ArrayCopyNode::Length, length);
+  ac->init_req(ArrayCopyNode::SrcLen, src_length);
+  ac->init_req(ArrayCopyNode::DestLen, dest_length);
+  ac->init_req(ArrayCopyNode::SrcKlass, src_klass);
+  ac->init_req(ArrayCopyNode::DestKlass, dest_klass);
+
+  if (may_throw) {
+    ac->set_req(TypeFunc::I_O , kit->i_o());
+    kit->add_safepoint_edges(ac, false);
+  }
+
+  return ac;
+}
+
+void ArrayCopyNode::connect_outputs(GraphKit* kit) {
+  kit->set_all_memory_call(this, true);
+  kit->set_control(kit->gvn().transform(new ProjNode(this,TypeFunc::Control)));
+  kit->set_i_o(kit->gvn().transform(new ProjNode(this, TypeFunc::I_O)));
+  kit->make_slow_call_ex(this, kit->env()->Throwable_klass(), true);
+  kit->set_all_memory_call(this);
+}
+
+#ifndef PRODUCT
+const char* ArrayCopyNode::_kind_names[] = {"arraycopy", "arraycopy, validated arguments", "clone", "oop array clone", "CopyOf", "CopyOfRange"};
+void ArrayCopyNode::dump_spec(outputStream *st) const {
+  CallNode::dump_spec(st);
+  st->print(" (%s%s)", _kind_names[_kind], _alloc_tightly_coupled ? ", tightly coupled allocation" : "");
+}
+#endif
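The _kind_names table above is indexed by the _kind enum, and the header below sizes it as _kind_names[CopyOfRange+1], so the compiler rejects an initializer list that grows past the enum and the two stay in lockstep as long as new kinds are appended before the last entry. A runnable standalone sketch of the idiom:

#include <cstdio>

enum Kind { ArrayCopy, ArrayCopyNoTest, CloneBasic, CloneOop,
            CopyOf, CopyOfRange };

// One string per enumerator; the CopyOfRange + 1 bound ties the array
// length to the last enumerator, so an over-long initializer list is
// a compile-time error.
static const char* kind_names[CopyOfRange + 1] = {
  "arraycopy", "arraycopy, validated arguments", "clone",
  "oop array clone", "CopyOf", "CopyOfRange"
};

int main() {
  Kind k = CopyOf;
  printf("kind = %s\n", kind_names[k]);
  return 0;
}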
--- a/hotspot/src/share/vm/opto/callnode.hpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/opto/callnode.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -1063,4 +1063,108 @@
   virtual bool        guaranteed_safepoint()  { return false; }
 };
 
+class GraphKit;
+
+class ArrayCopyNode : public CallNode {
+private:
+
+  // What kind of arraycopy variant is this?
+  enum {
+    ArrayCopy,       // System.arraycopy()
+    ArrayCopyNoTest, // System.arraycopy(), all arguments validated
+    CloneBasic,      // A clone that can be copied by 64 bit chunks
+    CloneOop,        // An oop array clone
+    CopyOf,          // Arrays.copyOf()
+    CopyOfRange      // Arrays.copyOfRange()
+  } _kind;
+
+#ifndef PRODUCT
+  static const char* _kind_names[CopyOfRange+1];
+#endif
+  // Is the alloc obtained with
+  // AllocateArrayNode::Ideal_array_allocation() tightly coupled
+  // (the arraycopy immediately follows the allocation)?
+  // We cache the result of LibraryCallKit::tightly_coupled_allocation
+  // here because it's much easier to find whether there's a tightly
+  // coupled allocation at parse time than at macro expansion time. At
+  // macro expansion time, for every use of the allocation node we
+  // would need to figure out whether it happens after the arraycopy (and
+  // can be ignored) or between the allocation and the arraycopy. At
+  // parse time, it's straightforward because whatever happens after
+  // the arraycopy is not parsed yet so doesn't exist when
+  // LibraryCallKit::tightly_coupled_allocation() is called.
+  bool _alloc_tightly_coupled;
+
+  static const TypeFunc* arraycopy_type() {
+    const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
+    fields[Src]       = TypeInstPtr::BOTTOM;
+    fields[SrcPos]    = TypeInt::INT;
+    fields[Dest]      = TypeInstPtr::BOTTOM;
+    fields[DestPos]   = TypeInt::INT;
+    fields[Length]    = TypeInt::INT;
+    fields[SrcLen]    = TypeInt::INT;
+    fields[DestLen]   = TypeInt::INT;
+    fields[SrcKlass]  = TypeKlassPtr::BOTTOM;
+    fields[DestKlass] = TypeKlassPtr::BOTTOM;
+    const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);
+
+    // create result type (range)
+    fields = TypeTuple::fields(0);
+
+    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
+
+    return TypeFunc::make(domain, range);
+  }
+
+  ArrayCopyNode(Compile* C, bool alloc_tightly_coupled);
+
+public:
+
+  enum {
+    Src   = TypeFunc::Parms,
+    SrcPos,
+    Dest,
+    DestPos,
+    Length,
+    SrcLen,
+    DestLen,
+    SrcKlass,
+    DestKlass,
+    ParmLimit
+  };
+
+  static ArrayCopyNode* make(GraphKit* kit, bool may_throw,
+                             Node* src, Node* src_offset,
+                             Node* dest,  Node* dest_offset,
+                             Node* length,
+                             bool alloc_tightly_coupled,
+                             Node* src_length = NULL, Node* dest_length = NULL,
+                             Node* src_klass = NULL, Node* dest_klass = NULL);
+
+  void connect_outputs(GraphKit* kit);
+
+  bool is_arraycopy()         const { return _kind == ArrayCopy; }
+  bool is_arraycopy_notest()  const { return _kind == ArrayCopyNoTest; }
+  bool is_clonebasic()        const { return _kind == CloneBasic; }
+  bool is_cloneoop()          const { return _kind == CloneOop; }
+  bool is_copyof()            const { return _kind == CopyOf; }
+  bool is_copyofrange()       const { return _kind == CopyOfRange; }
+
+  void set_arraycopy()         { _kind = ArrayCopy; }
+  void set_arraycopy_notest()  { _kind = ArrayCopyNoTest; }
+  void set_clonebasic()        { _kind = CloneBasic; }
+  void set_cloneoop()          { _kind = CloneOop; }
+  void set_copyof()            { _kind = CopyOf; }
+  void set_copyofrange()       { _kind = CopyOfRange; }
+
+  virtual int Opcode() const;
+  virtual uint size_of() const; // Size is bigger
+  virtual bool guaranteed_safepoint()  { return false; }
+
+  bool is_alloc_tightly_coupled() const { return _alloc_tightly_coupled; }
+
+#ifndef PRODUCT
+  virtual void dump_spec(outputStream *st) const;
+#endif
+};
 #endif // SHARE_VM_OPTO_CALLNODE_HPP
--- a/hotspot/src/share/vm/opto/classes.hpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/opto/classes.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -37,6 +37,7 @@
 macro(AllocateArray)
 macro(AndI)
 macro(AndL)
+macro(ArrayCopy)
 macro(AryEq)
 macro(AtanD)
 macro(Binary)
--- a/hotspot/src/share/vm/opto/compile.cpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/opto/compile.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -3795,6 +3795,56 @@
   }
 }
 
+//----------------------------static_subtype_check-----------------------------
+// Shortcut important common cases when superklass is exact:
+// (0) superklass is java.lang.Object (can occur in reflective code)
+// (1) subklass is already limited to a subtype of superklass => always ok
+// (2) subklass does not overlap with superklass => always fail
+// (3) superklass has NO subtypes and we can check with a simple compare.
+int Compile::static_subtype_check(ciKlass* superk, ciKlass* subk) {
+  if (StressReflectiveCode) {
+    return SSC_full_test;       // Let caller generate the general case.
+  }
+
+  if (superk == env()->Object_klass()) {
+    return SSC_always_true;     // (0) this test cannot fail
+  }
+
+  ciType* superelem = superk;
+  if (superelem->is_array_klass())
+    superelem = superelem->as_array_klass()->base_element_type();
+
+  if (!subk->is_interface()) {  // cannot trust static interface types yet
+    if (subk->is_subtype_of(superk)) {
+      return SSC_always_true;   // (1) false path dead; no dynamic test needed
+    }
+    if (!(superelem->is_klass() && superelem->as_klass()->is_interface()) &&
+        !superk->is_subtype_of(subk)) {
+      return SSC_always_false;
+    }
+  }
+
+  // If casting to an instance klass, it must have no subtypes
+  if (superk->is_interface()) {
+    // Cannot trust interfaces yet.
+    // %%% S.B. superk->nof_implementors() == 1
+  } else if (superelem->is_instance_klass()) {
+    ciInstanceKlass* ik = superelem->as_instance_klass();
+    if (!ik->has_subklass() && !ik->is_interface()) {
+      if (!ik->is_final()) {
+        // Add a dependency if there is a chance of a later subclass.
+        dependencies()->assert_leaf_type(ik);
+      }
+      return SSC_easy_test;     // (3) caller can do a simple ptr comparison
+    }
+  } else {
+    // A primitive array type has no subtypes.
+    return SSC_easy_test;       // (3) caller can do a simple ptr comparison
+  }
+
+  return SSC_full_test;
+}
+
 // The message about the current inlining is accumulated in
 // _print_inlining_stream and transferred into the _print_inlining_list
 // once we know whether inlining succeeds or not. For regular
--- a/hotspot/src/share/vm/opto/compile.hpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/opto/compile.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -1200,6 +1200,10 @@
   // Definitions of pd methods
   static void pd_compiler2_init();
 
+  // Static parse-time type checking logic for gen_subtype_check:
+  enum { SSC_always_false, SSC_always_true, SSC_easy_test, SSC_full_test };
+  int static_subtype_check(ciKlass* superk, ciKlass* subk);
+
   // Auxiliary method for randomized fuzzing/stressing
   static bool randomized_select(int count);
 };
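static_subtype_check() moves from GraphKit to Compile so that callers outside the parser (notably the arraycopy macro expansion) can fold subtype checks too. Its four outcomes are the heart of the logic; here is a toy, self-contained model covering single inheritance only (a hypothetical stand-in, not the ciKlass API):

#include <cstddef>

enum Ssc { SSC_always_false, SSC_always_true, SSC_easy_test, SSC_full_test };

struct Klass {
  const Klass* super;   // single superclass chain; interfaces omitted
  bool is_leaf;         // "has no subtypes" stands in for case (3)
};

static bool is_subtype_of(const Klass* sub, const Klass* sup) {
  for (const Klass* k = sub; k != NULL; k = k->super) {
    if (k == sup) return true;
  }
  return false;
}

static Ssc toy_static_subtype_check(const Klass* superk, const Klass* subk,
                                    const Klass* object_klass) {
  if (superk == object_klass) return SSC_always_true;        // (0)
  if (is_subtype_of(subk, superk)) return SSC_always_true;   // (1)
  if (!is_subtype_of(superk, subk)) return SSC_always_false; // (2) disjoint
  if (superk->is_leaf) return SSC_easy_test;                 // (3) one compare
  return SSC_full_test;   // the general runtime check is needed
}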
--- a/hotspot/src/share/vm/opto/graphKit.cpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/opto/graphKit.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -2520,6 +2520,21 @@
   set_control(norm);
 }
 
+static IfNode* gen_subtype_check_compare(Node* ctrl, Node* in1, Node* in2, BoolTest::mask test, float p, PhaseGVN* gvn, BasicType bt) {
+  Node* cmp = NULL;
+  switch(bt) {
+  case T_INT: cmp = new CmpINode(in1, in2); break;
+  case T_ADDRESS: cmp = new CmpPNode(in1, in2); break;
+  default: fatal(err_msg("unexpected comparison type %s", type2name(bt)));
+  }
+  gvn->transform(cmp);
+  Node* bol = gvn->transform(new BoolNode(cmp, test));
+  IfNode* iff = new IfNode(ctrl, bol, p, COUNT_UNKNOWN);
+  gvn->transform(iff);
+  if (!bol->is_Con()) gvn->record_for_igvn(iff);
+  return iff;
+}
+
 
 //-------------------------------gen_subtype_check-----------------------------
 // Generate a subtyping check.  Takes as input the subtype and supertype.
@@ -2529,16 +2544,17 @@
 // but that's not exposed to the optimizer.  This call also doesn't take in an
 // Object; if you wish to check an Object you need to load the Object's class
 // prior to coming here.
-Node* GraphKit::gen_subtype_check(Node* subklass, Node* superklass) {
+Node* Phase::gen_subtype_check(Node* subklass, Node* superklass, Node** ctrl, MergeMemNode* mem, PhaseGVN* gvn) {
+  Compile* C = gvn->C;
   // Fast check for identical types, perhaps identical constants.
   // The types can even be identical non-constants, in cases
   // involving Array.newInstance, Object.clone, etc.
   if (subklass == superklass)
-    return top();             // false path is dead; no test needed.
-
-  if (_gvn.type(superklass)->singleton()) {
-    ciKlass* superk = _gvn.type(superklass)->is_klassptr()->klass();
-    ciKlass* subk   = _gvn.type(subklass)->is_klassptr()->klass();
+    return C->top();             // false path is dead; no test needed.
+
+  if (gvn->type(superklass)->singleton()) {
+    ciKlass* superk = gvn->type(superklass)->is_klassptr()->klass();
+    ciKlass* subk   = gvn->type(subklass)->is_klassptr()->klass();
 
     // In the common case of an exact superklass, try to fold up the
     // test before generating code.  You may ask, why not just generate
@@ -2549,25 +2565,23 @@
     //    Foo[] fa = blah(); Foo x = fa[0]; fa[1] = x;
     // Here, the type of 'fa' is often exact, so the store check
     // of fa[1]=x will fold up, without testing the nullness of x.
-    switch (static_subtype_check(superk, subk)) {
-    case SSC_always_false:
+    switch (C->static_subtype_check(superk, subk)) {
+    case Compile::SSC_always_false:
       {
-        Node* always_fail = control();
-        set_control(top());
+        Node* always_fail = *ctrl;
+        *ctrl = gvn->C->top();
         return always_fail;
       }
-    case SSC_always_true:
-      return top();
-    case SSC_easy_test:
+    case Compile::SSC_always_true:
+      return C->top();
+    case Compile::SSC_easy_test:
       {
         // Just do a direct pointer compare and be done.
-        Node* cmp = _gvn.transform( new CmpPNode(subklass, superklass) );
-        Node* bol = _gvn.transform( new BoolNode(cmp, BoolTest::eq) );
-        IfNode* iff = create_and_xform_if(control(), bol, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
-        set_control( _gvn.transform( new IfTrueNode (iff) ) );
-        return       _gvn.transform( new IfFalseNode(iff) );
+        IfNode* iff = gen_subtype_check_compare(*ctrl, subklass, superklass, BoolTest::eq, PROB_STATIC_FREQUENT, gvn, T_ADDRESS);
+        *ctrl = gvn->transform(new IfTrueNode(iff));
+        return gvn->transform(new IfFalseNode(iff));
       }
-    case SSC_full_test:
+    case Compile::SSC_full_test:
       break;
     default:
       ShouldNotReachHere();
@@ -2579,11 +2593,11 @@
   // will always succeed.  We could leave a dependency behind to ensure this.
 
   // First load the super-klass's check-offset
-  Node *p1 = basic_plus_adr( superklass, superklass, in_bytes(Klass::super_check_offset_offset()) );
-  Node *chk_off = _gvn.transform(new LoadINode(NULL, memory(p1), p1, _gvn.type(p1)->is_ptr(),
-                                                   TypeInt::INT, MemNode::unordered));
+  Node *p1 = gvn->transform(new AddPNode(superklass, superklass, gvn->MakeConX(in_bytes(Klass::super_check_offset_offset()))));
+  Node* m = mem->memory_at(C->get_alias_index(gvn->type(p1)->is_ptr()));
+  Node *chk_off = gvn->transform(new LoadINode(NULL, m, p1, gvn->type(p1)->is_ptr(), TypeInt::INT, MemNode::unordered));
   int cacheoff_con = in_bytes(Klass::secondary_super_cache_offset());
-  bool might_be_cache = (find_int_con(chk_off, cacheoff_con) == cacheoff_con);
+  bool might_be_cache = (gvn->find_int_con(chk_off, cacheoff_con) == cacheoff_con);
 
   // Load from the sub-klass's super-class display list, or a 1-word cache of
   // the secondary superclass list, or a failing value with a sentinel offset
@@ -2591,42 +2605,44 @@
   // hierarchy and we have to scan the secondary superclass list the hard way.
   // Worst-case type is a little odd: NULL is allowed as a result (usually
   // klass loads can never produce a NULL).
-  Node *chk_off_X = ConvI2X(chk_off);
-  Node *p2 = _gvn.transform( new AddPNode(subklass,subklass,chk_off_X) );
+  Node *chk_off_X = chk_off;
+#ifdef _LP64
+  chk_off_X = gvn->transform(new ConvI2LNode(chk_off_X));
+#endif
+  Node *p2 = gvn->transform(new AddPNode(subklass,subklass,chk_off_X));
   // For some types like interfaces the following loadKlass is from a 1-word
   // cache which is mutable so can't use immutable memory.  Other
   // types load from the super-class display table which is immutable.
-  Node *kmem = might_be_cache ? memory(p2) : immutable_memory();
-  Node *nkls = _gvn.transform( LoadKlassNode::make( _gvn, kmem, p2, _gvn.type(p2)->is_ptr(), TypeKlassPtr::OBJECT_OR_NULL ) );
+  m = mem->memory_at(C->get_alias_index(gvn->type(p2)->is_ptr()));
+  Node *kmem = might_be_cache ? m : C->immutable_memory();
+  Node *nkls = gvn->transform(LoadKlassNode::make(*gvn, kmem, p2, gvn->type(p2)->is_ptr(), TypeKlassPtr::OBJECT_OR_NULL));
 
   // Compile speed common case: ARE a subtype and we canNOT fail
   if( superklass == nkls )
-    return top();             // false path is dead; no test needed.
+    return C->top();             // false path is dead; no test needed.
 
   // See if we get an immediate positive hit.  Happens roughly 83% of the
   // time.  Test to see if the value loaded just previously from the subklass
   // is exactly the superklass.
-  Node *cmp1 = _gvn.transform( new CmpPNode( superklass, nkls ) );
-  Node *bol1 = _gvn.transform( new BoolNode( cmp1, BoolTest::eq ) );
-  IfNode *iff1 = create_and_xform_if( control(), bol1, PROB_LIKELY(0.83f), COUNT_UNKNOWN );
-  Node *iftrue1 = _gvn.transform( new IfTrueNode ( iff1 ) );
-  set_control(    _gvn.transform( new IfFalseNode( iff1 ) ) );
+  IfNode *iff1 = gen_subtype_check_compare(*ctrl, superklass, nkls, BoolTest::eq, PROB_LIKELY(0.83f), gvn, T_ADDRESS);
+  Node *iftrue1 = gvn->transform( new IfTrueNode (iff1));
+  *ctrl = gvn->transform(new IfFalseNode(iff1));
 
   // Compile speed common case: Check for being deterministic right now.  If
   // chk_off is a constant and not equal to cacheoff then we are NOT a
   // subklass.  In this case we need exactly the 1 test above and we can
   // return those results immediately.
   if (!might_be_cache) {
-    Node* not_subtype_ctrl = control();
-    set_control(iftrue1); // We need exactly the 1 test above
+    Node* not_subtype_ctrl = *ctrl;
+    *ctrl = iftrue1; // We need exactly the 1 test above
     return not_subtype_ctrl;
   }
 
   // Gather the various success & failures here
   RegionNode *r_ok_subtype = new RegionNode(4);
-  record_for_igvn(r_ok_subtype);
+  gvn->record_for_igvn(r_ok_subtype);
   RegionNode *r_not_subtype = new RegionNode(3);
-  record_for_igvn(r_not_subtype);
+  gvn->record_for_igvn(r_not_subtype);
 
   r_ok_subtype->init_req(1, iftrue1);
 
@@ -2635,21 +2651,17 @@
   // check-offset points into the subklass display list or the 1-element
   // cache.  If it points to the display (and NOT the cache) and the display
   // missed then it's not a subtype.
-  Node *cacheoff = _gvn.intcon(cacheoff_con);
-  Node *cmp2 = _gvn.transform( new CmpINode( chk_off, cacheoff ) );
-  Node *bol2 = _gvn.transform( new BoolNode( cmp2, BoolTest::ne ) );
-  IfNode *iff2 = create_and_xform_if( control(), bol2, PROB_LIKELY(0.63f), COUNT_UNKNOWN );
-  r_not_subtype->init_req(1, _gvn.transform( new IfTrueNode (iff2) ) );
-  set_control(                _gvn.transform( new IfFalseNode(iff2) ) );
+  Node *cacheoff = gvn->intcon(cacheoff_con);
+  IfNode *iff2 = gen_subtype_check_compare(*ctrl, chk_off, cacheoff, BoolTest::ne, PROB_LIKELY(0.63f), gvn, T_INT);
+  r_not_subtype->init_req(1, gvn->transform(new IfTrueNode (iff2)));
+  *ctrl = gvn->transform(new IfFalseNode(iff2));
 
   // Check for self.  Very rare to get here, but it is taken 1/3 the time.
   // No performance impact (too rare) but allows sharing of secondary arrays
   // which has some footprint reduction.
-  Node *cmp3 = _gvn.transform( new CmpPNode( subklass, superklass ) );
-  Node *bol3 = _gvn.transform( new BoolNode( cmp3, BoolTest::eq ) );
-  IfNode *iff3 = create_and_xform_if( control(), bol3, PROB_LIKELY(0.36f), COUNT_UNKNOWN );
-  r_ok_subtype->init_req(2, _gvn.transform( new IfTrueNode ( iff3 ) ) );
-  set_control(               _gvn.transform( new IfFalseNode( iff3 ) ) );
+  IfNode *iff3 = gen_subtype_check_compare(*ctrl, subklass, superklass, BoolTest::eq, PROB_LIKELY(0.36f), gvn, T_ADDRESS);
+  r_ok_subtype->init_req(2, gvn->transform(new IfTrueNode(iff3)));
+  *ctrl = gvn->transform(new IfFalseNode(iff3));
 
   // -- Roads not taken here: --
   // We could also have chosen to perform the self-check at the beginning
@@ -2672,68 +2684,16 @@
   // out of line, and it can only improve I-cache density.
   // The decision to inline or out-of-line this final check is platform
   // dependent, and is found in the AD file definition of PartialSubtypeCheck.
-  Node* psc = _gvn.transform(
-    new PartialSubtypeCheckNode(control(), subklass, superklass) );
-
-  Node *cmp4 = _gvn.transform( new CmpPNode( psc, null() ) );
-  Node *bol4 = _gvn.transform( new BoolNode( cmp4, BoolTest::ne ) );
-  IfNode *iff4 = create_and_xform_if( control(), bol4, PROB_FAIR, COUNT_UNKNOWN );
-  r_not_subtype->init_req(2, _gvn.transform( new IfTrueNode (iff4) ) );
-  r_ok_subtype ->init_req(3, _gvn.transform( new IfFalseNode(iff4) ) );
+  Node* psc = gvn->transform(
+    new PartialSubtypeCheckNode(*ctrl, subklass, superklass));
+
+  IfNode *iff4 = gen_subtype_check_compare(*ctrl, psc, gvn->zerocon(T_OBJECT), BoolTest::ne, PROB_FAIR, gvn, T_ADDRESS);
+  r_not_subtype->init_req(2, gvn->transform(new IfTrueNode (iff4)));
+  r_ok_subtype ->init_req(3, gvn->transform(new IfFalseNode(iff4)));
 
   // Return false path; set default control to true path.
-  set_control( _gvn.transform(r_ok_subtype) );
-  return _gvn.transform(r_not_subtype);
-}
-
-//----------------------------static_subtype_check-----------------------------
-// Shortcut important common cases when superklass is exact:
-// (0) superklass is java.lang.Object (can occur in reflective code)
-// (1) subklass is already limited to a subtype of superklass => always ok
-// (2) subklass does not overlap with superklass => always fail
-// (3) superklass has NO subtypes and we can check with a simple compare.
-int GraphKit::static_subtype_check(ciKlass* superk, ciKlass* subk) {
-  if (StressReflectiveCode) {
-    return SSC_full_test;       // Let caller generate the general case.
-  }
-
-  if (superk == env()->Object_klass()) {
-    return SSC_always_true;     // (0) this test cannot fail
-  }
-
-  ciType* superelem = superk;
-  if (superelem->is_array_klass())
-    superelem = superelem->as_array_klass()->base_element_type();
-
-  if (!subk->is_interface()) {  // cannot trust static interface types yet
-    if (subk->is_subtype_of(superk)) {
-      return SSC_always_true;   // (1) false path dead; no dynamic test needed
-    }
-    if (!(superelem->is_klass() && superelem->as_klass()->is_interface()) &&
-        !superk->is_subtype_of(subk)) {
-      return SSC_always_false;
-    }
-  }
-
-  // If casting to an instance klass, it must have no subtypes
-  if (superk->is_interface()) {
-    // Cannot trust interfaces yet.
-    // %%% S.B. superk->nof_implementors() == 1
-  } else if (superelem->is_instance_klass()) {
-    ciInstanceKlass* ik = superelem->as_instance_klass();
-    if (!ik->has_subklass() && !ik->is_interface()) {
-      if (!ik->is_final()) {
-        // Add a dependency if there is a chance of a later subclass.
-        C->dependencies()->assert_leaf_type(ik);
-      }
-      return SSC_easy_test;     // (3) caller can do a simple ptr comparison
-    }
-  } else {
-    // A primitive array type has no subtypes.
-    return SSC_easy_test;       // (3) caller can do a simple ptr comparison
-  }
-
-  return SSC_full_test;
+  *ctrl = gvn->transform(r_ok_subtype);
+  return gvn->transform(r_not_subtype);
 }
 
 // Profile-driven exact type check:
@@ -2813,7 +2773,7 @@
   ciKlass* exact_kls = spec_klass == NULL ? profile_has_unique_klass() : spec_klass;
   if (exact_kls != NULL) {// no cast failures here
     if (require_klass == NULL ||
-        static_subtype_check(require_klass, exact_kls) == SSC_always_true) {
+        C->static_subtype_check(require_klass, exact_kls) == Compile::SSC_always_true) {
       // If we narrow the type to match what the type profile sees or
       // the speculative type, we can then remove the rest of the
       // cast.
@@ -2833,7 +2793,7 @@
       }
       return exact_obj;
     }
-    // assert(ssc == SSC_always_true)... except maybe the profile lied to us.
+    // assert(ssc == Compile::SSC_always_true)... except maybe the profile lied to us.
   }
 
   return NULL;
@@ -2938,8 +2898,8 @@
     ciKlass* superk = _gvn.type(superklass)->is_klassptr()->klass();
     ciKlass* subk = _gvn.type(obj)->is_oopptr()->klass();
     if (subk != NULL && subk->is_loaded()) {
-      int static_res = static_subtype_check(superk, subk);
-      known_statically = (static_res == SSC_always_true || static_res == SSC_always_false);
+      int static_res = C->static_subtype_check(superk, subk);
+      known_statically = (static_res == Compile::SSC_always_true || static_res == Compile::SSC_always_false);
     }
   }
 
@@ -3007,13 +2967,13 @@
   if (tk->singleton()) {
     const TypeOopPtr* objtp = _gvn.type(obj)->isa_oopptr();
     if (objtp != NULL && objtp->klass() != NULL) {
-      switch (static_subtype_check(tk->klass(), objtp->klass())) {
-      case SSC_always_true:
+      switch (C->static_subtype_check(tk->klass(), objtp->klass())) {
+      case Compile::SSC_always_true:
         // If we know the type check always succeed then we don't use
         // the profiling data at this bytecode. Don't lose it, feed it
         // to the type system as a speculative type.
         return record_profiled_receiver_for_speculation(obj);
-      case SSC_always_false:
+      case Compile::SSC_always_false:
         // It needs a null check because a null will *pass* the cast check.
         // A non-null value will always produce an exception.
         return null_assert(obj);
--- a/hotspot/src/share/vm/opto/graphKit.hpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/opto/graphKit.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -829,17 +829,13 @@
   Node* gen_checkcast( Node *subobj, Node* superkls,
                        Node* *failure_control = NULL );
 
-  // Generate a subtyping check.  Takes as input the subtype and supertype.
-  // Returns 2 values: sets the default control() to the true path and
-  // returns the false path.  Only reads from constant memory taken from the
-  // default memory; does not write anything.  It also doesn't take in an
-  // Object; if you wish to check an Object you need to load the Object's
-  // class prior to coming here.
-  Node* gen_subtype_check(Node* subklass, Node* superklass);
-
-  // Static parse-time type checking logic for gen_subtype_check:
-  enum { SSC_always_false, SSC_always_true, SSC_easy_test, SSC_full_test };
-  int static_subtype_check(ciKlass* superk, ciKlass* subk);
+  Node* gen_subtype_check(Node* subklass, Node* superklass) {
+    MergeMemNode* mem = merged_memory();
+    Node* ctrl = control();
+    Node* n = Phase::gen_subtype_check(subklass, superklass, &ctrl, mem, &_gvn);
+    set_control(ctrl);
+    return n;
+  }
 
   // Exact type check used for predicted calls and casts.
   // Rewrites (*casted_receiver) to be casted to the stronger type.
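The new gen_subtype_check() wrapper above shows the refactoring in miniature: the body moved to a static Phase helper that takes control and memory state explicitly, and the GraphKit member now just plumbs its own state through, so non-parser callers can reuse the logic. A standalone sketch of that shape, with illustrative names:

struct Node {};

// After the refactoring: a free function; the "current control" flows
// in and out through an explicit pointer instead of kit state.
static Node* check_helper(Node* subklass, Node* superklass, Node** ctrl) {
  Node* fail_path = *ctrl;
  // ... build the test, advancing *ctrl along the success path ...
  return fail_path;   // the caller receives the failing control path
}

struct Kit {
  Node* _control;
  Node* control() const { return _control; }
  void set_control(Node* c) { _control = c; }

  // The old entry point survives as a thin wrapper over the helper.
  Node* check(Node* subklass, Node* superklass) {
    Node* ctrl = control();
    Node* n = check_helper(subklass, superklass, &ctrl);
    set_control(ctrl);
    return n;
  }
};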
--- a/hotspot/src/share/vm/opto/ifnode.cpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/opto/ifnode.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -503,7 +503,7 @@
   jint  off = 0;
   if (l->is_top()) {
     return 0;
-  } else if (l->is_Add()) {
+  } else if (l->Opcode() == Op_AddI) {
     if ((off = l->in(1)->find_int_con(0)) != 0) {
       ind = l->in(2);
     } else if ((off = l->in(2)->find_int_con(0)) != 0) {
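The ifnode.cpp change above is a correctness tightening: Node::is_Add() is true for the whole Add family (AddI, AddL, AddP, ...), while this range-check pattern is only meaningful for an integer add. A toy sketch of why the exact opcode is the right predicate (Node and Opcode here are stand-ins, not the HotSpot types):

enum Opcode { Op_AddI, Op_AddL, Op_AddP };

struct Node {
  Opcode op;
  // Family test, analogous to HotSpot's Node::is_Add(): too broad for
  // a pattern that assumes a 32-bit integer index computation.
  bool is_Add() const {
    return op == Op_AddI || op == Op_AddL || op == Op_AddP;
  }
};

// The patched test: accept only the integer add, so a pointer or long
// add can no longer be folded as if it were an int index.
static bool matches_int_add(const Node* l) {
  return l->op == Op_AddI;
}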
--- a/hotspot/src/share/vm/opto/library_call.cpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/opto/library_call.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -146,15 +146,10 @@
   Node* generate_negative_guard(Node* index, RegionNode* region,
                                 // resulting CastII of index:
                                 Node* *pos_index = NULL);
-  Node* generate_nonpositive_guard(Node* index, bool never_negative,
-                                   // resulting CastII of index:
-                                   Node* *pos_index = NULL);
   Node* generate_limit_guard(Node* offset, Node* subseq_length,
                              Node* array_length,
                              RegionNode* region);
   Node* generate_current_thread(Node* &tls_output);
-  address basictype2arraycopy(BasicType t, Node *src_offset, Node *dest_offset,
-                              bool disjoint_bases, const char* &name, bool dest_uninitialized);
   Node* load_mirror_from_klass(Node* klass);
   Node* load_klass_from_mirror_common(Node* mirror, bool never_see_null,
                                       RegionNode* region, int null_path,
@@ -264,47 +259,8 @@
 
   // Helper functions for inlining arraycopy
   bool inline_arraycopy();
-  void generate_arraycopy(const TypePtr* adr_type,
-                          BasicType basic_elem_type,
-                          Node* src,  Node* src_offset,
-                          Node* dest, Node* dest_offset,
-                          Node* copy_length,
-                          bool disjoint_bases = false,
-                          bool length_never_negative = false,
-                          RegionNode* slow_region = NULL);
   AllocateArrayNode* tightly_coupled_allocation(Node* ptr,
                                                 RegionNode* slow_region);
-  void generate_clear_array(const TypePtr* adr_type,
-                            Node* dest,
-                            BasicType basic_elem_type,
-                            Node* slice_off,
-                            Node* slice_len,
-                            Node* slice_end);
-  bool generate_block_arraycopy(const TypePtr* adr_type,
-                                BasicType basic_elem_type,
-                                AllocateNode* alloc,
-                                Node* src,  Node* src_offset,
-                                Node* dest, Node* dest_offset,
-                                Node* dest_size, bool dest_uninitialized);
-  void generate_slow_arraycopy(const TypePtr* adr_type,
-                               Node* src,  Node* src_offset,
-                               Node* dest, Node* dest_offset,
-                               Node* copy_length, bool dest_uninitialized);
-  Node* generate_checkcast_arraycopy(const TypePtr* adr_type,
-                                     Node* dest_elem_klass,
-                                     Node* src,  Node* src_offset,
-                                     Node* dest, Node* dest_offset,
-                                     Node* copy_length, bool dest_uninitialized);
-  Node* generate_generic_arraycopy(const TypePtr* adr_type,
-                                   Node* src,  Node* src_offset,
-                                   Node* dest, Node* dest_offset,
-                                   Node* copy_length, bool dest_uninitialized);
-  void generate_unchecked_arraycopy(const TypePtr* adr_type,
-                                    BasicType basic_elem_type,
-                                    bool disjoint_bases,
-                                    Node* src,  Node* src_offset,
-                                    Node* dest, Node* dest_offset,
-                                    Node* copy_length, bool dest_uninitialized);
   typedef enum { LS_xadd, LS_xchg, LS_cmpxchg } LoadStoreKind;
   bool inline_unsafe_load_store(BasicType type,  LoadStoreKind kind);
   bool inline_unsafe_ordered_store(BasicType type);
@@ -1049,25 +1005,6 @@
   return is_neg;
 }
 
-inline Node* LibraryCallKit::generate_nonpositive_guard(Node* index, bool never_negative,
-                                                        Node* *pos_index) {
-  if (stopped())
-    return NULL;                // already stopped
-  if (_gvn.type(index)->higher_equal(TypeInt::POS1)) // [1,maxint]
-    return NULL;                // index is already adequately typed
-  Node* cmp_le = _gvn.transform(new CmpINode(index, intcon(0)));
-  BoolTest::mask le_or_eq = (never_negative ? BoolTest::eq : BoolTest::le);
-  Node* bol_le = _gvn.transform(new BoolNode(cmp_le, le_or_eq));
-  Node* is_notp = generate_guard(bol_le, NULL, PROB_MIN);
-  if (is_notp != NULL && pos_index != NULL) {
-    // Emulate effect of Parse::adjust_map_after_if.
-    Node* ccast = new CastIINode(index, TypeInt::POS1);
-    ccast->set_req(0, control());
-    (*pos_index) = _gvn.transform(ccast);
-  }
-  return is_notp;
-}
-
 // Make sure that 'position' is a valid limit index, in [0..length].
 // There are two equivalent plans for checking this:
 //   A. (offset + copyLength)  unsigned<=  arrayLength
@@ -2658,7 +2595,8 @@
   if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);
 
   if (!is_store) {
-    Node* p = make_load(control(), adr, value_type, type, adr_type, MemNode::unordered, is_volatile);
+    MemNode::MemOrd mo = is_volatile ? MemNode::acquire : MemNode::unordered;
+    Node* p = make_load(control(), adr, value_type, type, adr_type, mo, is_volatile);
     // load value
     switch (type) {
     case T_BOOLEAN:
@@ -3928,13 +3866,18 @@
       // oop stores need checking.
       // Extreme case:  Arrays.copyOf((Integer[])x, 10, String[].class).
       // This will fail a store-check if x contains any non-nulls.
-      bool disjoint_bases = true;
-      // if start > orig_length then the length of the copy may be
-      // negative.
-      bool length_never_negative = !is_copyOfRange;
-      generate_arraycopy(TypeAryPtr::OOPS, T_OBJECT,
-                         original, start, newcopy, intcon(0), moved,
-                         disjoint_bases, length_never_negative);
+
+      Node* alloc = tightly_coupled_allocation(newcopy, NULL);
+
+      ArrayCopyNode* ac = ArrayCopyNode::make(this, true, original, start, newcopy, intcon(0), moved, alloc != NULL);
+      if (!is_copyOfRange) {
+        ac->set_copyof();
+      } else {
+        ac->set_copyofrange();
+      }
+      Node* n = _gvn.transform(ac);
+      assert(n == ac, "cannot disappear");
+      ac->connect_outputs(this);
     }
   } // original reexecute is set back here
 
@@ -4445,10 +4388,12 @@
   countx = _gvn.transform(new URShiftXNode(countx, intcon(LogBytesPerLong) ));
 
   const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
-  bool disjoint_bases = true;
-  generate_unchecked_arraycopy(raw_adr_type, T_LONG, disjoint_bases,
-                               src, NULL, dest, NULL, countx,
-                               /*dest_uninitialized*/true);
+
+  ArrayCopyNode* ac = ArrayCopyNode::make(this, false, src, NULL, dest, NULL, countx, false);
+  ac->set_clonebasic();
+  Node* n = _gvn.transform(ac);
+  assert(n == ac, "cannot disappear");
+  set_predefined_output_for_runtime_call(ac, ac->in(TypeFunc::Memory), raw_adr_type);
 
   // If necessary, emit some card marks afterwards.  (Non-arrays only.)
   if (card_mark) {
@@ -4557,12 +4502,13 @@
           PreserveJVMState pjvms2(this);
           set_control(is_obja);
           // Generate a direct call to the right arraycopy function(s).
-          bool disjoint_bases = true;
-          bool length_never_negative = true;
-          generate_arraycopy(TypeAryPtr::OOPS, T_OBJECT,
-                             obj, intcon(0), alloc_obj, intcon(0),
-                             obj_length,
-                             disjoint_bases, length_never_negative);
+          Node* alloc = tightly_coupled_allocation(alloc_obj, NULL);
+          ArrayCopyNode* ac = ArrayCopyNode::make(this, true, obj, intcon(0), alloc_obj, intcon(0), obj_length, alloc != NULL);
+          ac->set_cloneoop();
+          Node* n = _gvn.transform(ac);
+          assert(n == ac, "cannot disappear");
+          ac->connect_outputs(this);
+
           result_reg->init_req(_objArray_path, control());
           result_val->init_req(_objArray_path, alloc_obj);
           result_i_o ->set_req(_objArray_path, i_o());
@@ -4656,42 +4602,6 @@
   return true;
 }
 
-//------------------------------basictype2arraycopy----------------------------
-address LibraryCallKit::basictype2arraycopy(BasicType t,
-                                            Node* src_offset,
-                                            Node* dest_offset,
-                                            bool disjoint_bases,
-                                            const char* &name,
-                                            bool dest_uninitialized) {
-  const TypeInt* src_offset_inttype  = gvn().find_int_type(src_offset);;
-  const TypeInt* dest_offset_inttype = gvn().find_int_type(dest_offset);;
-
-  bool aligned = false;
-  bool disjoint = disjoint_bases;
-
-  // if the offsets are the same, we can treat the memory regions as
-  // disjoint, because either the memory regions are in different arrays,
-  // or they are identical (which we can treat as disjoint.)  We can also
-  // treat a copy with a destination index  less that the source index
-  // as disjoint since a low->high copy will work correctly in this case.
-  if (src_offset_inttype != NULL && src_offset_inttype->is_con() &&
-      dest_offset_inttype != NULL && dest_offset_inttype->is_con()) {
-    // both indices are constants
-    int s_offs = src_offset_inttype->get_con();
-    int d_offs = dest_offset_inttype->get_con();
-    int element_size = type2aelembytes(t);
-    aligned = ((arrayOopDesc::base_offset_in_bytes(t) + s_offs * element_size) % HeapWordSize == 0) &&
-              ((arrayOopDesc::base_offset_in_bytes(t) + d_offs * element_size) % HeapWordSize == 0);
-    if (s_offs >= d_offs)  disjoint = true;
-  } else if (src_offset == dest_offset && src_offset != NULL) {
-    // This can occur if the offsets are identical non-constants.
-    disjoint = true;
-  }
-
-  return StubRoutines::select_arraycopy_function(t, aligned, disjoint, name, dest_uninitialized);
-}
-
-
 //------------------------------inline_arraycopy-----------------------
 // public static native void java.lang.System.arraycopy(Object src,  int  srcPos,
 //                                                      Object dest, int destPos,
@@ -4704,13 +4614,26 @@
   Node* dest_offset = argument(3);  // type: int
   Node* length      = argument(4);  // type: int
 
-  // Compile time checks.  If any of these checks cannot be verified at compile time,
-  // we do not make a fast path for this call.  Instead, we let the call remain as it
-  // is.  The checks we choose to mandate at compile time are:
-  //
+  // The following tests must be performed:
   // (1) src and dest are arrays.
-  const Type* src_type  = src->Value(&_gvn);
-  const Type* dest_type = dest->Value(&_gvn);
+  // (2) src and dest arrays must have elements of the same BasicType
+  // (3) src and dest must not be null.
+  // (4) src_offset must not be negative.
+  // (5) dest_offset must not be negative.
+  // (6) length must not be negative.
+  // (7) src_offset + length must not exceed length of src.
+  // (8) dest_offset + length must not exceed length of dest.
+  // (9) each element of an oop array must be assignable
+
+  // (3) src and dest must not be null.
+  // always do this here because we need the JVM state for uncommon traps
+  src  = null_check(src,  T_ARRAY);
+  dest = null_check(dest, T_ARRAY);
+
+  bool notest = false;
+
+  const Type* src_type  = _gvn.type(src);
+  const Type* dest_type = _gvn.type(dest);
   const TypeAryPtr* top_src  = src_type->isa_aryptr();
   const TypeAryPtr* top_dest = dest_type->isa_aryptr();
 
@@ -4768,556 +4691,119 @@
     }
   }
 
-  if (!has_src || !has_dest) {
-    // Conservatively insert a memory barrier on all memory slices.
-    // Do not let writes into the source float below the arraycopy.
-    insert_mem_bar(Op_MemBarCPUOrder);
-
-    // Call StubRoutines::generic_arraycopy stub.
-    generate_arraycopy(TypeRawPtr::BOTTOM, T_CONFLICT,
-                       src, src_offset, dest, dest_offset, length);
-
-    // Do not let reads from the destination float above the arraycopy.
-    // Since we cannot type the arrays, we don't know which slices
-    // might be affected.  We could restrict this barrier only to those
-    // memory slices which pertain to array elements--but don't bother.
-    if (!InsertMemBarAfterArraycopy)
-      // (If InsertMemBarAfterArraycopy, there is already one in place.)
-      insert_mem_bar(Op_MemBarCPUOrder);
-    return true;
-  }
-
-  // (2) src and dest arrays must have elements of the same BasicType
-  // Figure out the size and type of the elements we will be copying.
-  BasicType src_elem  =  top_src->klass()->as_array_klass()->element_type()->basic_type();
-  BasicType dest_elem = top_dest->klass()->as_array_klass()->element_type()->basic_type();
-  if (src_elem  == T_ARRAY)  src_elem  = T_OBJECT;
-  if (dest_elem == T_ARRAY)  dest_elem = T_OBJECT;
-
-  if (src_elem != dest_elem || dest_elem == T_VOID) {
-    // The component types are not the same or are not recognized.  Punt.
-    // (But, avoid the native method wrapper to JVM_ArrayCopy.)
-    generate_slow_arraycopy(TypePtr::BOTTOM,
-                            src, src_offset, dest, dest_offset, length,
-                            /*dest_uninitialized*/false);
-    return true;
-  }
-
-  if (src_elem == T_OBJECT) {
-    // If both arrays are object arrays then having the exact types
-    // for both will remove the need for a subtype check at runtime
-    // before the call and may make it possible to pick a faster copy
-    // routine (without a subtype check on every element)
-    // Do we have the exact type of src?
-    bool could_have_src = src_spec;
-    // Do we have the exact type of dest?
-    bool could_have_dest = dest_spec;
-    ciKlass* src_k = top_src->klass();
-    ciKlass* dest_k = top_dest->klass();
-    if (!src_spec) {
-      src_k = src_type->speculative_type_not_null();
-      if (src_k != NULL && src_k->is_array_klass()) {
+  if (has_src && has_dest) {
+    BasicType src_elem  = top_src->klass()->as_array_klass()->element_type()->basic_type();
+    BasicType dest_elem = top_dest->klass()->as_array_klass()->element_type()->basic_type();
+    if (src_elem  == T_ARRAY)  src_elem  = T_OBJECT;
+    if (dest_elem == T_ARRAY)  dest_elem = T_OBJECT;
+
+    if (src_elem == dest_elem && src_elem == T_OBJECT) {
+      // If both arrays are object arrays then having the exact types
+      // for both will remove the need for a subtype check at runtime
+      // before the call and may make it possible to pick a faster copy
+      // routine (without a subtype check on every element)
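+      // (For instance, if profiling shows src is exactly String[] and dest
+      // exactly Object[], the per-element store check can be dropped since
+      // String[] is a subtype of Object[].)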
+      // Do we have the exact type of src?
+      bool could_have_src = src_spec;
+      // Do we have the exact type of dest?
+      bool could_have_dest = dest_spec;
+      ciKlass* src_k = top_src->klass();
+      ciKlass* dest_k = top_dest->klass();
+      if (!src_spec) {
+        src_k = src_type->speculative_type_not_null();
+        if (src_k != NULL && src_k->is_array_klass()) {
           could_have_src = true;
+        }
       }
-    }
-    if (!dest_spec) {
-      dest_k = dest_type->speculative_type_not_null();
-      if (dest_k != NULL && dest_k->is_array_klass()) {
-        could_have_dest = true;
+      if (!dest_spec) {
+        dest_k = dest_type->speculative_type_not_null();
+        if (dest_k != NULL && dest_k->is_array_klass()) {
+          could_have_dest = true;
+        }
       }
-    }
-    if (could_have_src && could_have_dest) {
-      // If we can have both exact types, emit the missing guards
-      if (could_have_src && !src_spec) {
-        src = maybe_cast_profiled_obj(src, src_k);
-      }
-      if (could_have_dest && !dest_spec) {
-        dest = maybe_cast_profiled_obj(dest, dest_k);
+      if (could_have_src && could_have_dest) {
+        // If we can have both exact types, emit the missing guards
+        if (could_have_src && !src_spec) {
+          src = maybe_cast_profiled_obj(src, src_k);
+        }
+        if (could_have_dest && !dest_spec) {
+          dest = maybe_cast_profiled_obj(dest, dest_k);
+        }
       }
     }
   }
 
-  //---------------------------------------------------------------------------
-  // We will make a fast path for this call to arraycopy.
-
-  // We have the following tests left to perform:
-  //
-  // (3) src and dest must not be null.
-  // (4) src_offset must not be negative.
-  // (5) dest_offset must not be negative.
-  // (6) length must not be negative.
-  // (7) src_offset + length must not exceed length of src.
-  // (8) dest_offset + length must not exceed length of dest.
-  // (9) each element of an oop array must be assignable
-
-  RegionNode* slow_region = new RegionNode(1);
-  record_for_igvn(slow_region);
-
-  // (3) operands must not be null
-  // We currently perform our null checks with the null_check routine.
-  // This means that the null exceptions will be reported in the caller
-  // rather than (correctly) reported inside of the native arraycopy call.
-  // This should be corrected, given time.  We do our null check with the
-  // stack pointer restored.
-  src  = null_check(src,  T_ARRAY);
-  dest = null_check(dest, T_ARRAY);
-
-  // (4) src_offset must not be negative.
-  generate_negative_guard(src_offset, slow_region);
-
-  // (5) dest_offset must not be negative.
-  generate_negative_guard(dest_offset, slow_region);
-
-  // (6) length must not be negative (moved to generate_arraycopy()).
-  // generate_negative_guard(length, slow_region);
-
-  // (7) src_offset + length must not exceed length of src.
-  generate_limit_guard(src_offset, length,
-                       load_array_length(src),
-                       slow_region);
-
-  // (8) dest_offset + length must not exceed length of dest.
-  generate_limit_guard(dest_offset, length,
-                       load_array_length(dest),
-                       slow_region);
-
-  // (9) each element of an oop array must be assignable
-  // The generate_arraycopy subroutine checks this.
-
-  // This is where the memory effects are placed:
-  const TypePtr* adr_type = TypeAryPtr::get_array_body_type(dest_elem);
-  generate_arraycopy(adr_type, dest_elem,
-                     src, src_offset, dest, dest_offset, length,
-                     false, false, slow_region);
-
-  return true;
-}
-
-//-----------------------------generate_arraycopy----------------------
-// Generate an optimized call to arraycopy.
-// Caller must guard against non-arrays.
-// Caller must determine a common array basic-type for both arrays.
-// Caller must validate offsets against array bounds.
-// The slow_region has already collected guard failure paths
-// (such as out of bounds length or non-conformable array types).
-// The generated code has this shape, in general:
-//
-//     if (length == 0)  return   // via zero_path
-//     slowval = -1
-//     if (types unknown) {
-//       slowval = call generic copy loop
-//       if (slowval == 0)  return  // via checked_path
-//     } else if (indexes in bounds) {
-//       if ((is object array) && !(array type check)) {
-//         slowval = call checked copy loop
-//         if (slowval == 0)  return  // via checked_path
-//       } else {
-//         call bulk copy loop
-//         return  // via fast_path
-//       }
-//     }
-//     // adjust params for remaining work:
-//     if (slowval != -1) {
-//       n = -1^slowval; src_offset += n; dest_offset += n; length -= n
-//     }
-//   slow_region:
-//     call slow arraycopy(src, src_offset, dest, dest_offset, length)
-//     return  // via slow_call_path
-//
-// This routine is used from several intrinsics:  System.arraycopy,
-// Object.clone (the array subcase), and Arrays.copyOf[Range].
-//
-void
-LibraryCallKit::generate_arraycopy(const TypePtr* adr_type,
-                                   BasicType basic_elem_type,
-                                   Node* src,  Node* src_offset,
-                                   Node* dest, Node* dest_offset,
-                                   Node* copy_length,
-                                   bool disjoint_bases,
-                                   bool length_never_negative,
-                                   RegionNode* slow_region) {
-
-  if (slow_region == NULL) {
-    slow_region = new RegionNode(1);
+  if (!too_many_traps(Deoptimization::Reason_intrinsic) && !src->is_top() && !dest->is_top()) {
+    // validate arguments: enables transformation of the ArrayCopyNode
+    notest = true;
+
+    RegionNode* slow_region = new RegionNode(1);
     record_for_igvn(slow_region);
-  }
-
-  Node* original_dest      = dest;
-  AllocateArrayNode* alloc = NULL;  // used for zeroing, if needed
-  bool  dest_uninitialized = false;
-
-  // See if this is the initialization of a newly-allocated array.
-  // If so, we will take responsibility here for initializing it to zero.
-  // (Note:  Because tightly_coupled_allocation performs checks on the
-  // out-edges of the dest, we need to avoid making derived pointers
-  // from it until we have checked its uses.)
-  if (ReduceBulkZeroing
-      && !ZeroTLAB              // pointless if already zeroed
-      && basic_elem_type != T_CONFLICT // avoid corner case
-      && !src->eqv_uncast(dest)
-      && ((alloc = tightly_coupled_allocation(dest, slow_region))
-          != NULL)
-      && _gvn.find_int_con(alloc->in(AllocateNode::ALength), 1) > 0
-      && alloc->maybe_set_complete(&_gvn)) {
-    // "You break it, you buy it."
-    InitializeNode* init = alloc->initialization();
-    assert(init->is_complete(), "we just did this");
-    init->set_complete_with_arraycopy();
-    assert(dest->is_CheckCastPP(), "sanity");
-    assert(dest->in(0)->in(0) == init, "dest pinned");
-    adr_type = TypeRawPtr::BOTTOM;  // all initializations are into raw memory
-    // From this point on, every exit path is responsible for
-    // initializing any non-copied parts of the object to zero.
-    // Also, if this flag is set we make sure that arraycopy interacts properly
-    // with G1, eliding pre-barriers. See CR 6627983.
-    dest_uninitialized = true;
-  } else {
-    // No zeroing elimination here.
-    alloc             = NULL;
-    //original_dest   = dest;
-    //dest_uninitialized = false;
-  }
-
-  // Results are placed here:
-  enum { fast_path        = 1,  // normal void-returning assembly stub
-         checked_path     = 2,  // special assembly stub with cleanup
-         slow_call_path   = 3,  // something went wrong; call the VM
-         zero_path        = 4,  // bypass when length of copy is zero
-         bcopy_path       = 5,  // copy primitive array by 64-bit blocks
-         PATH_LIMIT       = 6
-  };
-  RegionNode* result_region = new RegionNode(PATH_LIMIT);
-  PhiNode*    result_i_o    = new PhiNode(result_region, Type::ABIO);
-  PhiNode*    result_memory = new PhiNode(result_region, Type::MEMORY, adr_type);
-  record_for_igvn(result_region);
-  _gvn.set_type_bottom(result_i_o);
-  _gvn.set_type_bottom(result_memory);
-  assert(adr_type != TypePtr::BOTTOM, "must be RawMem or a T[] slice");
-
-  // The slow_control path:
-  Node* slow_control;
-  Node* slow_i_o = i_o();
-  Node* slow_mem = memory(adr_type);
-  debug_only(slow_control = (Node*) badAddress);
-
-  // Checked control path:
-  Node* checked_control = top();
-  Node* checked_mem     = NULL;
-  Node* checked_i_o     = NULL;
-  Node* checked_value   = NULL;
-
-  if (basic_elem_type == T_CONFLICT) {
-    assert(!dest_uninitialized, "");
-    Node* cv = generate_generic_arraycopy(adr_type,
-                                          src, src_offset, dest, dest_offset,
-                                          copy_length, dest_uninitialized);
-    if (cv == NULL)  cv = intcon(-1);  // failure (no stub available)
-    checked_control = control();
-    checked_i_o     = i_o();
-    checked_mem     = memory(adr_type);
-    checked_value   = cv;
-    set_control(top());         // no fast path
-  }
-
-  Node* not_pos = generate_nonpositive_guard(copy_length, length_never_negative);
-  if (not_pos != NULL) {
-    PreserveJVMState pjvms(this);
-    set_control(not_pos);
-
-    // (6) length must not be negative.
-    if (!length_never_negative) {
-      generate_negative_guard(copy_length, slow_region);
-    }
-
-    // copy_length is 0.
-    if (!stopped() && dest_uninitialized) {
-      Node* dest_length = alloc->in(AllocateNode::ALength);
-      if (copy_length->eqv_uncast(dest_length)
-          || _gvn.find_int_con(dest_length, 1) <= 0) {
-        // There is no zeroing to do. No need for a secondary raw memory barrier.
-      } else {
-        // Clear the whole thing since there are no source elements to copy.
-        generate_clear_array(adr_type, dest, basic_elem_type,
-                             intcon(0), NULL,
-                             alloc->in(AllocateNode::AllocSize));
-        // Use a secondary InitializeNode as raw memory barrier.
-        // Currently it is needed only on this path since other
-        // paths have stub or runtime calls as raw memory barriers.
-        InitializeNode* init = insert_mem_bar_volatile(Op_Initialize,
-                                                       Compile::AliasIdxRaw,
-                                                       top())->as_Initialize();
-        init->set_complete(&_gvn);  // (there is no corresponding AllocateNode)
-      }
-    }
-
-    // Present the results of the fast call.
-    result_region->init_req(zero_path, control());
-    result_i_o   ->init_req(zero_path, i_o());
-    result_memory->init_req(zero_path, memory(adr_type));
-  }
-
-  if (!stopped() && dest_uninitialized) {
-    // We have to initialize the *uncopied* part of the array to zero.
-    // The copy destination is the slice dest[off..off+len].  The other slices
-    // are dest_head = dest[0..off] and dest_tail = dest[off+len..dest.length].
-    Node* dest_size   = alloc->in(AllocateNode::AllocSize);
-    Node* dest_length = alloc->in(AllocateNode::ALength);
-    Node* dest_tail   = _gvn.transform(new AddINode(dest_offset, copy_length));
-
-    // If there is a head section that needs zeroing, do it now.
-    if (find_int_con(dest_offset, -1) != 0) {
-      generate_clear_array(adr_type, dest, basic_elem_type,
-                           intcon(0), dest_offset,
-                           NULL);
-    }
-
-    // Next, perform a dynamic check on the tail length.
-    // It is often zero, and we can win big if we prove this.
-    // There are two wins:  Avoid generating the ClearArray
-    // with its attendant messy index arithmetic, and upgrade
-    // the copy to a more hardware-friendly word size of 64 bits.
-    Node* tail_ctl = NULL;
-    if (!stopped() && !dest_tail->eqv_uncast(dest_length)) {
-      Node* cmp_lt   = _gvn.transform(new CmpINode(dest_tail, dest_length));
-      Node* bol_lt   = _gvn.transform(new BoolNode(cmp_lt, BoolTest::lt));
-      tail_ctl = generate_slow_guard(bol_lt, NULL);
-      assert(tail_ctl != NULL || !stopped(), "must be an outcome");
-    }
-
-    // At this point, let's assume there is no tail.
-    if (!stopped() && alloc != NULL && basic_elem_type != T_OBJECT) {
-      // There is no tail.  Try an upgrade to a 64-bit copy.
-      bool didit = false;
-      { PreserveJVMState pjvms(this);
-        didit = generate_block_arraycopy(adr_type, basic_elem_type, alloc,
-                                         src, src_offset, dest, dest_offset,
-                                         dest_size, dest_uninitialized);
-        if (didit) {
-          // Present the results of the block-copying fast call.
-          result_region->init_req(bcopy_path, control());
-          result_i_o   ->init_req(bcopy_path, i_o());
-          result_memory->init_req(bcopy_path, memory(adr_type));
-        }
-      }
-      if (didit)
-        set_control(top());     // no regular fast path
-    }
-
-    // Clear the tail, if any.
-    if (tail_ctl != NULL) {
-      Node* notail_ctl = stopped() ? NULL : control();
-      set_control(tail_ctl);
-      if (notail_ctl == NULL) {
-        generate_clear_array(adr_type, dest, basic_elem_type,
-                             dest_tail, NULL,
-                             dest_size);
-      } else {
-        // Make a local merge.
-        Node* done_ctl = new RegionNode(3);
-        Node* done_mem = new PhiNode(done_ctl, Type::MEMORY, adr_type);
-        done_ctl->init_req(1, notail_ctl);
-        done_mem->init_req(1, memory(adr_type));
-        generate_clear_array(adr_type, dest, basic_elem_type,
-                             dest_tail, NULL,
-                             dest_size);
-        done_ctl->init_req(2, control());
-        done_mem->init_req(2, memory(adr_type));
-        set_control( _gvn.transform(done_ctl));
-        set_memory(  _gvn.transform(done_mem), adr_type );
-      }
-    }
-  }
-
-  BasicType copy_type = basic_elem_type;
-  assert(basic_elem_type != T_ARRAY, "caller must fix this");
-  if (!stopped() && copy_type == T_OBJECT) {
-    // If src and dest have compatible element types, we can copy bits.
-    // Types S[] and D[] are compatible if D is a supertype of S.
-    //
-    // If they are not, we will use checked_oop_disjoint_arraycopy,
-    // which performs a fast optimistic per-oop check, and backs off
-    // further to JVM_ArrayCopy on the first per-oop check that fails.
-    // (Actually, we don't move raw bits only; the GC requires card marks.)
-
-    // Get the Klass* for both src and dest
+
+    // (1) src and dest are arrays.
+    generate_non_array_guard(load_object_klass(src), slow_region);
+    generate_non_array_guard(load_object_klass(dest), slow_region);
+
+    // (2) src and dest arrays must have elements of the same BasicType
+    // done at macro expansion or at Ideal transformation time
+
+    // (4) src_offset must not be negative.
+    generate_negative_guard(src_offset, slow_region);
+
+    // (5) dest_offset must not be negative.
+    generate_negative_guard(dest_offset, slow_region);
+
+    // (7) src_offset + length must not exceed length of src.
+    generate_limit_guard(src_offset, length,
+                         load_array_length(src),
+                         slow_region);
+
+    // (8) dest_offset + length must not exceed length of dest.
+    generate_limit_guard(dest_offset, length,
+                         load_array_length(dest),
+                         slow_region);
+
+    // (9) each element of an oop array must be assignable
     Node* src_klass  = load_object_klass(src);
     Node* dest_klass = load_object_klass(dest);
-
-    // Generate the subtype check.
-    // This might fold up statically, or then again it might not.
-    //
-    // Non-static example:  Copying List<String>.elements to a new String[].
-    // The backing store for a List<String> is always an Object[],
-    // but its elements are always type String, if the generic types
-    // are correct at the source level.
-    //
-    // Test S[] against D[], not S against D, because (probably)
-    // the secondary supertype cache is less busy for S[] than S.
-    // This usually only matters when D is an interface.
     Node* not_subtype_ctrl = gen_subtype_check(src_klass, dest_klass);
-    // Plug failing path into checked_oop_disjoint_arraycopy
+
     if (not_subtype_ctrl != top()) {
       PreserveJVMState pjvms(this);
       set_control(not_subtype_ctrl);
-      // (At this point we can assume disjoint_bases, since types differ.)
-      int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
-      Node* p1 = basic_plus_adr(dest_klass, ek_offset);
-      Node* n1 = LoadKlassNode::make(_gvn, immutable_memory(), p1, TypeRawPtr::BOTTOM);
-      Node* dest_elem_klass = _gvn.transform(n1);
-      Node* cv = generate_checkcast_arraycopy(adr_type,
-                                              dest_elem_klass,
-                                              src, src_offset, dest, dest_offset,
-                                              ConvI2X(copy_length), dest_uninitialized);
-      if (cv == NULL)  cv = intcon(-1);  // failure (no stub available)
-      checked_control = control();
-      checked_i_o     = i_o();
-      checked_mem     = memory(adr_type);
-      checked_value   = cv;
+      uncommon_trap(Deoptimization::Reason_intrinsic,
+                    Deoptimization::Action_make_not_entrant);
+      assert(stopped(), "Should be stopped");
     }
-    // At this point we know we do not need type checks on oop stores.
-
-    // Let's see if we need card marks:
-    if (alloc != NULL && use_ReduceInitialCardMarks()) {
-      // If we do not need card marks, copy using the jint or jlong stub.
-      copy_type = LP64_ONLY(UseCompressedOops ? T_INT : T_LONG) NOT_LP64(T_INT);
-      assert(type2aelembytes(basic_elem_type) == type2aelembytes(copy_type),
-             "sizes agree");
+    {
+      PreserveJVMState pjvms(this);
+      set_control(_gvn.transform(slow_region));
+      uncommon_trap(Deoptimization::Reason_intrinsic,
+                    Deoptimization::Action_make_not_entrant);
+      assert(stopped(), "Should be stopped");
     }
   }
 
-  if (!stopped()) {
-    // Generate the fast path, if possible.
-    PreserveJVMState pjvms(this);
-    generate_unchecked_arraycopy(adr_type, copy_type, disjoint_bases,
-                                 src, src_offset, dest, dest_offset,
-                                 ConvI2X(copy_length), dest_uninitialized);
-
-    // Present the results of the fast call.
-    result_region->init_req(fast_path, control());
-    result_i_o   ->init_req(fast_path, i_o());
-    result_memory->init_req(fast_path, memory(adr_type));
+  if (stopped()) {
+    return true;
   }
 
-  // Here are all the slow paths up to this point, in one bundle:
-  slow_control = top();
-  if (slow_region != NULL)
-    slow_control = _gvn.transform(slow_region);
-  DEBUG_ONLY(slow_region = (RegionNode*)badAddress);
-
-  set_control(checked_control);
-  if (!stopped()) {
-    // Clean up after the checked call.
-    // The returned value is either 0 or -1^K,
-    // where K = number of partially transferred array elements.
-    Node* cmp = _gvn.transform(new CmpINode(checked_value, intcon(0)));
-    Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::eq));
-    IfNode* iff = create_and_map_if(control(), bol, PROB_MAX, COUNT_UNKNOWN);
-
-    // If it is 0, we are done, so transfer to the end.
-    Node* checks_done = _gvn.transform(new IfTrueNode(iff));
-    result_region->init_req(checked_path, checks_done);
-    result_i_o   ->init_req(checked_path, checked_i_o);
-    result_memory->init_req(checked_path, checked_mem);
-
-    // If it is not zero, merge into the slow call.
-    set_control( _gvn.transform(new IfFalseNode(iff) ));
-    RegionNode* slow_reg2 = new RegionNode(3);
-    PhiNode*    slow_i_o2 = new PhiNode(slow_reg2, Type::ABIO);
-    PhiNode*    slow_mem2 = new PhiNode(slow_reg2, Type::MEMORY, adr_type);
-    record_for_igvn(slow_reg2);
-    slow_reg2  ->init_req(1, slow_control);
-    slow_i_o2  ->init_req(1, slow_i_o);
-    slow_mem2  ->init_req(1, slow_mem);
-    slow_reg2  ->init_req(2, control());
-    slow_i_o2  ->init_req(2, checked_i_o);
-    slow_mem2  ->init_req(2, checked_mem);
-
-    slow_control = _gvn.transform(slow_reg2);
-    slow_i_o     = _gvn.transform(slow_i_o2);
-    slow_mem     = _gvn.transform(slow_mem2);
-
-    if (alloc != NULL) {
-      // We'll restart from the very beginning, after zeroing the whole thing.
-      // This can cause double writes, but that's OK since dest is brand new.
-      // So we ignore the low 31 bits of the value returned from the stub.
-    } else {
-      // We must continue the copy exactly where it failed, or else
-      // another thread might see the wrong number of writes to dest.
-      Node* checked_offset = _gvn.transform(new XorINode(checked_value, intcon(-1)));
-      Node* slow_offset    = new PhiNode(slow_reg2, TypeInt::INT);
-      slow_offset->init_req(1, intcon(0));
-      slow_offset->init_req(2, checked_offset);
-      slow_offset  = _gvn.transform(slow_offset);
-
-      // Adjust the arguments by the conditionally incoming offset.
-      Node* src_off_plus  = _gvn.transform(new AddINode(src_offset,  slow_offset));
-      Node* dest_off_plus = _gvn.transform(new AddINode(dest_offset, slow_offset));
-      Node* length_minus  = _gvn.transform(new SubINode(copy_length, slow_offset));
-
-      // Tweak the node variables to adjust the code produced below:
-      src_offset  = src_off_plus;
-      dest_offset = dest_off_plus;
-      copy_length = length_minus;
-    }
+  AllocateArrayNode* alloc = tightly_coupled_allocation(dest, NULL);
+  ArrayCopyNode* ac = ArrayCopyNode::make(this, true, src, src_offset, dest, dest_offset, length, alloc != NULL,
+                                          // Create the LoadRange and LoadKlass nodes needed during macro expansion
+                                          // here, so the compiler has a chance to eliminate them; if they were
+                                          // created during macro expansion, we would have to set their control
+                                          // explicitly (the CastPP nodes are eliminated by then).
+                                          load_array_length(src), load_array_length(dest),
+                                          load_object_klass(src), load_object_klass(dest));
+
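+  // notest records that all argument checks were already emitted above, so
+  // later transformations of the ArrayCopyNode need not repeat them.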
+  if (notest) {
+    ac->set_arraycopy_notest();
   }
 
-  set_control(slow_control);
-  if (!stopped()) {
-    // Generate the slow path, if needed.
-    PreserveJVMState pjvms(this);   // replace_in_map may trash the map
-
-    set_memory(slow_mem, adr_type);
-    set_i_o(slow_i_o);
-
-    if (dest_uninitialized) {
-      generate_clear_array(adr_type, dest, basic_elem_type,
-                           intcon(0), NULL,
-                           alloc->in(AllocateNode::AllocSize));
-    }
-
-    generate_slow_arraycopy(adr_type,
-                            src, src_offset, dest, dest_offset,
-                            copy_length, /*dest_uninitialized*/false);
-
-    result_region->init_req(slow_call_path, control());
-    result_i_o   ->init_req(slow_call_path, i_o());
-    result_memory->init_req(slow_call_path, memory(adr_type));
-  }
-
-  // Remove unused edges.
-  for (uint i = 1; i < result_region->req(); i++) {
-    if (result_region->in(i) == NULL)
-      result_region->init_req(i, top());
-  }
-
-  // Finished; return the combined state.
-  set_control( _gvn.transform(result_region));
-  set_i_o(     _gvn.transform(result_i_o)    );
-  set_memory(  _gvn.transform(result_memory), adr_type );
-
-  // The memory edges above are precise in order to model effects around
-  // array copies accurately to allow value numbering of field loads around
-  // arraycopy.  Such field loads, both before and after, are common in Java
-  // collections and similar classes involving header/array data structures.
-  //
-  // But with low number of register or when some registers are used or killed
-  // by arraycopy calls it causes registers spilling on stack. See 6544710.
-  // The next memory barrier is added to avoid it. If the arraycopy can be
-  // optimized away (which it can, sometimes) then we can manually remove
-  // the membar also.
-  //
-  // Do not let reads from the cloned object float above the arraycopy.
-  if (alloc != NULL) {
-    // Do not let stores that initialize this object be reordered with
-    // a subsequent store that would make this object accessible by
-    // other threads.
-    // Record what AllocateNode this StoreStore protects so that
-    // escape analysis can go from the MemBarStoreStoreNode to the
-    // AllocateNode and eliminate the MemBarStoreStoreNode if possible
-    // based on the escape status of the AllocateNode.
-    insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out(AllocateNode::RawAddress));
-  } else if (InsertMemBarAfterArraycopy)
-    insert_mem_bar(Op_MemBarCPUOrder);
+  Node* n = _gvn.transform(ac);
+  assert(n == ac, "cannot disappear");
+  ac->connect_outputs(this);
+
+  return true;
 }
 
 
@@ -5397,305 +4883,6 @@
   return alloc;
 }
 
-// Helper for initialization of arrays, creating a ClearArray.
-// It writes zero bits in [start..end), within the body of an array object.
-// The memory effects are all chained onto the 'adr_type' alias category.
-//
-// Since the object is otherwise uninitialized, we are free
-// to put a little "slop" around the edges of the cleared area,
-// as long as it does not go back into the array's header,
-// or beyond the array end within the heap.
-//
-// The lower edge can be rounded down to the nearest jint and the
-// upper edge can be rounded up to the nearest MinObjAlignmentInBytes.
-//
-// Arguments:
-//   adr_type           memory slice where writes are generated
-//   dest               oop of the destination array
-//   basic_elem_type    element type of the destination
-//   slice_idx          array index of first element to store
-//   slice_len          number of elements to store (or NULL)
-//   dest_size          total size in bytes of the array object
-//
-// Exactly one of slice_len or dest_size must be non-NULL.
-// If dest_size is non-NULL, zeroing extends to the end of the object.
-// If slice_len is non-NULL, the slice_idx value must be a constant.
-void
-LibraryCallKit::generate_clear_array(const TypePtr* adr_type,
-                                     Node* dest,
-                                     BasicType basic_elem_type,
-                                     Node* slice_idx,
-                                     Node* slice_len,
-                                     Node* dest_size) {
-  // one or the other but not both of slice_len and dest_size:
-  assert((slice_len != NULL? 1: 0) + (dest_size != NULL? 1: 0) == 1, "");
-  if (slice_len == NULL)  slice_len = top();
-  if (dest_size == NULL)  dest_size = top();
-
-  // operate on this memory slice:
-  Node* mem = memory(adr_type); // memory slice to operate on
-
-  // scaling and rounding of indexes:
-  int scale = exact_log2(type2aelembytes(basic_elem_type));
-  int abase = arrayOopDesc::base_offset_in_bytes(basic_elem_type);
-  int clear_low = (-1 << scale) & (BytesPerInt  - 1);
-  int bump_bit  = (-1 << scale) & BytesPerInt;
-
-  // determine constant starts and ends
-  const intptr_t BIG_NEG = -128;
-  assert(BIG_NEG + 2*abase < 0, "neg enough");
-  intptr_t slice_idx_con = (intptr_t) find_int_con(slice_idx, BIG_NEG);
-  intptr_t slice_len_con = (intptr_t) find_int_con(slice_len, BIG_NEG);
-  if (slice_len_con == 0) {
-    return;                     // nothing to do here
-  }
-  intptr_t start_con = (abase + (slice_idx_con << scale)) & ~clear_low;
-  intptr_t end_con   = find_intptr_t_con(dest_size, -1);
-  if (slice_idx_con >= 0 && slice_len_con >= 0) {
-    assert(end_con < 0, "not two cons");
-    end_con = round_to(abase + ((slice_idx_con + slice_len_con) << scale),
-                       BytesPerLong);
-  }
-
-  if (start_con >= 0 && end_con >= 0) {
-    // Constant start and end.  Simple.
-    mem = ClearArrayNode::clear_memory(control(), mem, dest,
-                                       start_con, end_con, &_gvn);
-  } else if (start_con >= 0 && dest_size != top()) {
-    // Constant start, pre-rounded end after the tail of the array.
-    Node* end = dest_size;
-    mem = ClearArrayNode::clear_memory(control(), mem, dest,
-                                       start_con, end, &_gvn);
-  } else if (start_con >= 0 && slice_len != top()) {
-    // Constant start, non-constant end.  End needs rounding up.
-    // End offset = round_up(abase + ((slice_idx_con + slice_len) << scale), 8)
-    intptr_t end_base  = abase + (slice_idx_con << scale);
-    int      end_round = (-1 << scale) & (BytesPerLong  - 1);
-    Node*    end       = ConvI2X(slice_len);
-    if (scale != 0)
-      end = _gvn.transform(new LShiftXNode(end, intcon(scale) ));
-    end_base += end_round;
-    end = _gvn.transform(new AddXNode(end, MakeConX(end_base)));
-    end = _gvn.transform(new AndXNode(end, MakeConX(~end_round)));
-    mem = ClearArrayNode::clear_memory(control(), mem, dest,
-                                       start_con, end, &_gvn);
-  } else if (start_con < 0 && dest_size != top()) {
-    // Non-constant start, pre-rounded end after the tail of the array.
-    // This is almost certainly a "round-to-end" operation.
-    Node* start = slice_idx;
-    start = ConvI2X(start);
-    if (scale != 0)
-      start = _gvn.transform(new LShiftXNode( start, intcon(scale) ));
-    start = _gvn.transform(new AddXNode(start, MakeConX(abase)));
-    if ((bump_bit | clear_low) != 0) {
-      int to_clear = (bump_bit | clear_low);
-      // Align up mod 8, then store a jint zero unconditionally
-      // just before the mod-8 boundary.
-      if (((abase + bump_bit) & ~to_clear) - bump_bit
-          < arrayOopDesc::length_offset_in_bytes() + BytesPerInt) {
-        bump_bit = 0;
-        assert((abase & to_clear) == 0, "array base must be long-aligned");
-      } else {
-        // Bump 'start' up to (or past) the next jint boundary:
-        start = _gvn.transform(new AddXNode(start, MakeConX(bump_bit)));
-        assert((abase & clear_low) == 0, "array base must be int-aligned");
-      }
-      // Round bumped 'start' down to jlong boundary in body of array.
-      start = _gvn.transform(new AndXNode(start, MakeConX(~to_clear)));
-      if (bump_bit != 0) {
-        // Store a zero to the immediately preceding jint:
-        Node* x1 = _gvn.transform(new AddXNode(start, MakeConX(-bump_bit)));
-        Node* p1 = basic_plus_adr(dest, x1);
-        mem = StoreNode::make(_gvn, control(), mem, p1, adr_type, intcon(0), T_INT, MemNode::unordered);
-        mem = _gvn.transform(mem);
-      }
-    }
-    Node* end = dest_size; // pre-rounded
-    mem = ClearArrayNode::clear_memory(control(), mem, dest,
-                                       start, end, &_gvn);
-  } else {
-    // Non-constant start, unrounded non-constant end.
-    // (Nobody zeroes a random midsection of an array using this routine.)
-    ShouldNotReachHere();       // fix caller
-  }
-
-  // Done.
-  set_memory(mem, adr_type);
-}
-
-
-bool
-LibraryCallKit::generate_block_arraycopy(const TypePtr* adr_type,
-                                         BasicType basic_elem_type,
-                                         AllocateNode* alloc,
-                                         Node* src,  Node* src_offset,
-                                         Node* dest, Node* dest_offset,
-                                         Node* dest_size, bool dest_uninitialized) {
-  // See if there is an advantage from block transfer.
-  int scale = exact_log2(type2aelembytes(basic_elem_type));
-  if (scale >= LogBytesPerLong)
-    return false;               // it is already a block transfer
-
-  // Look at the alignment of the starting offsets.
-  int abase = arrayOopDesc::base_offset_in_bytes(basic_elem_type);
-
-  intptr_t src_off_con  = (intptr_t) find_int_con(src_offset, -1);
-  intptr_t dest_off_con = (intptr_t) find_int_con(dest_offset, -1);
-  if (src_off_con < 0 || dest_off_con < 0)
-    // At present, we can only understand constants.
-    return false;
-
-  intptr_t src_off  = abase + (src_off_con  << scale);
-  intptr_t dest_off = abase + (dest_off_con << scale);
-
-  if (((src_off | dest_off) & (BytesPerLong-1)) != 0) {
-    // Non-aligned; too bad.
-    // One more chance:  Pick off an initial 32-bit word.
-    // This is a common case, since abase can be odd mod 8.
-    if (((src_off | dest_off) & (BytesPerLong-1)) == BytesPerInt &&
-        ((src_off ^ dest_off) & (BytesPerLong-1)) == 0) {
-      Node* sptr = basic_plus_adr(src,  src_off);
-      Node* dptr = basic_plus_adr(dest, dest_off);
-      Node* sval = make_load(control(), sptr, TypeInt::INT, T_INT, adr_type, MemNode::unordered);
-      store_to_memory(control(), dptr, sval, T_INT, adr_type, MemNode::unordered);
-      src_off += BytesPerInt;
-      dest_off += BytesPerInt;
-    } else {
-      return false;
-    }
-  }
-  assert(src_off % BytesPerLong == 0, "");
-  assert(dest_off % BytesPerLong == 0, "");
-
-  // Do this copy by giant steps.
-  Node* sptr  = basic_plus_adr(src,  src_off);
-  Node* dptr  = basic_plus_adr(dest, dest_off);
-  Node* countx = dest_size;
-  countx = _gvn.transform(new SubXNode(countx, MakeConX(dest_off)));
-  countx = _gvn.transform(new URShiftXNode(countx, intcon(LogBytesPerLong)));
-
-  bool disjoint_bases = true;   // since alloc != NULL
-  generate_unchecked_arraycopy(adr_type, T_LONG, disjoint_bases,
-                               sptr, NULL, dptr, NULL, countx, dest_uninitialized);
-
-  return true;
-}
-
-
-// Helper function; generates code for the slow case.
-// We make a call to a runtime method which emulates the native method,
-// but without the native wrapper overhead.
-void
-LibraryCallKit::generate_slow_arraycopy(const TypePtr* adr_type,
-                                        Node* src,  Node* src_offset,
-                                        Node* dest, Node* dest_offset,
-                                        Node* copy_length, bool dest_uninitialized) {
-  assert(!dest_uninitialized, "Invariant");
-  Node* call = make_runtime_call(RC_NO_LEAF | RC_UNCOMMON,
-                                 OptoRuntime::slow_arraycopy_Type(),
-                                 OptoRuntime::slow_arraycopy_Java(),
-                                 "slow_arraycopy", adr_type,
-                                 src, src_offset, dest, dest_offset,
-                                 copy_length);
-
-  // Handle exceptions thrown by this fellow:
-  make_slow_call_ex(call, env()->Throwable_klass(), false);
-}
-
-// Helper function; generates code for cases requiring runtime checks.
-Node*
-LibraryCallKit::generate_checkcast_arraycopy(const TypePtr* adr_type,
-                                             Node* dest_elem_klass,
-                                             Node* src,  Node* src_offset,
-                                             Node* dest, Node* dest_offset,
-                                             Node* copy_length, bool dest_uninitialized) {
-  if (stopped())  return NULL;
-
-  address copyfunc_addr = StubRoutines::checkcast_arraycopy(dest_uninitialized);
-  if (copyfunc_addr == NULL) { // Stub was not generated, go slow path.
-    return NULL;
-  }
-
-  // Pick out the parameters required to perform a store-check
-  // for the target array.  This is an optimistic check.  It will
-  // look in each non-null element's class, at the desired klass's
-  // super_check_offset, for the desired klass.
-  int sco_offset = in_bytes(Klass::super_check_offset_offset());
-  Node* p3 = basic_plus_adr(dest_elem_klass, sco_offset);
-  Node* n3 = new LoadINode(NULL, memory(p3), p3, _gvn.type(p3)->is_ptr(), TypeInt::INT, MemNode::unordered);
-  Node* check_offset = ConvI2X(_gvn.transform(n3));
-  Node* check_value  = dest_elem_klass;
-
-  Node* src_start  = array_element_address(src,  src_offset,  T_OBJECT);
-  Node* dest_start = array_element_address(dest, dest_offset, T_OBJECT);
-
-  // (We know the arrays are never conjoint, because their types differ.)
-  Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
-                                 OptoRuntime::checkcast_arraycopy_Type(),
-                                 copyfunc_addr, "checkcast_arraycopy", adr_type,
-                                 // five arguments, of which two are
-                                 // intptr_t (jlong in LP64)
-                                 src_start, dest_start,
-                                 copy_length XTOP,
-                                 check_offset XTOP,
-                                 check_value);
-
-  return _gvn.transform(new ProjNode(call, TypeFunc::Parms));
-}
-
-
-// Helper function; generates code for cases requiring runtime checks.
-Node*
-LibraryCallKit::generate_generic_arraycopy(const TypePtr* adr_type,
-                                           Node* src,  Node* src_offset,
-                                           Node* dest, Node* dest_offset,
-                                           Node* copy_length, bool dest_uninitialized) {
-  assert(!dest_uninitialized, "Invariant");
-  if (stopped())  return NULL;
-  address copyfunc_addr = StubRoutines::generic_arraycopy();
-  if (copyfunc_addr == NULL) { // Stub was not generated, go slow path.
-    return NULL;
-  }
-
-  Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
-                    OptoRuntime::generic_arraycopy_Type(),
-                    copyfunc_addr, "generic_arraycopy", adr_type,
-                    src, src_offset, dest, dest_offset, copy_length);
-
-  return _gvn.transform(new ProjNode(call, TypeFunc::Parms));
-}
-
-// Helper function; generates the fast out-of-line call to an arraycopy stub.
-void
-LibraryCallKit::generate_unchecked_arraycopy(const TypePtr* adr_type,
-                                             BasicType basic_elem_type,
-                                             bool disjoint_bases,
-                                             Node* src,  Node* src_offset,
-                                             Node* dest, Node* dest_offset,
-                                             Node* copy_length, bool dest_uninitialized) {
-  if (stopped())  return;               // nothing to do
-
-  Node* src_start  = src;
-  Node* dest_start = dest;
-  if (src_offset != NULL || dest_offset != NULL) {
-    assert(src_offset != NULL && dest_offset != NULL, "");
-    src_start  = array_element_address(src,  src_offset,  basic_elem_type);
-    dest_start = array_element_address(dest, dest_offset, basic_elem_type);
-  }
-
-  // Figure out which arraycopy runtime method to call.
-  const char* copyfunc_name = "arraycopy";
-  address     copyfunc_addr =
-      basictype2arraycopy(basic_elem_type, src_offset, dest_offset,
-                          disjoint_bases, copyfunc_name, dest_uninitialized);
-
-  // Call it.  Note that the count_ix value is not scaled to a byte-size.
-  make_runtime_call(RC_LEAF|RC_NO_FP,
-                    OptoRuntime::fast_arraycopy_Type(),
-                    copyfunc_addr, copyfunc_name, adr_type,
-                    src_start, dest_start, copy_length XTOP);
-}
-
 //-------------inline_encodeISOArray-----------------------------------
 // encode char[] to byte[] in ISO_8859_1
 bool LibraryCallKit::inline_encodeISOArray() {
@@ -5915,8 +5102,19 @@
     type = Type::get_const_basic_type(bt);
   }
 
+  if (support_IRIW_for_not_multiple_copy_atomic_cpu && is_vol) {
+    insert_mem_bar(Op_MemBarVolatile);   // StoreLoad barrier
+  }
   // Build the load.
-  Node* loadedField = make_load(NULL, adr, type, bt, adr_type, MemNode::unordered, is_vol);
+  MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
+  Node* loadedField = make_load(NULL, adr, type, bt, adr_type, mo, is_vol);
+  // If the reference is volatile, prevent following memory ops from
+  // floating up past the volatile read.  This also prevents the
+  // commoning of another volatile read.
+  if (is_vol) {
+    // Memory barrier includes bogus read of value to force load BEFORE membar
+    insert_mem_bar(Op_MemBarAcquire, loadedField);
+  }
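+  // The resulting shape for a volatile load is therefore:
+  //   [MemBarVolatile on non-multiple-copy-atomic CPUs] -> load -> MemBarAcquire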
   return loadedField;
 }
 
--- a/hotspot/src/share/vm/opto/loopPredicate.cpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/opto/loopPredicate.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -763,9 +763,7 @@
         loop->dump_head();
       }
 #endif
-    } else if ((cl != NULL) && (proj->_con == predicate_proj->_con) &&
-               loop->is_range_check_if(iff, this, invar)) {
-
+    } else if (cl != NULL && loop->is_range_check_if(iff, this, invar)) {
       // Range check for counted loops
       const Node*    cmp    = bol->in(1)->as_Cmp();
       Node*          idx    = cmp->in(1);
@@ -800,18 +798,31 @@
       }
 
       // Test the lower bound
-      Node*  lower_bound_bol = rc_predicate(loop, ctrl, scale, offset, init, limit, stride, rng, false);
+      BoolNode*  lower_bound_bol = rc_predicate(loop, ctrl, scale, offset, init, limit, stride, rng, false);
+      // Negate test if necessary
+      bool negated = false;
+      if (proj->_con != predicate_proj->_con) {
+        lower_bound_bol = new BoolNode(lower_bound_bol->in(1), lower_bound_bol->_test.negate());
+        register_new_node(lower_bound_bol, ctrl);
+        negated = true;
+      }
       IfNode* lower_bound_iff = lower_bound_proj->in(0)->as_If();
       _igvn.hash_delete(lower_bound_iff);
       lower_bound_iff->set_req(1, lower_bound_bol);
-      if (TraceLoopPredicate) tty->print_cr("lower bound check if: %d", lower_bound_iff->_idx);
+      if (TraceLoopPredicate) tty->print_cr("lower bound check if:%s %d", negated ? " negated" : "", lower_bound_iff->_idx);
 
       // Test the upper bound
-      Node* upper_bound_bol = rc_predicate(loop, lower_bound_proj, scale, offset, init, limit, stride, rng, true);
+      BoolNode* upper_bound_bol = rc_predicate(loop, lower_bound_proj, scale, offset, init, limit, stride, rng, true);
+      negated = false;
+      if (proj->_con != predicate_proj->_con) {
+        upper_bound_bol = new BoolNode(upper_bound_bol->in(1), upper_bound_bol->_test.negate());
+        register_new_node(upper_bound_bol, ctrl);
+        negated = true;
+      }
       IfNode* upper_bound_iff = upper_bound_proj->in(0)->as_If();
       _igvn.hash_delete(upper_bound_iff);
       upper_bound_iff->set_req(1, upper_bound_bol);
-      if (TraceLoopPredicate) tty->print_cr("upper bound check if: %d", lower_bound_iff->_idx);
+      if (TraceLoopPredicate) tty->print_cr("upper bound check if:%s %d", negated ? " negated" : "", upper_bound_iff->_idx);
 
       // Fall through into rest of the clean up code which will move
       // any dependent nodes onto the upper bound test.
--- a/hotspot/src/share/vm/opto/macro.cpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/opto/macro.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -94,7 +94,8 @@
     newcall->add_req(old_in);
   }
 
-  newcall->set_jvms(oldcall->jvms());
+  // JVMS may be shared so clone it before we modify it
+  newcall->set_jvms(oldcall->jvms() != NULL ? oldcall->jvms()->clone_deep(C) : NULL);
   for (JVMState *jvms = newcall->jvms(); jvms != NULL; jvms = jvms->caller()) {
     jvms->set_map(newcall);
     jvms->set_locoff(jvms->locoff()+jvms_adj);
@@ -2469,6 +2470,8 @@
         assert(!n->as_AbstractLock()->is_eliminated(), "sanity");
         _has_locks = true;
         break;
+      case Node::Class_ArrayCopy:
+        break;
       default:
         assert(n->Opcode() == Op_LoopLimit ||
                n->Opcode() == Op_Opaque1   ||
@@ -2544,6 +2547,25 @@
     }
   }
 
+  // expand arraycopy "macro" nodes first
+  // For ReduceBulkZeroing, we must first process all arraycopy nodes
+  // before the allocate nodes are expanded.
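+  // (If the allocations were expanded first, an arraycopy could no longer
+  //  recognize its tightly coupled allocation and take over its zeroing.)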
+  int macro_idx = C->macro_count() - 1;
+  while (macro_idx >= 0) {
+    Node * n = C->macro_node(macro_idx);
+    assert(n->is_macro(), "only macro nodes expected here");
+    if (_igvn.type(n) == Type::TOP || n->in(0)->is_top()) {
+      // node is unreachable, so don't try to expand it
+      C->remove_macro_node(n);
+    } else if (n->is_ArrayCopy()) {
+      int macro_count = C->macro_count();
+      expand_arraycopy_node(n->as_ArrayCopy());
+      assert(C->macro_count() < macro_count, "must have deleted a node from macro list");
+    }
+    if (C->failing())  return true;
+    macro_idx--;
+  }
+
   // expand "macro" nodes
   // nodes are removed from the macro list as they are processed
   while (C->macro_count() > 0) {
--- a/hotspot/src/share/vm/opto/macro.hpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/opto/macro.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -37,7 +37,7 @@
 private:
   PhaseIterGVN &_igvn;
 
-  // Helper methods roughly modelled after GraphKit:
+  // Helper methods roughly modeled after GraphKit:
   Node* top()                   const { return C->top(); }
   Node* intcon(jint con)        const { return _igvn.intcon(con); }
   Node* longcon(jlong con)      const { return _igvn.longcon(con); }
@@ -101,6 +101,86 @@
   void expand_lock_node(LockNode *lock);
   void expand_unlock_node(UnlockNode *unlock);
 
+  // More helper methods modeled after GraphKit for array copy
+  void insert_mem_bar(Node** ctrl, Node** mem, int opcode, Node* precedent = NULL);
+  Node* array_element_address(Node* ary, Node* idx, BasicType elembt);
+  Node* ConvI2L(Node* offset);
+  Node* make_leaf_call(Node* ctrl, Node* mem,
+                       const TypeFunc* call_type, address call_addr,
+                       const char* call_name,
+                       const TypePtr* adr_type,
+                       Node* parm0 = NULL, Node* parm1 = NULL,
+                       Node* parm2 = NULL, Node* parm3 = NULL,
+                       Node* parm4 = NULL, Node* parm5 = NULL,
+                       Node* parm6 = NULL, Node* parm7 = NULL);
+
+  // Helper methods modeled after LibraryCallKit for array copy
+  Node* generate_guard(Node** ctrl, Node* test, RegionNode* region, float true_prob);
+  Node* generate_slow_guard(Node** ctrl, Node* test, RegionNode* region);
+  void generate_negative_guard(Node** ctrl, Node* index, RegionNode* region);
+  void generate_limit_guard(Node** ctrl, Node* offset, Node* subseq_length, Node* array_length, RegionNode* region);
+
+  // More helper methods for array copy
+  Node* generate_nonpositive_guard(Node** ctrl, Node* index, bool never_negative);
+  void finish_arraycopy_call(Node* call, Node** ctrl, MergeMemNode** mem, const TypePtr* adr_type);
+  address basictype2arraycopy(BasicType t,
+                              Node* src_offset,
+                              Node* dest_offset,
+                              bool disjoint_bases,
+                              const char* &name,
+                              bool dest_uninitialized);
+  Node* generate_arraycopy(ArrayCopyNode *ac,
+                           AllocateArrayNode* alloc,
+                           Node** ctrl, MergeMemNode* mem, Node** io,
+                           const TypePtr* adr_type,
+                           BasicType basic_elem_type,
+                           Node* src,  Node* src_offset,
+                           Node* dest, Node* dest_offset,
+                           Node* copy_length,
+                           bool disjoint_bases = false,
+                           bool length_never_negative = false,
+                           RegionNode* slow_region = NULL);
+  void generate_clear_array(Node* ctrl, MergeMemNode* merge_mem,
+                            const TypePtr* adr_type,
+                            Node* dest,
+                            BasicType basic_elem_type,
+                            Node* slice_idx,
+                            Node* slice_len,
+                            Node* dest_size);
+  bool generate_block_arraycopy(Node** ctrl, MergeMemNode** mem, Node* io,
+                                const TypePtr* adr_type,
+                                BasicType basic_elem_type,
+                                AllocateNode* alloc,
+                                Node* src,  Node* src_offset,
+                                Node* dest, Node* dest_offset,
+                                Node* dest_size, bool dest_uninitialized);
+  MergeMemNode* generate_slow_arraycopy(ArrayCopyNode *ac,
+                                        Node** ctrl, Node* mem, Node** io,
+                                        const TypePtr* adr_type,
+                                        Node* src,  Node* src_offset,
+                                        Node* dest, Node* dest_offset,
+                                        Node* copy_length, bool dest_uninitialized);
+  Node* generate_checkcast_arraycopy(Node** ctrl, MergeMemNode** mem,
+                                     const TypePtr* adr_type,
+                                     Node* dest_elem_klass,
+                                     Node* src,  Node* src_offset,
+                                     Node* dest, Node* dest_offset,
+                                     Node* copy_length, bool dest_uninitialized);
+  Node* generate_generic_arraycopy(Node** ctrl, MergeMemNode** mem,
+                                   const TypePtr* adr_type,
+                                   Node* src,  Node* src_offset,
+                                   Node* dest, Node* dest_offset,
+                                   Node* copy_length, bool dest_uninitialized);
+  void generate_unchecked_arraycopy(Node** ctrl, MergeMemNode** mem,
+                                    const TypePtr* adr_type,
+                                    BasicType basic_elem_type,
+                                    bool disjoint_bases,
+                                    Node* src,  Node* src_offset,
+                                    Node* dest, Node* dest_offset,
+                                    Node* copy_length, bool dest_uninitialized);
+
+  void expand_arraycopy_node(ArrayCopyNode *ac);
+
   int replace_input(Node *use, Node *oldref, Node *newref);
   void copy_call_debug_info(CallNode *oldcall, CallNode * newcall);
   Node* opt_bits_test(Node* ctrl, Node* region, int edge, Node* word, int mask, int bits, bool return_fast_path = false);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/opto/macroArrayCopy.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -0,0 +1,1233 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "oops/objArrayKlass.hpp"
+#include "opto/convertnode.hpp"
+#include "opto/graphKit.hpp"
+#include "opto/macro.hpp"
+#include "opto/runtime.hpp"
+
+
+void PhaseMacroExpand::insert_mem_bar(Node** ctrl, Node** mem, int opcode, Node* precedent) {
+  MemBarNode* mb = MemBarNode::make(C, opcode, Compile::AliasIdxBot, precedent);
+  mb->init_req(TypeFunc::Control, *ctrl);
+  mb->init_req(TypeFunc::Memory, *mem);
+  transform_later(mb);
+  *ctrl = new ProjNode(mb, TypeFunc::Control);
+  transform_later(*ctrl);
+  Node* mem_proj = new ProjNode(mb, TypeFunc::Memory);
+  transform_later(mem_proj);
+  *mem = mem_proj;
+}
+
+Node* PhaseMacroExpand::array_element_address(Node* ary, Node* idx, BasicType elembt) {
+  uint shift  = exact_log2(type2aelembytes(elembt));
+  uint header = arrayOopDesc::base_offset_in_bytes(elembt);
+  Node* base =  basic_plus_adr(ary, header);
+#ifdef _LP64
+  // see comment in GraphKit::array_element_address
+  int index_max = max_jint - 1;  // array size is max_jint, index is one less
+  const TypeLong* lidxtype = TypeLong::make(CONST64(0), index_max, Type::WidenMax);
+  idx = transform_later( new ConvI2LNode(idx, lidxtype) );
+#endif
+  Node* scale = new LShiftXNode(idx, intcon(shift));
+  transform_later(scale);
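+  // The returned address is ary + header + ((long)idx << shift), mirroring
+  // GraphKit::array_element_address (e.g. shift == 2 for a T_INT element).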
+  return basic_plus_adr(ary, base, scale);
+}
+
+Node* PhaseMacroExpand::ConvI2L(Node* offset) {
+  return transform_later(new ConvI2LNode(offset));
+}
+
+Node* PhaseMacroExpand::make_leaf_call(Node* ctrl, Node* mem,
+                                       const TypeFunc* call_type, address call_addr,
+                                       const char* call_name,
+                                       const TypePtr* adr_type,
+                                       Node* parm0, Node* parm1,
+                                       Node* parm2, Node* parm3,
+                                       Node* parm4, Node* parm5,
+                                       Node* parm6, Node* parm7) {
+  int size = call_type->domain()->cnt();
+  Node* call = new CallLeafNoFPNode(call_type, call_addr, call_name, adr_type);
+  call->init_req(TypeFunc::Control, ctrl);
+  call->init_req(TypeFunc::I_O    , top());
+  call->init_req(TypeFunc::Memory , mem);
+  call->init_req(TypeFunc::ReturnAdr, top());
+  call->init_req(TypeFunc::FramePtr, top());
+
+  // Hook each parm in order.  Stop looking at the first NULL.
+  if (parm0 != NULL) { call->init_req(TypeFunc::Parms+0, parm0);
+  if (parm1 != NULL) { call->init_req(TypeFunc::Parms+1, parm1);
+  if (parm2 != NULL) { call->init_req(TypeFunc::Parms+2, parm2);
+  if (parm3 != NULL) { call->init_req(TypeFunc::Parms+3, parm3);
+  if (parm4 != NULL) { call->init_req(TypeFunc::Parms+4, parm4);
+  if (parm5 != NULL) { call->init_req(TypeFunc::Parms+5, parm5);
+  if (parm6 != NULL) { call->init_req(TypeFunc::Parms+6, parm6);
+  if (parm7 != NULL) { call->init_req(TypeFunc::Parms+7, parm7);
+    /* close each nested if ===> */  } } } } } } } }
+  assert(call->in(call->req()-1) != NULL, "must initialize all parms");
+
+  return call;
+}
+
+
+//------------------------------generate_guard---------------------------
+// Helper function for generating guarded fast-slow graph structures.
+// The given 'test', if true, guards a slow path.  If the test fails
+// then a fast path can be taken.  (We generally hope it fails.)
+// In all cases, GraphKit::control() is updated to the fast path.
+// The returned value represents the control for the slow path.
+// The return value is never 'top'; it is either a valid control
+// or NULL if it is obvious that the slow path can never be taken.
+// Also, if region and the slow control are not NULL, the slow edge
+// is appended to the region.
+Node* PhaseMacroExpand::generate_guard(Node** ctrl, Node* test, RegionNode* region, float true_prob) {
+  if ((*ctrl)->is_top()) {
+    // Already short circuited.
+    return NULL;
+  }
+  // Build an if node and its projections.
+  // If test is true we take the slow path, which we assume is uncommon.
+  if (_igvn.type(test) == TypeInt::ZERO) {
+    // The slow branch is never taken.  No need to build this guard.
+    return NULL;
+  }
+
+  IfNode* iff = new IfNode(*ctrl, test, true_prob, COUNT_UNKNOWN);
+  transform_later(iff);
+
+  Node* if_slow = new IfTrueNode(iff);
+  transform_later(if_slow);
+
+  if (region != NULL) {
+    region->add_req(if_slow);
+  }
+
+  Node* if_fast = new IfFalseNode(iff);
+  transform_later(if_fast);
+
+  *ctrl = if_fast;
+
+  return if_slow;
+}
+
+inline Node* PhaseMacroExpand::generate_slow_guard(Node** ctrl, Node* test, RegionNode* region) {
+  return generate_guard(ctrl, test, region, PROB_UNLIKELY_MAG(3));
+}
+
+void PhaseMacroExpand::generate_negative_guard(Node** ctrl, Node* index, RegionNode* region) {
+  if ((*ctrl)->is_top())
+    return;                // already stopped
+  if (_igvn.type(index)->higher_equal(TypeInt::POS)) // [0,maxint]
+    return;                // index is already adequately typed
+  Node* cmp_lt = new CmpINode(index, intcon(0));
+  transform_later(cmp_lt);
+  Node* bol_lt = new BoolNode(cmp_lt, BoolTest::lt);
+  transform_later(bol_lt);
+  generate_guard(ctrl, bol_lt, region, PROB_MIN);
+}
+
+void PhaseMacroExpand::generate_limit_guard(Node** ctrl, Node* offset, Node* subseq_length, Node* array_length, RegionNode* region) {
+  if ((*ctrl)->is_top())
+    return;                // already stopped
+  bool zero_offset = _igvn.type(offset) == TypeInt::ZERO;
+  if (zero_offset && subseq_length->eqv_uncast(array_length))
+    return;                // common case of whole-array copy
+  Node* last = subseq_length;
+  if (!zero_offset) {            // last += offset
+    last = new AddINode(last, offset);
+    transform_later(last);
+  }
+  Node* cmp_lt = new CmpUNode(array_length, last);
+  transform_later(cmp_lt);
+  Node* bol_lt = new BoolNode(cmp_lt, BoolTest::lt);
+  transform_later(bol_lt);
+  generate_guard(ctrl, bol_lt, region, PROB_MIN);
+}
+
+Node* PhaseMacroExpand::generate_nonpositive_guard(Node** ctrl, Node* index, bool never_negative) {
+  if ((*ctrl)->is_top())  return NULL;
+
+  if (_igvn.type(index)->higher_equal(TypeInt::POS1)) // [1,maxint]
+    return NULL;                // index is already adequately typed
+  Node* cmp_le = new CmpINode(index, intcon(0));
+  transform_later(cmp_le);
+  BoolTest::mask le_or_eq = (never_negative ? BoolTest::eq : BoolTest::le);
+  Node* bol_le = new BoolNode(cmp_le, le_or_eq);
+  transform_later(bol_le);
+  Node* is_notp = generate_guard(ctrl, bol_le, NULL, PROB_MIN);
+
+  return is_notp;
+}
+
+void PhaseMacroExpand::finish_arraycopy_call(Node* call, Node** ctrl, MergeMemNode** mem, const TypePtr* adr_type) {
+  transform_later(call);
+
+  *ctrl = new ProjNode(call,TypeFunc::Control);
+  transform_later(*ctrl);
+  Node* newmem = new ProjNode(call, TypeFunc::Memory);
+  transform_later(newmem);
+
+  uint alias_idx = C->get_alias_index(adr_type);
+  if (alias_idx != Compile::AliasIdxBot) {
+    *mem = MergeMemNode::make(*mem);
+    (*mem)->set_memory_at(alias_idx, newmem);
+  } else {
+    *mem = MergeMemNode::make(newmem);
+  }
+  transform_later(*mem);
+}
+
+address PhaseMacroExpand::basictype2arraycopy(BasicType t,
+                                              Node* src_offset,
+                                              Node* dest_offset,
+                                              bool disjoint_bases,
+                                              const char* &name,
+                                              bool dest_uninitialized) {
+  const TypeInt* src_offset_inttype  = _igvn.find_int_type(src_offset);
+  const TypeInt* dest_offset_inttype = _igvn.find_int_type(dest_offset);
+
+  bool aligned = false;
+  bool disjoint = disjoint_bases;
+
+  // If the offsets are the same, we can treat the memory regions as
+  // disjoint, because either the memory regions are in different arrays,
+  // or they are identical (which we can treat as disjoint).  We can also
+  // treat a copy with a destination index less than the source index
+  // as disjoint, since a low->high copy will work correctly in this case.
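+  //
+  // Worked example (base offset illustrative, HeapWordSize == 8 on LP64):
+  // for T_INT (element_size == 4) with a 16-byte base offset, s_offs == 2
+  // and d_offs == 0 give (16 + 8) % 8 == 0 and (16 + 0) % 8 == 0, so the
+  // copy is aligned; and since s_offs >= d_offs, a low->high copy cannot
+  // clobber unread source elements, so it is also treated as disjoint.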
+  if (src_offset_inttype != NULL && src_offset_inttype->is_con() &&
+      dest_offset_inttype != NULL && dest_offset_inttype->is_con()) {
+    // both indices are constants
+    int s_offs = src_offset_inttype->get_con();
+    int d_offs = dest_offset_inttype->get_con();
+    int element_size = type2aelembytes(t);
+    aligned = ((arrayOopDesc::base_offset_in_bytes(t) + s_offs * element_size) % HeapWordSize == 0) &&
+              ((arrayOopDesc::base_offset_in_bytes(t) + d_offs * element_size) % HeapWordSize == 0);
+    if (s_offs >= d_offs)  disjoint = true;
+  } else if (src_offset == dest_offset && src_offset != NULL) {
+    // This can occur if the offsets are identical non-constants.
+    disjoint = true;
+  }
+
+  return StubRoutines::select_arraycopy_function(t, aligned, disjoint, name, dest_uninitialized);
+}
+
+#define COMMA ,
+#define XTOP LP64_ONLY(COMMA top())
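+// On LP64, a 64-bit value occupies two ideal parameter slots, so XTOP
+// appends a top() placeholder for the unused high half, e.g.:
+//   make_leaf_call(*ctrl, *mem, call_type, copyfunc_addr, copyfunc_name,
+//                  adr_type, src_start, dest_start, copy_length XTOP);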
+
+// Generate an optimized call to arraycopy.
+// Caller must guard against non-arrays.
+// Caller must determine a common array basic-type for both arrays.
+// Caller must validate offsets against array bounds.
+// The slow_region has already collected guard failure paths
+// (such as out of bounds length or non-conformable array types).
+// The generated code has this shape, in general:
+//
+//     if (length == 0)  return   // via zero_path
+//     slowval = -1
+//     if (types unknown) {
+//       slowval = call generic copy loop
+//       if (slowval == 0)  return  // via checked_path
+//     } else if (indexes in bounds) {
+//       if ((is object array) && !(array type check)) {
+//         slowval = call checked copy loop
+//         if (slowval == 0)  return  // via checked_path
+//       } else {
+//         call bulk copy loop
+//         return  // via fast_path
+//       }
+//     }
+//     // adjust params for remaining work:
+//     if (slowval != -1) {
+//       n = -1^slowval; src_offset += n; dest_offset += n; length -= n
+//     }
+//   slow_region:
+//     call slow arraycopy(src, src_offset, dest, dest_offset, length)
+//     return  // via slow_call_path
+//
+// This routine is used from several intrinsics:  System.arraycopy,
+// Object.clone (the array subcase), and Arrays.copyOf[Range].
+//
+Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode* alloc,
+                                           Node** ctrl, MergeMemNode* mem, Node** io,
+                                           const TypePtr* adr_type,
+                                           BasicType basic_elem_type,
+                                           Node* src,  Node* src_offset,
+                                           Node* dest, Node* dest_offset,
+                                           Node* copy_length,
+                                           bool disjoint_bases,
+                                           bool length_never_negative,
+                                           RegionNode* slow_region) {
+  if (slow_region == NULL) {
+    slow_region = new RegionNode(1);
+    transform_later(slow_region);
+  }
+
+  Node* original_dest      = dest;
+  bool  dest_uninitialized = false;
+
+  // See if this is the initialization of a newly-allocated array.
+  // If so, we will take responsibility here for initializing it to zero.
+  // (Note:  Because tightly_coupled_allocation performs checks on the
+  // out-edges of the dest, we need to avoid making derived pointers
+  // from it until we have checked its uses.)
+  if (ReduceBulkZeroing
+      && !ZeroTLAB              // pointless if already zeroed
+      && basic_elem_type != T_CONFLICT // avoid corner case
+      && !src->eqv_uncast(dest)
+      && alloc != NULL
+      && _igvn.find_int_con(alloc->in(AllocateNode::ALength), 1) > 0
+      && alloc->maybe_set_complete(&_igvn)) {
+    // "You break it, you buy it."
+    InitializeNode* init = alloc->initialization();
+    assert(init->is_complete(), "we just did this");
+    init->set_complete_with_arraycopy();
+    assert(dest->is_CheckCastPP(), "sanity");
+    assert(dest->in(0)->in(0) == init, "dest pinned");
+    adr_type = TypeRawPtr::BOTTOM;  // all initializations are into raw memory
+    // From this point on, every exit path is responsible for
+    // initializing any non-copied parts of the object to zero.
+    // Also, if this flag is set we make sure that arraycopy interacts properly
+    // with G1, eliding pre-barriers. See CR 6627983.
+    dest_uninitialized = true;
+  } else {
+    // No zeroing elimination here.
+    alloc             = NULL;
+    //original_dest   = dest;
+    //dest_uninitialized = false;
+  }
+
+  uint alias_idx = C->get_alias_index(adr_type);
+
+  // Results are placed here:
+  enum { fast_path        = 1,  // normal void-returning assembly stub
+         checked_path     = 2,  // special assembly stub with cleanup
+         slow_call_path   = 3,  // something went wrong; call the VM
+         zero_path        = 4,  // bypass when length of copy is zero
+         bcopy_path       = 5,  // copy primitive array by 64-bit blocks
+         PATH_LIMIT       = 6
+  };
+  RegionNode* result_region = new RegionNode(PATH_LIMIT);
+  PhiNode*    result_i_o    = new PhiNode(result_region, Type::ABIO);
+  PhiNode*    result_memory = new PhiNode(result_region, Type::MEMORY, adr_type);
+  assert(adr_type != TypePtr::BOTTOM, "must be RawMem or a T[] slice");
+  transform_later(result_region);
+  transform_later(result_i_o);
+  transform_later(result_memory);
+
+  // The slow_control path:
+  Node* slow_control;
+  Node* slow_i_o = *io;
+  Node* slow_mem = mem->memory_at(alias_idx);
+  DEBUG_ONLY(slow_control = (Node*) badAddress);
+
+  // Checked control path:
+  Node* checked_control = top();
+  Node* checked_mem     = NULL;
+  Node* checked_i_o     = NULL;
+  Node* checked_value   = NULL;
+
+  if (basic_elem_type == T_CONFLICT) {
+    assert(!dest_uninitialized, "");
+    Node* cv = generate_generic_arraycopy(ctrl, &mem,
+                                          adr_type,
+                                          src, src_offset, dest, dest_offset,
+                                          copy_length, dest_uninitialized);
+    if (cv == NULL)  cv = intcon(-1);  // failure (no stub available)
+    checked_control = *ctrl;
+    checked_i_o     = *io;
+    checked_mem     = mem->memory_at(alias_idx);
+    checked_value   = cv;
+    *ctrl = top();
+  }
+
+  Node* not_pos = generate_nonpositive_guard(ctrl, copy_length, length_never_negative);
+  if (not_pos != NULL) {
+    Node* local_ctrl = not_pos, *local_io = *io;
+    MergeMemNode* local_mem = MergeMemNode::make(mem);
+    transform_later(local_mem);
+
+    // (6) length must not be negative.
+    if (!length_never_negative) {
+      generate_negative_guard(&local_ctrl, copy_length, slow_region);
+    }
+
+    // copy_length is 0.
+    if (dest_uninitialized) {
+      assert(!local_ctrl->is_top(), "no ctrl?");
+      Node* dest_length = alloc->in(AllocateNode::ALength);
+      if (copy_length->eqv_uncast(dest_length)
+          || _igvn.find_int_con(dest_length, 1) <= 0) {
+        // There is no zeroing to do. No need for a secondary raw memory barrier.
+      } else {
+        // Clear the whole thing since there are no source elements to copy.
+        generate_clear_array(local_ctrl, local_mem,
+                             adr_type, dest, basic_elem_type,
+                             intcon(0), NULL,
+                             alloc->in(AllocateNode::AllocSize));
+        // Use a secondary InitializeNode as raw memory barrier.
+        // Currently it is needed only on this path since other
+        // paths have stub or runtime calls as raw memory barriers.
+        MemBarNode* mb = MemBarNode::make(C, Op_Initialize,
+                                          Compile::AliasIdxRaw,
+                                          top());
+        transform_later(mb);
+        mb->set_req(TypeFunc::Control,local_ctrl);
+        mb->set_req(TypeFunc::Memory, local_mem->memory_at(Compile::AliasIdxRaw));
+        local_ctrl = transform_later(new ProjNode(mb, TypeFunc::Control));
+        local_mem->set_memory_at(Compile::AliasIdxRaw, transform_later(new ProjNode(mb, TypeFunc::Memory)));
+
+        InitializeNode* init = mb->as_Initialize();
+        init->set_complete(&_igvn);  // (there is no corresponding AllocateNode)
+      }
+    }
+
+    // Present the results of the fast call.
+    result_region->init_req(zero_path, local_ctrl);
+    result_i_o   ->init_req(zero_path, local_io);
+    result_memory->init_req(zero_path, local_mem->memory_at(alias_idx));
+  }
+
+  if (!(*ctrl)->is_top() && dest_uninitialized) {
+    // We have to initialize the *uncopied* part of the array to zero.
+    // The copy destination is the slice dest[off..off+len].  The other slices
+    // are dest_head = dest[0..off] and dest_tail = dest[off+len..dest.length].
+    Node* dest_size   = alloc->in(AllocateNode::AllocSize);
+    Node* dest_length = alloc->in(AllocateNode::ALength);
+    Node* dest_tail   = transform_later( new AddINode(dest_offset, copy_length));
+
+    // If there is a head section that needs zeroing, do it now.
+    if (_igvn.find_int_con(dest_offset, -1) != 0) {
+      generate_clear_array(*ctrl, mem,
+                           adr_type, dest, basic_elem_type,
+                           intcon(0), dest_offset,
+                           NULL);
+    }
+
+    // Next, perform a dynamic check on the tail length.
+    // It is often zero, and we can win big if we prove this.
+    // There are two wins:  Avoid generating the ClearArray
+    // with its attendant messy index arithmetic, and upgrade
+    // the copy to a more hardware-friendly word size of 64 bits.
+    Node* tail_ctl = NULL;
+    if (!(*ctrl)->is_top() && !dest_tail->eqv_uncast(dest_length)) {
+      Node* cmp_lt   = transform_later( new CmpINode(dest_tail, dest_length) );
+      Node* bol_lt   = transform_later( new BoolNode(cmp_lt, BoolTest::lt) );
+      tail_ctl = generate_slow_guard(ctrl, bol_lt, NULL);
+      assert(tail_ctl != NULL || !(*ctrl)->is_top(), "must be an outcome");
+    }
+
+    // At this point, let's assume there is no tail.
+    if (!(*ctrl)->is_top() && alloc != NULL && basic_elem_type != T_OBJECT) {
+      // There is no tail.  Try an upgrade to a 64-bit copy.
+      bool didit = false;
+      {
+        Node* local_ctrl = *ctrl, *local_io = *io;
+        MergeMemNode* local_mem = MergeMemNode::make(mem);
+        transform_later(local_mem);
+
+        didit = generate_block_arraycopy(&local_ctrl, &local_mem, local_io,
+                                         adr_type, basic_elem_type, alloc,
+                                         src, src_offset, dest, dest_offset,
+                                         dest_size, dest_uninitialized);
+        if (didit) {
+          // Present the results of the block-copying fast call.
+          result_region->init_req(bcopy_path, local_ctrl);
+          result_i_o   ->init_req(bcopy_path, local_io);
+          result_memory->init_req(bcopy_path, local_mem->memory_at(alias_idx));
+        }
+      }
+      if (didit) {
+        *ctrl = top();     // no regular fast path
+      }
+    }
+
+    // Clear the tail, if any.
+    if (tail_ctl != NULL) {
+      Node* notail_ctl = (*ctrl)->is_top() ? NULL : *ctrl;
+      *ctrl = tail_ctl;
+      if (notail_ctl == NULL) {
+        generate_clear_array(*ctrl, mem,
+                             adr_type, dest, basic_elem_type,
+                             dest_tail, NULL,
+                             dest_size);
+      } else {
+        // Make a local merge.
+        Node* done_ctl = transform_later(new RegionNode(3));
+        Node* done_mem = transform_later(new PhiNode(done_ctl, Type::MEMORY, adr_type));
+        done_ctl->init_req(1, notail_ctl);
+        done_mem->init_req(1, mem->memory_at(alias_idx));
+        generate_clear_array(*ctrl, mem,
+                             adr_type, dest, basic_elem_type,
+                             dest_tail, NULL,
+                             dest_size);
+        done_ctl->init_req(2, *ctrl);
+        done_mem->init_req(2, mem->memory_at(alias_idx));
+        *ctrl = done_ctl;
+        mem->set_memory_at(alias_idx, done_mem);
+      }
+    }
+  }
+
+  BasicType copy_type = basic_elem_type;
+  assert(basic_elem_type != T_ARRAY, "caller must fix this");
+  if (!(*ctrl)->is_top() && copy_type == T_OBJECT) {
+    // If src and dest have compatible element types, we can copy bits.
+    // Types S[] and D[] are compatible if D is a supertype of S.
+    //
+    // If they are not, we will use checked_oop_disjoint_arraycopy,
+    // which performs a fast optimistic per-oop check, and backs off
+    // further to JVM_ArrayCopy on the first per-oop check that fails.
+    // (Actually, we don't move raw bits only; the GC requires card marks.)
+
+    // Get the klass* for both src and dest
+    Node* src_klass  = ac->in(ArrayCopyNode::SrcKlass);
+    Node* dest_klass = ac->in(ArrayCopyNode::DestKlass);
+
+    // Generate the subtype check.
+    // This might fold up statically, or then again it might not.
+    //
+    // Non-static example:  Copying List<String>.elements to a new String[].
+    // The backing store for a List<String> is always an Object[],
+    // but its elements are always type String, if the generic types
+    // are correct at the source level.
+    //
+    // Test S[] against D[], not S against D, because (probably)
+    // the secondary supertype cache is less busy for S[] than S.
+    // This usually only matters when D is an interface.
+    Node* not_subtype_ctrl = ac->is_arraycopy_notest() ? top() : Phase::gen_subtype_check(src_klass, dest_klass, ctrl, mem, &_igvn);
+    // Plug failing path into checked_oop_disjoint_arraycopy
+    if (not_subtype_ctrl != top()) {
+      Node* local_ctrl = not_subtype_ctrl;
+      MergeMemNode* local_mem = MergeMemNode::make(mem);
+      transform_later(local_mem);
+
+      // (At this point we can assume disjoint_bases, since types differ.)
+      int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
+      Node* p1 = basic_plus_adr(dest_klass, ek_offset);
+      Node* n1 = LoadKlassNode::make(_igvn, C->immutable_memory(), p1, TypeRawPtr::BOTTOM);
+      Node* dest_elem_klass = transform_later(n1);
+      Node* cv = generate_checkcast_arraycopy(&local_ctrl, &local_mem,
+                                              adr_type,
+                                              dest_elem_klass,
+                                              src, src_offset, dest, dest_offset,
+                                              ConvI2X(copy_length), dest_uninitialized);
+      if (cv == NULL)  cv = intcon(-1);  // failure (no stub available)
+      checked_control = local_ctrl;
+      checked_i_o     = *io;
+      checked_mem     = local_mem->memory_at(alias_idx);
+      checked_value   = cv;
+    }
+    // At this point we know we do not need type checks on oop stores.
+
+    // Let's see if we need card marks:
+    if (alloc != NULL && GraphKit::use_ReduceInitialCardMarks()) {
+      // If we do not need card marks, copy using the jint or jlong stub.
+      copy_type = LP64_ONLY(UseCompressedOops ? T_INT : T_LONG) NOT_LP64(T_INT);
+      assert(type2aelembytes(basic_elem_type) == type2aelembytes(copy_type),
+             "sizes agree");
+    }
+  }
+
+  if (!(*ctrl)->is_top()) {
+    // Generate the fast path, if possible.
+    Node* local_ctrl = *ctrl;
+    MergeMemNode* local_mem = MergeMemNode::make(mem);
+    transform_later(local_mem);
+
+    generate_unchecked_arraycopy(&local_ctrl, &local_mem,
+                                 adr_type, copy_type, disjoint_bases,
+                                 src, src_offset, dest, dest_offset,
+                                 ConvI2X(copy_length), dest_uninitialized);
+
+    // Present the results of the fast call.
+    result_region->init_req(fast_path, local_ctrl);
+    result_i_o   ->init_req(fast_path, *io);
+    result_memory->init_req(fast_path, local_mem->memory_at(alias_idx));
+  }
+
+  // Here are all the slow paths up to this point, in one bundle:
+  assert(slow_region != NULL, "allocated on entry");
+  slow_control = slow_region;
+  DEBUG_ONLY(slow_region = (RegionNode*)badAddress);
+
+  *ctrl = checked_control;
+  if (!(*ctrl)->is_top()) {
+    // Clean up after the checked call.
+    // The returned value is either 0 or -1^K,
+    // where K = number of partially transferred array elements.
+    Node* cmp = new CmpINode(checked_value, intcon(0));
+    transform_later(cmp);
+    Node* bol = new BoolNode(cmp, BoolTest::eq);
+    transform_later(bol);
+    IfNode* iff = new IfNode(*ctrl, bol, PROB_MAX, COUNT_UNKNOWN);
+    transform_later(iff);
+
+    // If it is 0, we are done, so transfer to the end.
+    Node* checks_done = new IfTrueNode(iff);
+    transform_later(checks_done);
+    result_region->init_req(checked_path, checks_done);
+    result_i_o   ->init_req(checked_path, checked_i_o);
+    result_memory->init_req(checked_path, checked_mem);
+
+    // If it is not zero, merge into the slow call.
+    *ctrl = new IfFalseNode(iff);
+    transform_later(*ctrl);
+    RegionNode* slow_reg2 = new RegionNode(3);
+    PhiNode*    slow_i_o2 = new PhiNode(slow_reg2, Type::ABIO);
+    PhiNode*    slow_mem2 = new PhiNode(slow_reg2, Type::MEMORY, adr_type);
+    transform_later(slow_reg2);
+    transform_later(slow_i_o2);
+    transform_later(slow_mem2);
+    slow_reg2  ->init_req(1, slow_control);
+    slow_i_o2  ->init_req(1, slow_i_o);
+    slow_mem2  ->init_req(1, slow_mem);
+    slow_reg2  ->init_req(2, *ctrl);
+    slow_i_o2  ->init_req(2, checked_i_o);
+    slow_mem2  ->init_req(2, checked_mem);
+
+    slow_control = slow_reg2;
+    slow_i_o     = slow_i_o2;
+    slow_mem     = slow_mem2;
+
+    if (alloc != NULL) {
+      // We'll restart from the very beginning, after zeroing the whole thing.
+      // This can cause double writes, but that's OK since dest is brand new.
+      // So we ignore the low 31 bits of the value returned from the stub.
+    } else {
+      // We must continue the copy exactly where it failed, or else
+      // another thread might see the wrong number of writes to dest.
+      Node* checked_offset = new XorINode(checked_value, intcon(-1));
+      Node* slow_offset    = new PhiNode(slow_reg2, TypeInt::INT);
+      transform_later(checked_offset);
+      transform_later(slow_offset);
+      slow_offset->init_req(1, intcon(0));
+      slow_offset->init_req(2, checked_offset);
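+      // checked_value is -1^K == ~K, so XOR-ing with -1 recovers K, the
+      // number of elements already copied (e.g. checked_value == -5 means
+      // K == 4); the slow call then resumes the copy K elements further in.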
+
+      // Adjust the arguments by the conditionally incoming offset.
+      Node* src_off_plus  = new AddINode(src_offset,  slow_offset);
+      transform_later(src_off_plus);
+      Node* dest_off_plus = new AddINode(dest_offset, slow_offset);
+      transform_later(dest_off_plus);
+      Node* length_minus  = new SubINode(copy_length, slow_offset);
+      transform_later(length_minus);
+
+      // Tweak the node variables to adjust the code produced below:
+      src_offset  = src_off_plus;
+      dest_offset = dest_off_plus;
+      copy_length = length_minus;
+    }
+  }
+  *ctrl = slow_control;
+  if (!(*ctrl)->is_top()) {
+    Node* local_ctrl = *ctrl, *local_io = slow_i_o;
+    MergeMemNode* local_mem = MergeMemNode::make(mem);
+    transform_later(local_mem);
+
+    // Generate the slow path, if needed.
+    local_mem->set_memory_at(alias_idx, slow_mem);
+
+    if (dest_uninitialized) {
+      generate_clear_array(local_ctrl, local_mem,
+                           adr_type, dest, basic_elem_type,
+                           intcon(0), NULL,
+                           alloc->in(AllocateNode::AllocSize));
+    }
+
+    local_mem = generate_slow_arraycopy(ac,
+                                        &local_ctrl, local_mem, &local_io,
+                                        adr_type,
+                                        src, src_offset, dest, dest_offset,
+                                        copy_length, /*dest_uninitialized*/false);
+
+    result_region->init_req(slow_call_path, local_ctrl);
+    result_i_o   ->init_req(slow_call_path, local_io);
+    result_memory->init_req(slow_call_path, local_mem->memory_at(alias_idx));
+  } else {
+    ShouldNotReachHere(); // no call to generate_slow_arraycopy:
+                          // projections were not extracted
+  }
+
+  // Remove unused edges.
+  for (uint i = 1; i < result_region->req(); i++) {
+    if (result_region->in(i) == NULL) {
+      result_region->init_req(i, top());
+    }
+  }
+
+  // Finished; return the combined state.
+  *ctrl = result_region;
+  *io = result_i_o;
+  mem->set_memory_at(alias_idx, result_memory);
+
+  // mem no longer guaranteed to stay a MergeMemNode
+  Node* out_mem = mem;
+  DEBUG_ONLY(mem = NULL);
+
+  // The memory edges above are precise in order to model effects around
+  // array copies accurately to allow value numbering of field loads around
+  // arraycopy.  Such field loads, both before and after, are common in Java
+  // collections and similar classes involving header/array data structures.
+  //
+  // But with a low number of registers, or when some registers are used or
+  // killed by the arraycopy calls, this causes registers to spill on the
+  // stack.  See 6544710.  The memory barrier below is added to avoid that.
+  // If the arraycopy can be optimized away (which it sometimes can), then
+  // we can manually remove the membar as well.
+  //
+  // Do not let reads from the cloned object float above the arraycopy.
+  if (alloc != NULL && !alloc->initialization()->does_not_escape()) {
+    // Do not let stores that initialize this object be reordered with
+    // a subsequent store that would make this object accessible by
+    // other threads.
+    insert_mem_bar(ctrl, &out_mem, Op_MemBarStoreStore);
+  } else if (InsertMemBarAfterArraycopy) {
+    insert_mem_bar(ctrl, &out_mem, Op_MemBarCPUOrder);
+  }
+
+  _igvn.replace_node(_memproj_fallthrough, out_mem);
+  _igvn.replace_node(_ioproj_fallthrough, *io);
+  _igvn.replace_node(_fallthroughcatchproj, *ctrl);
+
+  return out_mem;
+}
+
+// Helper for initialization of arrays, creating a ClearArray.
+// It writes zero bits in [start..end), within the body of an array object.
+// The memory effects are all chained onto the 'adr_type' alias category.
+//
+// Since the object is otherwise uninitialized, we are free
+// to put a little "slop" around the edges of the cleared area,
+// as long as it does not go back into the array's header,
+// or beyond the array end within the heap.
+//
+// The lower edge can be rounded down to the nearest jint and the
+// upper edge can be rounded up to the nearest MinObjAlignmentInBytes.
+//
+// Arguments:
+//   adr_type           memory slice where writes are generated
+//   dest               oop of the destination array
+//   basic_elem_type    element type of the destination
+//   slice_idx          array index of first element to store
+//   slice_len          number of elements to store (or NULL)
+//   dest_size          total size in bytes of the array object
+//
+// Exactly one of slice_len or dest_size must be non-NULL.
+// If dest_size is non-NULL, zeroing extends to the end of the object.
+// If slice_len is non-NULL, the slice_idx value must be a constant.
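+//
+// For example, generate_arraycopy (above) zeroes a head section with
+// slice_idx == intcon(0), slice_len == dest_offset, dest_size == NULL,
+// and zeroes the tail with slice_idx == dest_tail, slice_len == NULL,
+// dest_size == alloc->in(AllocateNode::AllocSize).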
+void PhaseMacroExpand::generate_clear_array(Node* ctrl, MergeMemNode* merge_mem,
+                                            const TypePtr* adr_type,
+                                            Node* dest,
+                                            BasicType basic_elem_type,
+                                            Node* slice_idx,
+                                            Node* slice_len,
+                                            Node* dest_size) {
+  // one or the other but not both of slice_len and dest_size:
+  assert((slice_len != NULL? 1: 0) + (dest_size != NULL? 1: 0) == 1, "");
+  if (slice_len == NULL)  slice_len = top();
+  if (dest_size == NULL)  dest_size = top();
+
+  uint alias_idx = C->get_alias_index(adr_type);
+
+  Node* mem = merge_mem->memory_at(alias_idx); // memory slice to operate on
+
+  // scaling and rounding of indexes:
+  int scale = exact_log2(type2aelembytes(basic_elem_type));
+  int abase = arrayOopDesc::base_offset_in_bytes(basic_elem_type);
+  int clear_low = (-1 << scale) & (BytesPerInt - 1);
+  int bump_bit  = (-1 << scale) & BytesPerInt;
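+  // For example, for T_SHORT (scale == 1): clear_low == ((-2) & 3) == 2 is
+  // used to round a start offset down to a jint boundary, and bump_bit ==
+  // ((-2) & 4) == 4 is the jint-sized step used when bumping a start up
+  // toward the next jlong boundary.  For T_LONG (scale == 3) both masks are
+  // 0, since every element offset is already jlong-aligned.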
+
+  // determine constant starts and ends
+  const intptr_t BIG_NEG = -128;
+  assert(BIG_NEG + 2*abase < 0, "neg enough");
+  intptr_t slice_idx_con = (intptr_t) _igvn.find_int_con(slice_idx, BIG_NEG);
+  intptr_t slice_len_con = (intptr_t) _igvn.find_int_con(slice_len, BIG_NEG);
+  if (slice_len_con == 0) {
+    return;                     // nothing to do here
+  }
+  intptr_t start_con = (abase + (slice_idx_con << scale)) & ~clear_low;
+  intptr_t end_con   = _igvn.find_intptr_t_con(dest_size, -1);
+  if (slice_idx_con >= 0 && slice_len_con >= 0) {
+    assert(end_con < 0, "not two cons");
+    end_con = round_to(abase + ((slice_idx_con + slice_len_con) << scale),
+                       BytesPerLong);
+  }
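+  // Worked example (abase illustrative): zeroing elements [2, 6) of a short
+  // array (scale == 1) with abase == 16 gives start_con == (16 + 4) & ~2 == 20
+  // and end_con == round_to(16 + 12, 8) == 32; the slop bytes [28, 32) are
+  // safe to zero because the object is otherwise uninitialized.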
+
+  if (start_con >= 0 && end_con >= 0) {
+    // Constant start and end.  Simple.
+    mem = ClearArrayNode::clear_memory(ctrl, mem, dest,
+                                       start_con, end_con, &_igvn);
+  } else if (start_con >= 0 && dest_size != top()) {
+    // Constant start, pre-rounded end after the tail of the array.
+    Node* end = dest_size;
+    mem = ClearArrayNode::clear_memory(ctrl, mem, dest,
+                                       start_con, end, &_igvn);
+  } else if (start_con >= 0 && slice_len != top()) {
+    // Constant start, non-constant end.  End needs rounding up.
+    // End offset = round_up(abase + ((slice_idx_con + slice_len) << scale), 8)
+    intptr_t end_base  = abase + (slice_idx_con << scale);
+    int      end_round = (-1 << scale) & (BytesPerLong - 1);
+    Node*    end       = ConvI2X(slice_len);
+    if (scale != 0)
+      end = transform_later(new LShiftXNode(end, intcon(scale) ));
+    end_base += end_round;
+    end = transform_later(new AddXNode(end, MakeConX(end_base)) );
+    end = transform_later(new AndXNode(end, MakeConX(~end_round)) );
+    mem = ClearArrayNode::clear_memory(ctrl, mem, dest,
+                                       start_con, end, &_igvn);
+  } else if (start_con < 0 && dest_size != top()) {
+    // Non-constant start, pre-rounded end after the tail of the array.
+    // This is almost certainly a "round-to-end" operation.
+    Node* start = slice_idx;
+    start = ConvI2X(start);
+    if (scale != 0)
+      start = transform_later(new LShiftXNode( start, intcon(scale) ));
+    start = transform_later(new AddXNode(start, MakeConX(abase)) );
+    if ((bump_bit | clear_low) != 0) {
+      int to_clear = (bump_bit | clear_low);
+      // Align up mod 8, then store a jint zero unconditionally
+      // just before the mod-8 boundary.
+      if (((abase + bump_bit) & ~to_clear) - bump_bit
+          < arrayOopDesc::length_offset_in_bytes() + BytesPerInt) {
+        bump_bit = 0;
+        assert((abase & to_clear) == 0, "array base must be long-aligned");
+      } else {
+        // Bump 'start' up to (or past) the next jint boundary:
+        start = transform_later( new AddXNode(start, MakeConX(bump_bit)) );
+        assert((abase & clear_low) == 0, "array base must be int-aligned");
+      }
+      // Round bumped 'start' down to jlong boundary in body of array.
+      start = transform_later(new AndXNode(start, MakeConX(~to_clear)) );
+      if (bump_bit != 0) {
+        // Store a zero to the immediately preceding jint:
+        Node* x1 = transform_later(new AddXNode(start, MakeConX(-bump_bit)) );
+        Node* p1 = basic_plus_adr(dest, x1);
+        mem = StoreNode::make(_igvn, ctrl, mem, p1, adr_type, intcon(0), T_INT, MemNode::unordered);
+        mem = transform_later(mem);
+      }
+    }
+    Node* end = dest_size; // pre-rounded
+    mem = ClearArrayNode::clear_memory(ctrl, mem, dest,
+                                       start, end, &_igvn);
+  } else {
+    // Non-constant start, unrounded non-constant end.
+    // (Nobody zeroes a random midsection of an array using this routine.)
+    ShouldNotReachHere();       // fix caller
+  }
+
+  // Done.
+  merge_mem->set_memory_at(alias_idx, mem);
+}
+
+bool PhaseMacroExpand::generate_block_arraycopy(Node** ctrl, MergeMemNode** mem, Node* io,
+                                                const TypePtr* adr_type,
+                                                BasicType basic_elem_type,
+                                                AllocateNode* alloc,
+                                                Node* src,  Node* src_offset,
+                                                Node* dest, Node* dest_offset,
+                                                Node* dest_size, bool dest_uninitialized) {
+  // See if there is an advantage from block transfer.
+  int scale = exact_log2(type2aelembytes(basic_elem_type));
+  if (scale >= LogBytesPerLong)
+    return false;               // it is already a block transfer
+
+  // Look at the alignment of the starting offsets.
+  int abase = arrayOopDesc::base_offset_in_bytes(basic_elem_type);
+
+  intptr_t src_off_con  = (intptr_t) _igvn.find_int_con(src_offset, -1);
+  intptr_t dest_off_con = (intptr_t) _igvn.find_int_con(dest_offset, -1);
+  if (src_off_con < 0 || dest_off_con < 0) {
+    // At present, we can only understand constants.
+    return false;
+  }
+
+  intptr_t src_off  = abase + (src_off_con  << scale);
+  intptr_t dest_off = abase + (dest_off_con << scale);
+
+  if (((src_off | dest_off) & (BytesPerLong-1)) != 0) {
+    // Non-aligned; too bad.
+    // One more chance:  Pick off an initial 32-bit word.
+    // This is a common case, since abase can be odd mod 8.
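+    // e.g. on a 32-bit VM an int array can have abase == 12; then with
+    // src_off == dest_off == 12 both offsets are 4 mod 8, and copying one
+    // jint first advances them to 16, which is jlong-aligned.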
+    if (((src_off | dest_off) & (BytesPerLong-1)) == BytesPerInt &&
+        ((src_off ^ dest_off) & (BytesPerLong-1)) == 0) {
+      Node* sptr = basic_plus_adr(src,  src_off);
+      Node* dptr = basic_plus_adr(dest, dest_off);
+      uint alias_idx = C->get_alias_index(adr_type);
+      Node* sval = transform_later(LoadNode::make(_igvn, *ctrl, (*mem)->memory_at(alias_idx), sptr, adr_type, TypeInt::INT, T_INT, MemNode::unordered));
+      Node* st = transform_later(StoreNode::make(_igvn, *ctrl, (*mem)->memory_at(alias_idx), dptr, adr_type, sval, T_INT, MemNode::unordered));
+      (*mem)->set_memory_at(alias_idx, st);
+      src_off += BytesPerInt;
+      dest_off += BytesPerInt;
+    } else {
+      return false;
+    }
+  }
+  assert(src_off % BytesPerLong == 0, "");
+  assert(dest_off % BytesPerLong == 0, "");
+
+  // Do this copy by giant steps.
+  Node* sptr  = basic_plus_adr(src,  src_off);
+  Node* dptr  = basic_plus_adr(dest, dest_off);
+  Node* countx = dest_size;
+  countx = transform_later(new SubXNode(countx, MakeConX(dest_off)));
+  countx = transform_later(new URShiftXNode(countx, intcon(LogBytesPerLong)));
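+  // countx == (dest_size - dest_off) >> LogBytesPerLong: the number of
+  // 8-byte words from the first copied element through the end of the object.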
+
+  bool disjoint_bases = true;   // since alloc != NULL
+  generate_unchecked_arraycopy(ctrl, mem,
+                               adr_type, T_LONG, disjoint_bases,
+                               sptr, NULL, dptr, NULL, countx, dest_uninitialized);
+
+  return true;
+}
+
+// Helper function; generates code for the slow case.
+// We make a call to a runtime method which emulates the native method,
+// but without the native wrapper overhead.
+MergeMemNode* PhaseMacroExpand::generate_slow_arraycopy(ArrayCopyNode *ac,
+                                                        Node** ctrl, Node* mem, Node** io,
+                                                        const TypePtr* adr_type,
+                                                        Node* src,  Node* src_offset,
+                                                        Node* dest, Node* dest_offset,
+                                                        Node* copy_length, bool dest_uninitialized) {
+  assert(!dest_uninitialized, "Invariant");
+
+  const TypeFunc* call_type = OptoRuntime::slow_arraycopy_Type();
+  CallNode* call = new CallStaticJavaNode(call_type, OptoRuntime::slow_arraycopy_Java(),
+                                          "slow_arraycopy",
+                                          ac->jvms()->bci(), TypePtr::BOTTOM);
+
+  call->init_req(TypeFunc::Control, *ctrl);
+  call->init_req(TypeFunc::I_O    , *io);
+  call->init_req(TypeFunc::Memory , mem);
+  call->init_req(TypeFunc::ReturnAdr, top());
+  call->init_req(TypeFunc::FramePtr, top());
+  call->init_req(TypeFunc::Parms+0, src);
+  call->init_req(TypeFunc::Parms+1, src_offset);
+  call->init_req(TypeFunc::Parms+2, dest);
+  call->init_req(TypeFunc::Parms+3, dest_offset);
+  call->init_req(TypeFunc::Parms+4, copy_length);
+  copy_call_debug_info(ac, call);
+
+  call->set_cnt(PROB_UNLIKELY_MAG(4));  // Same effect as RC_UNCOMMON.
+  _igvn.replace_node(ac, call);
+  transform_later(call);
+
+  extract_call_projections(call);
+  *ctrl = _fallthroughcatchproj->clone();
+  transform_later(*ctrl);
+
+  Node* m = _memproj_fallthrough->clone();
+  transform_later(m);
+
+  uint alias_idx = C->get_alias_index(adr_type);
+  MergeMemNode* out_mem;
+  if (alias_idx != Compile::AliasIdxBot) {
+    out_mem = MergeMemNode::make(mem);
+    out_mem->set_memory_at(alias_idx, m);
+  } else {
+    out_mem = MergeMemNode::make(m);
+  }
+  transform_later(out_mem);
+
+  *io = _ioproj_fallthrough->clone();
+  transform_later(*io);
+
+  return out_mem;
+}
+
+// Helper function; generates code for cases requiring runtime checks.
+Node* PhaseMacroExpand::generate_checkcast_arraycopy(Node** ctrl, MergeMemNode** mem,
+                                                     const TypePtr* adr_type,
+                                                     Node* dest_elem_klass,
+                                                     Node* src,  Node* src_offset,
+                                                     Node* dest, Node* dest_offset,
+                                                     Node* copy_length, bool dest_uninitialized) {
+  if ((*ctrl)->is_top())  return NULL;
+
+  address copyfunc_addr = StubRoutines::checkcast_arraycopy(dest_uninitialized);
+  if (copyfunc_addr == NULL) { // Stub was not generated, go slow path.
+    return NULL;
+  }
+
+  // Pick out the parameters required to perform a store-check
+  // for the target array.  This is an optimistic check.  It will
+  // look in each non-null element's class, at the desired klass's
+  // super_check_offset, for the desired klass.
+  int sco_offset = in_bytes(Klass::super_check_offset_offset());
+  Node* p3 = basic_plus_adr(dest_elem_klass, sco_offset);
+  Node* n3 = new LoadINode(NULL, *mem /*memory(p3)*/, p3, _igvn.type(p3)->is_ptr(), TypeInt::INT, MemNode::unordered);
+  Node* check_offset = ConvI2X(transform_later(n3));
+  Node* check_value  = dest_elem_klass;
+
+  Node* src_start  = array_element_address(src,  src_offset,  T_OBJECT);
+  Node* dest_start = array_element_address(dest, dest_offset, T_OBJECT);
+
+  const TypeFunc* call_type = OptoRuntime::checkcast_arraycopy_Type();
+  Node* call = make_leaf_call(*ctrl, *mem, call_type, copyfunc_addr, "checkcast_arraycopy", adr_type,
+                              src_start, dest_start, copy_length XTOP, check_offset XTOP, check_value);
+
+  finish_arraycopy_call(call, ctrl, mem, adr_type);
+
+  Node* proj = new ProjNode(call, TypeFunc::Parms);
+  transform_later(proj);
+
+  return proj;
+}
+
+// Helper function; generates code for cases requiring runtime checks.
+Node* PhaseMacroExpand::generate_generic_arraycopy(Node** ctrl, MergeMemNode** mem,
+                                                   const TypePtr* adr_type,
+                                                   Node* src,  Node* src_offset,
+                                                   Node* dest, Node* dest_offset,
+                                                   Node* copy_length, bool dest_uninitialized) {
+  if ((*ctrl)->is_top()) return NULL;
+  assert(!dest_uninitialized, "Invariant");
+
+  address copyfunc_addr = StubRoutines::generic_arraycopy();
+  if (copyfunc_addr == NULL) { // Stub was not generated, go slow path.
+    return NULL;
+  }
+
+  const TypeFunc* call_type = OptoRuntime::generic_arraycopy_Type();
+  Node* call = make_leaf_call(*ctrl, *mem, call_type, copyfunc_addr, "generic_arraycopy", adr_type,
+                              src, src_offset, dest, dest_offset, copy_length);
+
+  finish_arraycopy_call(call, ctrl, mem, adr_type);
+
+  Node* proj = new ProjNode(call, TypeFunc::Parms);
+  transform_later(proj);
+
+  return proj;
+}
+
+// Helper function; generates the fast out-of-line call to an arraycopy stub.
+void PhaseMacroExpand::generate_unchecked_arraycopy(Node** ctrl, MergeMemNode** mem,
+                                                    const TypePtr* adr_type,
+                                                    BasicType basic_elem_type,
+                                                    bool disjoint_bases,
+                                                    Node* src,  Node* src_offset,
+                                                    Node* dest, Node* dest_offset,
+                                                    Node* copy_length, bool dest_uninitialized) {
+  if ((*ctrl)->is_top()) return;
+
+  Node* src_start  = src;
+  Node* dest_start = dest;
+  if (src_offset != NULL || dest_offset != NULL) {
+    src_start =  array_element_address(src, src_offset, basic_elem_type);
+    dest_start = array_element_address(dest, dest_offset, basic_elem_type);
+  }
+
+  // Figure out which arraycopy runtime method to call.
+  const char* copyfunc_name = "arraycopy";
+  address     copyfunc_addr =
+      basictype2arraycopy(basic_elem_type, src_offset, dest_offset,
+                          disjoint_bases, copyfunc_name, dest_uninitialized);
+
+  const TypeFunc* call_type = OptoRuntime::fast_arraycopy_Type();
+  Node* call = make_leaf_call(*ctrl, *mem, call_type, copyfunc_addr, copyfunc_name, adr_type,
+                              src_start, dest_start, copy_length XTOP);
+
+  finish_arraycopy_call(call, ctrl, mem, adr_type);
+}
+
+void PhaseMacroExpand::expand_arraycopy_node(ArrayCopyNode *ac) {
+  Node* ctrl = ac->in(TypeFunc::Control);
+  Node* io = ac->in(TypeFunc::I_O);
+  Node* src = ac->in(ArrayCopyNode::Src);
+  Node* src_offset = ac->in(ArrayCopyNode::SrcPos);
+  Node* dest = ac->in(ArrayCopyNode::Dest);
+  Node* dest_offset = ac->in(ArrayCopyNode::DestPos);
+  Node* length = ac->in(ArrayCopyNode::Length);
+  MergeMemNode* merge_mem = NULL;
+
+  if (ac->is_clonebasic()) {
+    assert(src_offset == NULL && dest_offset == NULL, "for clone offsets should be null");
+    Node* mem = ac->in(TypeFunc::Memory);
+    const char* copyfunc_name = "arraycopy";
+    address     copyfunc_addr =
+      basictype2arraycopy(T_LONG, NULL, NULL,
+                          true, copyfunc_name, true);
+
+    const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
+    const TypeFunc* call_type = OptoRuntime::fast_arraycopy_Type();
+
+    Node* call = make_leaf_call(ctrl, mem, call_type, copyfunc_addr, copyfunc_name, raw_adr_type, src, dest, length XTOP);
+    transform_later(call);
+
+    _igvn.replace_node(ac, call);
+    return;
+  } else if (ac->is_copyof() || ac->is_copyofrange() || ac->is_cloneoop()) {
+    Node* mem = ac->in(TypeFunc::Memory);
+    merge_mem = MergeMemNode::make(mem);
+    transform_later(merge_mem);
+
+    RegionNode* slow_region = new RegionNode(1);
+    transform_later(slow_region);
+
+    AllocateArrayNode* alloc = NULL;
+    if (ac->is_alloc_tightly_coupled()) {
+      alloc = AllocateArrayNode::Ideal_array_allocation(dest, &_igvn);
+      assert(alloc != NULL, "expect alloc");
+    }
+
+    generate_arraycopy(ac, alloc, &ctrl, merge_mem, &io,
+                       TypeAryPtr::OOPS, T_OBJECT,
+                       src, src_offset, dest, dest_offset, length,
+                       true, !ac->is_copyofrange());
+
+    return;
+  }
+
+  AllocateArrayNode* alloc = NULL;
+  if (ac->is_alloc_tightly_coupled()) {
+    alloc = AllocateArrayNode::Ideal_array_allocation(dest, &_igvn);
+    assert(alloc != NULL, "expect alloc");
+  }
+
+  assert(ac->is_arraycopy() || ac->is_arraycopy_notest(), "should be an arraycopy");
+
+  // Compile time checks.  If any of these checks cannot be verified at compile time,
+  // we do not make a fast path for this call.  Instead, we let the call remain as it
+  // is.  The checks we choose to mandate at compile time are:
+  //
+  // (1) src and dest are arrays.
+  const Type* src_type = src->Value(&_igvn);
+  const Type* dest_type = dest->Value(&_igvn);
+  const TypeAryPtr* top_src = src_type->isa_aryptr();
+  const TypeAryPtr* top_dest = dest_type->isa_aryptr();
+
+  if (top_src  == NULL || top_src->klass()  == NULL ||
+      top_dest == NULL || top_dest->klass() == NULL) {
+    // Conservatively insert a memory barrier on all memory slices.
+    // Do not let writes into the source float below the arraycopy.
+    {
+      Node* mem = ac->in(TypeFunc::Memory);
+      insert_mem_bar(&ctrl, &mem, Op_MemBarCPUOrder);
+
+      merge_mem = MergeMemNode::make(mem);
+      transform_later(merge_mem);
+    }
+
+    // Call StubRoutines::generic_arraycopy stub.
+    Node* mem = generate_arraycopy(ac, NULL, &ctrl, merge_mem, &io,
+                                   TypeRawPtr::BOTTOM, T_CONFLICT,
+                                   src, src_offset, dest, dest_offset, length);
+
+    // Do not let reads from the destination float above the arraycopy.
+    // Since we cannot type the arrays, we don't know which slices
+    // might be affected.  We could restrict this barrier only to those
+    // memory slices which pertain to array elements--but don't bother.
+    if (!InsertMemBarAfterArraycopy) {
+      // (If InsertMemBarAfterArraycopy, there is already one in place.)
+      insert_mem_bar(&ctrl, &mem, Op_MemBarCPUOrder);
+    }
+    return;
+  }
+  // (2) src and dest arrays must have elements of the same BasicType
+  // Figure out the size and type of the elements we will be copying.
+  BasicType src_elem  =  top_src->klass()->as_array_klass()->element_type()->basic_type();
+  BasicType dest_elem = top_dest->klass()->as_array_klass()->element_type()->basic_type();
+  if (src_elem  == T_ARRAY)  src_elem  = T_OBJECT;
+  if (dest_elem == T_ARRAY)  dest_elem = T_OBJECT;
+
+  if (src_elem != dest_elem || dest_elem == T_VOID) {
+    // The component types are not the same or are not recognized.  Punt.
+    // (But, avoid the native method wrapper to JVM_ArrayCopy.)
+    {
+      Node* mem = ac->in(TypeFunc::Memory);
+      merge_mem = generate_slow_arraycopy(ac, &ctrl, mem, &io, TypePtr::BOTTOM, src, src_offset, dest, dest_offset, length, false);
+    }
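+  // The compare below is unsigned: if (offset + subseq_length) overflows to
+  // a negative int, it is seen as a huge unsigned value, so the guard still
+  // routes such cases to the slow path.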
+
+    _igvn.replace_node(_memproj_fallthrough, merge_mem);
+    _igvn.replace_node(_ioproj_fallthrough, io);
+    _igvn.replace_node(_fallthroughcatchproj, ctrl);
+    return;
+  }
+
+  //---------------------------------------------------------------------------
+  // We will make a fast path for this call to arraycopy.
+
+  // We have the following tests left to perform:
+  //
+  // (3) src and dest must not be null.
+  // (4) src_offset must not be negative.
+  // (5) dest_offset must not be negative.
+  // (6) length must not be negative.
+  // (7) src_offset + length must not exceed length of src.
+  // (8) dest_offset + length must not exceed length of dest.
+  // (9) each element of an oop array must be assignable
+
+  {
+    Node* mem = ac->in(TypeFunc::Memory);
+    merge_mem = MergeMemNode::make(mem);
+    transform_later(merge_mem);
+  }
+
+  RegionNode* slow_region = new RegionNode(1);
+  transform_later(slow_region);
+
+  if (!ac->is_arraycopy_notest()) {
+    // (3) operands must not be null
+    // We currently perform our null checks with the null_check routine.
+    // This means that the null exceptions will be reported in the caller
+    // rather than (correctly) reported inside of the native arraycopy call.
+    // This should be corrected, given time.  We do our null check with the
+    // stack pointer restored.
+    // The null checks are done in library_call.cpp.
+
+    // (4) src_offset must not be negative.
+    generate_negative_guard(&ctrl, src_offset, slow_region);
+
+    // (5) dest_offset must not be negative.
+    generate_negative_guard(&ctrl, dest_offset, slow_region);
+
+    // (6) length must not be negative (moved to generate_arraycopy()).
+    // generate_negative_guard(length, slow_region);
+
+    // (7) src_offset + length must not exceed length of src.
+    Node* alen = ac->in(ArrayCopyNode::SrcLen);
+    generate_limit_guard(&ctrl,
+                         src_offset, length,
+                         alen,
+                         slow_region);
+
+    // (8) dest_offset + length must not exceed length of dest.
+    alen = ac->in(ArrayCopyNode::DestLen);
+    generate_limit_guard(&ctrl,
+                         dest_offset, length,
+                         alen,
+                         slow_region);
+
+    // (9) each element of an oop array must be assignable
+    // The generate_arraycopy subroutine checks this.
+  }
+  // This is where the memory effects are placed:
+  const TypePtr* adr_type = TypeAryPtr::get_array_body_type(dest_elem);
+  generate_arraycopy(ac, alloc, &ctrl, merge_mem, &io,
+                     adr_type, dest_elem,
+                     src, src_offset, dest, dest_offset, length,
+                     false, false, slow_region);
+}
--- a/hotspot/src/share/vm/opto/node.cpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/opto/node.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -1083,6 +1083,9 @@
   if( this->is_Store() ) {
     // Condition for back-to-back stores folding.
     return n->Opcode() == op && n->in(MemNode::Memory) == this;
+  } else if (this->is_Load()) {
+    // Condition for removing an unused LoadNode from the MemBarAcquire precedence input
+    return n->Opcode() == Op_MemBarAcquire;
   } else if( op == Op_AddL ) {
     // Condition for convL2I(addL(x,y)) ==> addI(convL2I(x),convL2I(y))
     return n->Opcode() == Op_ConvL2I && n->in(1) == this;
--- a/hotspot/src/share/vm/opto/node.hpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/opto/node.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -40,6 +40,7 @@
 class AliasInfo;
 class AllocateArrayNode;
 class AllocateNode;
+class ArrayCopyNode;
 class Block;
 class BoolNode;
 class BoxLockNode;
@@ -561,6 +562,7 @@
           DEFINE_CLASS_ID(AbstractLock,     Call, 3)
             DEFINE_CLASS_ID(Lock,             AbstractLock, 0)
             DEFINE_CLASS_ID(Unlock,           AbstractLock, 1)
+          DEFINE_CLASS_ID(ArrayCopy,        Call, 4)
       DEFINE_CLASS_ID(MultiBranch, Multi, 1)
         DEFINE_CLASS_ID(PCTable,     MultiBranch, 0)
           DEFINE_CLASS_ID(Catch,       PCTable, 0)
@@ -707,6 +709,7 @@
   DEFINE_CLASS_QUERY(AddP)
   DEFINE_CLASS_QUERY(Allocate)
   DEFINE_CLASS_QUERY(AllocateArray)
+  DEFINE_CLASS_QUERY(ArrayCopy)
   DEFINE_CLASS_QUERY(Bool)
   DEFINE_CLASS_QUERY(BoxLock)
   DEFINE_CLASS_QUERY(Call)
--- a/hotspot/src/share/vm/opto/phase.hpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/opto/phase.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -27,7 +27,10 @@
 
 #include "runtime/timer.hpp"
 
-class Compile;
+class IfNode;
+class MergeMemNode;
+class Node;
+class PhaseGVN;
 
 //------------------------------Phase------------------------------------------
 // Most optimizations are done in Phases.  Creating a phase does any long
@@ -114,9 +117,20 @@
   static elapsedTimer   _t_instrSched;
   static elapsedTimer   _t_buildOopMaps;
 #endif
+
+  // Generate a subtyping check.  Takes as input the subtype and supertype.
+  // Returns 2 values: sets the default control() to the true path and
+  // returns the false path.  Only reads from constant memory taken from the
+  // default memory; does not write anything.  It also doesn't take in an
+  // Object; if you wish to check an Object you need to load the Object's
+  // class prior to coming here.
+  // Used in GraphKit and PhaseMacroExpand
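+  // A call-site sketch (cf. PhaseMacroExpand::generate_arraycopy):
+  //   Node* not_subtype_ctrl = Phase::gen_subtype_check(src_klass, dest_klass,
+  //                                                     &ctrl, mem, &_igvn);
+  //   // ctrl is now the subtype-success path, not_subtype_ctrl the failure path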
+  static Node* gen_subtype_check(Node* subklass, Node* superklass, Node** ctrl, MergeMemNode* mem, PhaseGVN* gvn);
+
 public:
   Compile * C;
   Phase( PhaseNumber pnum );
+
 #ifndef PRODUCT
   static void print_timers();
 #endif
--- a/hotspot/src/share/vm/opto/phaseX.hpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/opto/phaseX.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -390,6 +390,9 @@
   // in a faster or cheaper fashion.
   Node  *transform( Node *n );
   Node  *transform_no_reclaim( Node *n );
+  virtual void record_for_igvn(Node *n) {
+    C->record_for_igvn(n);
+  }
 
   void replace_with(PhaseGVN* gvn) {
     _table.replace_with(&gvn->_table);
@@ -418,9 +421,6 @@
 
 protected:
 
-  // Idealize new Node 'n' with respect to its inputs and its value
-  virtual Node *transform( Node *a_node );
-
   // Warm up hash table, type table and initial worklist
   void init_worklist( Node *a_root );
 
@@ -434,6 +434,10 @@
   PhaseIterGVN( PhaseGVN *gvn ); // Used after Parser
   PhaseIterGVN( PhaseIterGVN *igvn, const char *dummy ); // Used after +VerifyOpto
 
+  // Idealize new Node 'n' with respect to its inputs and its value
+  virtual Node *transform( Node *a_node );
+  virtual void record_for_igvn(Node *n) { }
+
   virtual PhaseIterGVN *is_IterGVN() { return this; }
 
   Unique_Node_List _worklist;       // Iterative worklist
--- a/hotspot/src/share/vm/opto/subnode.cpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/opto/subnode.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -1201,6 +1201,54 @@
   return new BoolNode(in(1), _test.negate());
 }
 
+// Change "bool eq/ne (cmp (add/sub A B) C)" into false/true if add/sub
+// overflows and we can prove that C is not in the two resulting ranges.
+// This optimization is similar to the one performed by CmpUNode::Value().
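+// For example, if A has type [max_jint-1, max_jint] and B has type [1, 1],
+// the exact (long) range of A + B is [max_jint, max_jint + 1], which wraps
+// into the two int ranges [min_jint, min_jint] and [max_jint, max_jint].
+// A test such as (A + B) == 5 can then be folded to false, since 5 lies in
+// neither range.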
+Node* BoolNode::fold_cmpI(PhaseGVN* phase, SubNode* cmp, Node* cmp1, int cmp_op,
+                          int cmp1_op, const TypeInt* cmp2_type) {
+  // Only optimize eq/ne integer comparison of add/sub
+  if ((_test._test == BoolTest::eq || _test._test == BoolTest::ne) &&
+      (cmp_op == Op_CmpI) && (cmp1_op == Op_AddI || cmp1_op == Op_SubI)) {
+    // Skip cases where the inputs of the add/sub are not integers or are of bottom type
+    const TypeInt* r0 = phase->type(cmp1->in(1))->isa_int();
+    const TypeInt* r1 = phase->type(cmp1->in(2))->isa_int();
+    if ((r0 != NULL) && (r0 != TypeInt::INT) &&
+        (r1 != NULL) && (r1 != TypeInt::INT) &&
+        (cmp2_type != TypeInt::INT)) {
+      // Compute exact (long) type range of add/sub result
+      jlong lo_long = r0->_lo;
+      jlong hi_long = r0->_hi;
+      if (cmp1_op == Op_AddI) {
+        lo_long += r1->_lo;
+        hi_long += r1->_hi;
+      } else {
+        lo_long -= r1->_hi;
+        hi_long -= r1->_lo;
+      }
+      // Check for over-/underflow by casting to integer
+      int lo_int = (int)lo_long;
+      int hi_int = (int)hi_long;
+      bool underflow = lo_long != (jlong)lo_int;
+      bool overflow  = hi_long != (jlong)hi_int;
+      if ((underflow != overflow) && (hi_int < lo_int)) {
+        // Overflow on one boundary, compute resulting type ranges:
+        // tr1 [MIN_INT, hi_int] and tr2 [lo_int, MAX_INT]
+        int w = MAX2(r0->_widen, r1->_widen); // _widen does not matter here
+        const TypeInt* tr1 = TypeInt::make(min_jint, hi_int, w);
+        const TypeInt* tr2 = TypeInt::make(lo_int, max_jint, w);
+        // Compare second input of cmp to both type ranges
+        const Type* sub_tr1 = cmp->sub(tr1, cmp2_type);
+        const Type* sub_tr2 = cmp->sub(tr2, cmp2_type);
+        if (sub_tr1 == TypeInt::CC_LT && sub_tr2 == TypeInt::CC_GT) {
+          // The result of the add/sub will never equal cmp2. Replace BoolNode
+          // by false (0) if it tests for equality and by true (1) otherwise.
+          return ConINode::make((_test._test == BoolTest::eq) ? 0 : 1);
+        }
+      }
+    }
+  }
+  return NULL;
+}
 
 //------------------------------Ideal------------------------------------------
 Node *BoolNode::Ideal(PhaseGVN *phase, bool can_reshape) {
@@ -1294,6 +1342,9 @@
     return new BoolNode( ncmp, _test.commute() );
   }
 
+  // Try to optimize signed integer comparison
+  return fold_cmpI(phase, cmp->as_Sub(), cmp1, cop, cmp1_op, cmp2_type);
+
   //  The transformation below is not valid for either signed or unsigned
   //  comparisons due to wraparound concerns at MAX_VALUE and MIN_VALUE.
   //  This transformation can be resurrected when we are able to
@@ -1338,8 +1389,6 @@
   //         phase->type( cmp2->in(2) ) == TypeInt::ONE )
   //        return clone_cmp( cmp, cmp1, cmp2->in(1), phase, BoolTest::le );
   //    }
-
-  return NULL;
 }
 
 //------------------------------Value------------------------------------------
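The range-splitting argument in fold_cmpI above can be checked outside the VM. Below is a standalone C++ sketch (not HotSpot code; <cstdint> types stand in for jint/jlong) that recomputes the two result ranges for the pattern exercised by the new TestIntegerComparison test further down: adding Integer.MAX_VALUE to a char in [0, 65535] overflows only on the upper bound, so the truncated result lies in [IntMin, hi_int] or [lo_int, IntMax], and 65535 falls in neither.

    #include <cstdio>
    #include <cstdint>

    int main() {
      // r0 = [0, 65535] (char input), r1 = [INT_MAX, INT_MAX] (the constant)
      int64_t lo_long = 0     + (int64_t)INT32_MAX;  // r0->_lo + r1->_lo
      int64_t hi_long = 65535 + (int64_t)INT32_MAX;  // r0->_hi + r1->_hi
      int32_t lo_int = (int32_t)lo_long;             // no underflow here
      int32_t hi_int = (int32_t)hi_long;             // wraps negative
      bool underflow = lo_long != (int64_t)lo_int;
      bool overflow  = hi_long != (int64_t)hi_int;
      if ((underflow != overflow) && (hi_int < lo_int)) {
        // tr1 = [INT32_MIN, hi_int], tr2 = [lo_int, INT32_MAX]
        int32_t c = 65535;  // Character.MAX_VALUE, the compared constant
        bool never_equal = (c > hi_int) && (c < lo_int);
        printf("tr1=[%d,%d] tr2=[%d,%d] c==65535 possible: %s\n",
               INT32_MIN, hi_int, lo_int, INT32_MAX,
               never_equal ? "no" : "yes");
      }
      return 0;
    }

The narrowing casts rely on two's-complement truncation, the same assumption the VM code makes when it compares the long and int values.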
--- a/hotspot/src/share/vm/opto/subnode.hpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/opto/subnode.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -286,6 +286,10 @@
   virtual uint hash() const;
   virtual uint cmp( const Node &n ) const;
   virtual uint size_of() const;
+
+  // Try to optimize signed integer comparison
+  Node* fold_cmpI(PhaseGVN* phase, SubNode* cmp, Node* cmp1, int cmp_op,
+                  int cmp1_op, const TypeInt* cmp2_type);
 public:
   const BoolTest _test;
   BoolNode( Node *cc, BoolTest::mask t): _test(t), Node(0,cc) {
--- a/hotspot/src/share/vm/prims/jni.cpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/prims/jni.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -3865,6 +3865,7 @@
 void TestG1BiasedArray_test();
 void TestBufferingOopClosure_test();
 void TestCodeCacheRemSet_test();
+void FreeRegionList_test();
 #endif
 
 void execute_internal_vm_tests() {
@@ -3900,6 +3901,9 @@
     run_unit_test(HeapRegionRemSet::test_prt());
     run_unit_test(TestBufferingOopClosure_test());
     run_unit_test(TestCodeCacheRemSet_test());
+    if (UseG1GC) {
+      run_unit_test(FreeRegionList_test());
+    }
 #endif
     tty->print_cr("All internal VM tests passed");
   }
--- a/hotspot/src/share/vm/prims/jvm.cpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/prims/jvm.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -28,6 +28,10 @@
 #include "classfile/javaClasses.hpp"
 #include "classfile/stringTable.hpp"
 #include "classfile/systemDictionary.hpp"
+#if INCLUDE_CDS
+#include "classfile/sharedClassUtil.hpp"
+#include "classfile/systemDictionaryShared.hpp"
+#endif
 #include "classfile/vmSymbols.hpp"
 #include "gc_interface/collectedHeap.inline.hpp"
 #include "interpreter/bytecode.hpp"
@@ -993,7 +997,15 @@
                                                               h_loader,
                                                               Handle(),
                                                               CHECK_NULL);
-
+#if INCLUDE_CDS
+  if (k == NULL) {
+    // If the class is not already loaded, try to see if it's in the shared
+    // archive for the current classloader (h_loader).
+    instanceKlassHandle ik = SystemDictionaryShared::find_or_load_shared_class(
+        klass_name, h_loader, CHECK_NULL);
+    k = ik();
+  }
+#endif
   return (k == NULL) ? NULL :
             (jclass) JNIHandles::make_local(env, k->java_mirror());
 JVM_END
--- a/hotspot/src/share/vm/prims/whitebox.cpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/prims/whitebox.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -47,6 +47,7 @@
 #include "utilities/exceptions.hpp"
 
 #if INCLUDE_ALL_GCS
+#include "gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp"
 #include "gc_implementation/g1/concurrentMark.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
@@ -227,6 +228,30 @@
                                         (size_t) magnitude, (size_t) iterations);
 WB_END
 
+WB_ENTRY(jboolean, WB_isObjectInOldGen(JNIEnv* env, jobject o, jobject obj))
+  oop p = JNIHandles::resolve(obj);
+#if INCLUDE_ALL_GCS
+  if (UseG1GC) {
+    G1CollectedHeap* g1 = G1CollectedHeap::heap();
+    const HeapRegion* hr = g1->heap_region_containing(p);
+    if (hr == NULL) {
+      return false;
+    }
+    return !(hr->is_young());
+  } else if (UseParallelGC) {
+    ParallelScavengeHeap* psh = ParallelScavengeHeap::heap();
+    return !psh->is_in_young(p);
+  }
+#endif // INCLUDE_ALL_GCS
+  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  return !gch->is_in_young(p);
+WB_END
+
+WB_ENTRY(jlong, WB_GetObjectSize(JNIEnv* env, jobject o, jobject obj))
+  oop p = JNIHandles::resolve(obj);
+  return p->size() * HeapWordSize;
+WB_END
+
 #if INCLUDE_ALL_GCS
 WB_ENTRY(jboolean, WB_G1IsHumongous(JNIEnv* env, jobject o, jobject obj))
   G1CollectedHeap* g1 = G1CollectedHeap::heap();
@@ -237,7 +262,7 @@
 
 WB_ENTRY(jlong, WB_G1NumFreeRegions(JNIEnv* env, jobject o))
   G1CollectedHeap* g1 = G1CollectedHeap::heap();
-  size_t nr = g1->free_regions();
+  size_t nr = g1->num_free_regions();
   return (jlong)nr;
 WB_END
 
@@ -690,6 +715,9 @@
   Universe::heap()->collect(GCCause::_last_ditch_collection);
 WB_END
 
+WB_ENTRY(void, WB_YoungGC(JNIEnv* env, jobject o))
+  Universe::heap()->collect(GCCause::_wb_young_gc);
+WB_END
 
 WB_ENTRY(void, WB_ReadReservedMemory(JNIEnv* env, jobject o))
   // static+volatile in order to force the read to happen
@@ -841,6 +869,8 @@
 
 static JNINativeMethod methods[] = {
   {CC"getObjectAddress",   CC"(Ljava/lang/Object;)J", (void*)&WB_GetObjectAddress  },
+  {CC"getObjectSize",      CC"(Ljava/lang/Object;)J", (void*)&WB_GetObjectSize     },
+  {CC"isObjectInOldGen",   CC"(Ljava/lang/Object;)Z", (void*)&WB_isObjectInOldGen  },
   {CC"getHeapOopSize",     CC"()I",                   (void*)&WB_GetHeapOopSize    },
   {CC"isClassAlive0",      CC"(Ljava/lang/String;)Z", (void*)&WB_IsClassAlive      },
   {CC"parseCommandLine",
@@ -919,6 +949,7 @@
                                                       (void*)&WB_GetStringVMFlag},
   {CC"isInStringTable",    CC"(Ljava/lang/String;)Z", (void*)&WB_IsInStringTable  },
   {CC"fullGC",   CC"()V",                             (void*)&WB_FullGC },
+  {CC"youngGC",  CC"()V",                             (void*)&WB_YoungGC },
   {CC"readReservedMemory", CC"()V",                   (void*)&WB_ReadReservedMemory },
   {CC"allocateMetaspace",
      CC"(Ljava/lang/ClassLoader;J)J",                 (void*)&WB_AllocateMetaspace },
--- a/hotspot/src/share/vm/runtime/arguments.cpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/runtime/arguments.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "classfile/classLoader.hpp"
 #include "classfile/javaAssertions.hpp"
 #include "classfile/stringTable.hpp"
 #include "classfile/symbolTable.hpp"
@@ -43,6 +44,7 @@
 #include "services/memTracker.hpp"
 #include "utilities/defaultStream.hpp"
 #include "utilities/macros.hpp"
+#include "utilities/stringUtils.hpp"
 #include "utilities/taskqueue.hpp"
 #if INCLUDE_ALL_GCS
 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
@@ -1119,11 +1121,11 @@
 // Conflict: required to use shared spaces (-Xshare:on), but
 // incompatible command line options were chosen.
 
-static void no_shared_spaces() {
+static void no_shared_spaces(const char* message) {
   if (RequireSharedSpaces) {
     jio_fprintf(defaultStream::error_stream(),
       "Class data sharing is inconsistent with other specified options.\n");
-    vm_exit_during_initialization("Unable to use shared archive.", NULL);
+    vm_exit_during_initialization("Unable to use shared archive.", message);
   } else {
     FLAG_SET_DEFAULT(UseSharedSpaces, false);
   }
@@ -1585,7 +1587,7 @@
   // at link time, or rewrite bytecodes in non-shared methods.
   if (!DumpSharedSpaces && !RequireSharedSpaces &&
       (FLAG_IS_DEFAULT(UseSharedSpaces) || !UseSharedSpaces)) {
-    no_shared_spaces();
+    no_shared_spaces("COMPILER2 default: -Xshare:auto | off, have to manually setup to on.");
   }
 #endif
 
@@ -3306,6 +3308,15 @@
     }
   }
 
+  // PrintSharedArchiveAndExit will turn on
+  //   -Xshare:on
+  //   -XX:+TraceClassPaths
+  if (PrintSharedArchiveAndExit) {
+    FLAG_SET_CMDLINE(bool, UseSharedSpaces, true);
+    FLAG_SET_CMDLINE(bool, RequireSharedSpaces, true);
+    FLAG_SET_CMDLINE(bool, TraceClassPaths, true);
+  }
+
   // Change the default value for flags  which have different default values
   // when working with older JDKs.
 #ifdef LINUX
@@ -3314,9 +3325,55 @@
     FLAG_SET_DEFAULT(UseLinuxPosixThreadCPUClocks, false);
   }
 #endif // LINUX
+  fix_appclasspath();
   return JNI_OK;
 }
 
+// Remove all empty paths from the app classpath (if IgnoreEmptyClassPaths is enabled)
+//
+// This is necessary because some apps like to specify classpath like -cp foo.jar:${XYZ}:bar.jar
+// in their start-up scripts. If XYZ is empty, the classpath will look like "-cp foo.jar::bar.jar".
+// Java treats such empty paths as if the user specified "-cp foo.jar:.:bar.jar". I.e., an empty
+// path is treated as the current directory.
+//
+// This causes problems with CDS, which requires that all directories specified in the classpath
+// be empty. In most cases, applications do NOT want to load classes from the current
+// directory anyway. Adding -XX:+IgnoreEmptyClassPaths will make these applications' start-up
+// scripts compatible with CDS.
+void Arguments::fix_appclasspath() {
+  if (IgnoreEmptyClassPaths) {
+    const char separator = *os::path_separator();
+    const char* src = _java_class_path->value();
+
+    // skip over all the leading empty paths
+    while (*src == separator) {
+      src++;
+    }
+
+    char* copy = AllocateHeap(strlen(src) + 1, mtInternal);
+    strncpy(copy, src, strlen(src) + 1);
+
+    // trim all trailing empty paths
+    for (char* tail = copy + strlen(copy) - 1; tail >= copy && *tail == separator; tail--) {
+      *tail = '\0';
+    }
+
+    char from[3] = {separator, separator, '\0'};
+    char to  [2] = {separator, '\0'};
+    while (StringUtils::replace_no_expand(copy, from, to) > 0) {
+      // Keep replacing "::" -> ":" until we have no more "::" (non-windows)
+      // Keep replacing ";;" -> ";" until we have no more ";;" (windows)
+    }
+
+    _java_class_path->set_value(copy);
+    FreeHeap(copy); // a copy was made by set_value, so don't need this anymore
+  }
+
+  if (!PrintSharedArchiveAndExit) {
+    ClassLoader::trace_class_path("[classpath: ", _java_class_path->value());
+  }
+}
+
 jint Arguments::finalize_vm_init_args(SysClassPath* scp_p, bool scp_assembly_required) {
   // This must be done after all -D arguments have been processed.
   scp_p->expand_endorsed();
@@ -3487,9 +3544,8 @@
         "Cannot dump shared archive when UseCompressedOops or UseCompressedClassPointers is off.", NULL);
     }
   } else {
-    // UseCompressedOops and UseCompressedClassPointers must be on for UseSharedSpaces.
     if (!UseCompressedOops || !UseCompressedClassPointers) {
-      no_shared_spaces();
+      no_shared_spaces("UseCompressedOops and UseCompressedClassPointers must be on for UseSharedSpaces.");
     }
 #endif
   }
@@ -3600,9 +3656,9 @@
 #if INCLUDE_NMT
     if (match_option(option, "-XX:NativeMemoryTracking", &tail)) {
       // The launcher did not setup nmt environment variable properly.
-//      if (!MemTracker::check_launcher_nmt_support(tail)) {
-//        warning("Native Memory Tracking did not setup properly, using wrong launcher?");
-//      }
+      if (!MemTracker::check_launcher_nmt_support(tail)) {
+        warning("Native Memory Tracking did not setup properly, using wrong launcher?");
+      }
 
       // Verify if nmt option is valid.
       if (MemTracker::verify_nmt_option()) {
@@ -3729,7 +3785,7 @@
     FLAG_SET_DEFAULT(UseSharedSpaces, false);
     FLAG_SET_DEFAULT(PrintSharedSpaces, false);
   }
-  no_shared_spaces();
+  no_shared_spaces("CDS Disabled");
 #endif // INCLUDE_CDS
 
   return JNI_OK;
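The normalization that fix_appclasspath performs can be illustrated with a small standalone sketch. It assumes ':' as the path separator and uses plain libc calls in place of HotSpot's allocator and of StringUtils::replace_no_expand (added later in this changeset); the steps mirror the function above: skip leading separators, trim trailing ones, then collapse doubled separators.

    #include <cstring>
    #include <cstdio>

    static void normalize(char* cp) {
      // skip over all the leading empty paths
      char* src = cp;
      while (*src == ':') src++;
      memmove(cp, src, strlen(src) + 1);
      // trim all trailing empty paths
      for (char* tail = cp + strlen(cp) - 1; tail >= cp && *tail == ':'; tail--)
        *tail = '\0';
      // keep replacing "::" -> ":" until no "::" remains
      char* p;
      while ((p = strstr(cp, "::")) != NULL)
        memmove(p, p + 1, strlen(p + 1) + 1);
    }

    int main() {
      char cp[] = ":foo.jar::bar.jar:";
      normalize(cp);
      printf("%s\n", cp);  // prints "foo.jar:bar.jar"
      return 0;
    }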
--- a/hotspot/src/share/vm/runtime/arguments.hpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/runtime/arguments.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -579,12 +579,15 @@
     _meta_index_dir  = meta_index_dir;
   }
 
-  static char *get_java_home() { return _java_home->value(); }
-  static char *get_dll_dir() { return _sun_boot_library_path->value(); }
-  static char *get_endorsed_dir() { return _java_endorsed_dirs->value(); }
-  static char *get_sysclasspath() { return _sun_boot_class_path->value(); }
+  static char* get_java_home() { return _java_home->value(); }
+  static char* get_dll_dir() { return _sun_boot_library_path->value(); }
+  static char* get_endorsed_dir() { return _java_endorsed_dirs->value(); }
+  static char* get_sysclasspath() { return _sun_boot_class_path->value(); }
   static char* get_meta_index_path() { return _meta_index_path; }
   static char* get_meta_index_dir()  { return _meta_index_dir;  }
+  static char* get_ext_dirs() { return _java_ext_dirs->value(); }
+  static char* get_appclasspath() { return _java_class_path->value(); }
+  static void  fix_appclasspath();
 
   // Operation modi
   static Mode mode()                        { return _mode; }
--- a/hotspot/src/share/vm/runtime/globals.hpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -2345,6 +2345,12 @@
   notproduct(bool, TraceScavenge, false,                                    \
           "Trace scavenge")                                                 \
                                                                             \
+  product(bool, IgnoreEmptyClassPaths, false,                               \
+          "Ignore empty path elements in -classpath")                       \
+                                                                            \
+  product(bool, TraceClassPaths, false,                                     \
+          "Trace processing of class paths")                                \
+                                                                            \
   product_rw(bool, TraceClassLoading, false,                                \
           "Trace all classes loaded")                                       \
                                                                             \
@@ -3779,6 +3785,13 @@
   product(bool, PrintSharedSpaces, false,                                   \
           "Print usage of shared spaces")                                   \
                                                                             \
+  product(bool, PrintSharedArchiveAndExit, false,                           \
+          "Print shared archive file contents")                             \
+                                                                            \
+  product(bool, PrintSharedDictionary, false,                               \
+          "If PrintSharedArchiveAndExit is true, also print the shared "    \
+          "dictionary")                                                     \
+                                                                            \
   product(uintx, SharedReadWriteSize,  NOT_LP64(12*M) LP64_ONLY(16*M),      \
           "Size of read-write space for metadata (in bytes)")               \
                                                                             \
@@ -3795,6 +3808,10 @@
           NOT_LP64(LINUX_ONLY(2*G) NOT_LINUX(0)),                           \
           "Address to allocate shared memory region for class data")        \
                                                                             \
+  diagnostic(bool, IgnoreUnverifiableClassesDuringDump, false,              \
+          "Do not quit -Xshare:dump even if we encounter unverifiable "     \
+          "classes. Just exclude them from the shared dictionary.")         \
+                                                                            \
   diagnostic(bool, PrintMethodHandleStubs, false,                           \
           "Print generated stub code for method handles")                   \
                                                                             \
@@ -3885,9 +3902,19 @@
   product(bool , AllowNonVirtualCalls, false,                               \
           "Obey the ACC_SUPER flag and allow invokenonvirtual calls")       \
                                                                             \
+  product(ccstr, DumpLoadedClassList, NULL,                                 \
+          "Dump the names all loaded classes, that could be stored into "   \
+          "the CDS archive, in the specified file")                         \
+                                                                            \
+  product(ccstr, SharedClassListFile, NULL,                                 \
+          "Override the default CDS class list")                            \
+                                                                            \
   diagnostic(ccstr, SharedArchiveFile, NULL,                                \
           "Override the default location of the CDS archive file")          \
                                                                             \
+  product(ccstr, ExtraSharedClassListFile, NULL,                            \
+          "Extra classlist for building the CDS archive file")              \
+                                                                            \
   experimental(size_t, ArrayAllocatorMallocLimit,                           \
           SOLARIS_ONLY(64*K) NOT_SOLARIS((size_t)-1),                       \
           "Allocation less than this value will be allocated "              \
--- a/hotspot/src/share/vm/runtime/javaCalls.cpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/runtime/javaCalls.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -309,6 +309,10 @@
 }
 
 void JavaCalls::call_helper(JavaValue* result, methodHandle* m, JavaCallArguments* args, TRAPS) {
+  // During dumping, the Java execution environment is not fully initialized. Also, Java execution
+  // may cause undesirable side-effects in the class metadata.
+  assert(!DumpSharedSpaces, "must not execute Java bytecodes when dumping");
+
   methodHandle method = *m;
   JavaThread* thread = (JavaThread*)THREAD;
   assert(thread->is_Java_thread(), "must be called by a java thread");
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -314,7 +314,7 @@
   nonstatic_field(InstanceKlass,               _jni_ids,                                      JNIid*)                                \
   nonstatic_field(InstanceKlass,               _osr_nmethods_head,                            nmethod*)                              \
   nonstatic_field(InstanceKlass,               _breakpoints,                                  BreakpointInfo*)                       \
-  nonstatic_field(InstanceKlass,               _generic_signature_index,                           u2)                               \
+  nonstatic_field(InstanceKlass,               _generic_signature_index,                      u2)                                    \
   nonstatic_field(InstanceKlass,               _methods_jmethod_ids,                          jmethodID*)                            \
   volatile_nonstatic_field(InstanceKlass,      _idnum_allocated_count,                        u2)                                    \
   nonstatic_field(InstanceKlass,               _annotations,                                  Annotations*)                          \
@@ -662,6 +662,7 @@
       static_field(SystemDictionary,            WK_KLASS(StackOverflowError_klass),            Klass*)                               \
       static_field(SystemDictionary,            WK_KLASS(ProtectionDomain_klass),              Klass*)                               \
       static_field(SystemDictionary,            WK_KLASS(AccessControlContext_klass),          Klass*)                               \
+      static_field(SystemDictionary,            WK_KLASS(SecureClassLoader_klass),             Klass*)                               \
       static_field(SystemDictionary,            WK_KLASS(Reference_klass),                     Klass*)                               \
       static_field(SystemDictionary,            WK_KLASS(SoftReference_klass),                 Klass*)                               \
       static_field(SystemDictionary,            WK_KLASS(WeakReference_klass),                 Klass*)                               \
--- a/hotspot/src/share/vm/services/mallocSiteTable.hpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/services/mallocSiteTable.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -32,13 +32,14 @@
 #include "services/allocationSite.hpp"
 #include "services/mallocTracker.hpp"
 #include "services/nmtCommon.hpp"
+#include "utilities/nativeCallStack.hpp"
 
 // MallocSite represents a code path that eventually calls
 // os::malloc() to allocate memory
 class MallocSite : public AllocationSite<MemoryCounter> {
  public:
   MallocSite() :
-    AllocationSite<MemoryCounter>(emptyStack) { }
+    AllocationSite<MemoryCounter>(NativeCallStack::EMPTY_STACK) { }
 
   MallocSite(const NativeCallStack& stack) :
     AllocationSite<MemoryCounter>(stack) { }
--- a/hotspot/src/share/vm/services/memTracker.cpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/services/memTracker.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -39,8 +39,6 @@
 volatile NMT_TrackingLevel MemTracker::_tracking_level = NMT_unknown;
 NMT_TrackingLevel MemTracker::_cmdline_tracking_level = NMT_unknown;
 
-NativeCallStack emptyStack(0, false);
-
 MemBaseline MemTracker::_baseline;
 Mutex*      MemTracker::_query_lock = NULL;
 bool MemTracker::_is_nmt_env_valid = true;
@@ -69,6 +67,10 @@
     os::unsetenv(buf);
   }
 
+  // Construct NativeCallStack::EMPTY_STACK. It may get constructed twice,
+  // but that is benign: both constructions produce the same result.
+  ::new ((void*)&NativeCallStack::EMPTY_STACK) NativeCallStack(0, false);
+
   if (!MallocTracker::initialize(level) ||
       !VirtualMemoryTracker::initialize(level)) {
     level = NMT_off;
@@ -77,7 +79,12 @@
 }
 
 void MemTracker::init() {
-  if (tracking_level() >= NMT_summary) {
+  NMT_TrackingLevel level = tracking_level();
+  if (level >= NMT_summary) {
+    if (!VirtualMemoryTracker::late_initialize(level)) {
+      shutdown();
+      return;
+    }
     _query_lock = new (std::nothrow) Mutex(Monitor::max_nonleaf, "NMT_queryLock");
     // Already OOM. It is unlikely, but still have to handle it.
     if (_query_lock == NULL) {
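The placement-new in init_tracking_level above exists because NMT starts before C++ static initializers are guaranteed to have run, yet EMPTY_STACK must already be usable. A stripped-down sketch of the pattern with hypothetical types (the C-style cast deliberately drops const, exactly as the VM code does):

    #include <new>

    class CallStack {
     public:
      static const CallStack EMPTY;
      CallStack(int to_skip, bool fill) : _depth(0) { (void)to_skip; (void)fill; }
     private:
      int _depth;
    };

    // Normal static initialization; may run "too late" for an early client.
    const CallStack CallStack::EMPTY(0, false);

    // Early bootstrap hook: force construction now. Running the constructor
    // twice is benign because it always produces the same state.
    void early_init() {
      ::new ((void*)&CallStack::EMPTY) CallStack(0, false);
    }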
--- a/hotspot/src/share/vm/services/memTracker.hpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/services/memTracker.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -26,14 +26,13 @@
 #define SHARE_VM_SERVICES_MEM_TRACKER_HPP
 
 #include "services/nmtCommon.hpp"
+#include "utilities/nativeCallStack.hpp"
 
-class NativeCallStack;
-extern NativeCallStack emptyStack;
 
 #if !INCLUDE_NMT
 
-#define CURRENT_PC   emptyStack
-#define CALLER_PC    emptyStack
+#define CURRENT_PC   NativeCallStack::EMPTY_STACK
+#define CALLER_PC    NativeCallStack::EMPTY_STACK
 
 class Tracker : public StackObj {
  public:
@@ -83,9 +82,9 @@
 extern volatile bool NMT_stack_walkable;
 
 #define CURRENT_PC ((MemTracker::tracking_level() == NMT_detail && NMT_stack_walkable) ? \
-                    NativeCallStack(0, true) : emptyStack)
+                    NativeCallStack(0, true) : NativeCallStack::EMPTY_STACK)
 #define CALLER_PC  ((MemTracker::tracking_level() == NMT_detail && NMT_stack_walkable) ?  \
-                    NativeCallStack(1, true) : emptyStack)
+                    NativeCallStack(1, true) : NativeCallStack::EMPTY_STACK)
 
 class MemBaseline;
 class Mutex;
--- a/hotspot/src/share/vm/services/nmtCommon.hpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/services/nmtCommon.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -50,9 +50,6 @@
 // build time decision.
 const int NMT_TrackingStackDepth = 4;
 
-class NativeCallStack;
-extern NativeCallStack emptyStack;
-
 // A few common utilities for native memory tracking
 class NMTUtil : AllStatic {
  public:
--- a/hotspot/src/share/vm/services/virtualMemoryTracker.cpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/services/virtualMemoryTracker.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -34,7 +34,7 @@
   ::new ((void*)_snapshot) VirtualMemorySnapshot();
 }
 
-SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base> VirtualMemoryTracker::_reserved_regions;
+SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>* VirtualMemoryTracker::_reserved_regions;
 
 int compare_committed_region(const CommittedMemoryRegion& r1, const CommittedMemoryRegion& r2) {
   return r1.compare(r2);
@@ -167,7 +167,7 @@
           // higher part
           address high_base = addr + sz;
           size_t  high_size = top - high_base;
-          CommittedMemoryRegion high_rgn(high_base, high_size, emptyStack);
+          CommittedMemoryRegion high_rgn(high_base, high_size, NativeCallStack::EMPTY_STACK);
           return add_committed_region(high_rgn);
         } else {
           return false;
@@ -283,17 +283,26 @@
   return true;
 }
 
+bool VirtualMemoryTracker::late_initialize(NMT_TrackingLevel level) {
+  if (level >= NMT_summary) {
+    _reserved_regions = new (std::nothrow, ResourceObj::C_HEAP, mtNMT)
+      SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>();
+    return (_reserved_regions != NULL);
+  }
+  return true;
+}
+
 bool VirtualMemoryTracker::add_reserved_region(address base_addr, size_t size,
    const NativeCallStack& stack, MEMFLAGS flag, bool all_committed) {
   assert(base_addr != NULL, "Invalid address");
   assert(size > 0, "Invalid size");
-
+  assert(_reserved_regions != NULL, "Sanity check");
   ReservedMemoryRegion  rgn(base_addr, size, stack, flag);
-  ReservedMemoryRegion* reserved_rgn = _reserved_regions.find(rgn);
+  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
   LinkedListNode<ReservedMemoryRegion>* node;
   if (reserved_rgn == NULL) {
     VirtualMemorySummary::record_reserved_memory(size, flag);
-    node = _reserved_regions.add(rgn);
+    node = _reserved_regions->add(rgn);
     if (node != NULL) {
       node->data()->set_all_committed(all_committed);
       return true;
@@ -328,19 +337,28 @@
 
         *reserved_rgn = rgn;
         return true;
-      } else {
-        ShouldNotReachHere();
-        return false;
       }
+
+      // CDS mapping region.
+      // CDS reserves the whole region for mapping the CDS archive, then maps each section into the region.
+      // NMT reports CDS as a whole.
+      if (reserved_rgn->flag() == mtClassShared) {
+        assert(reserved_rgn->contain_region(base_addr, size), "Reserved CDS region should contain this mapping region");
+        return true;
+      }
+
+      ShouldNotReachHere();
+      return false;
     }
   }
 }
 
 void VirtualMemoryTracker::set_reserved_region_type(address addr, MEMFLAGS flag) {
   assert(addr != NULL, "Invalid address");
+  assert(_reserved_regions != NULL, "Sanity check");
 
   ReservedMemoryRegion   rgn(addr, 1);
-  ReservedMemoryRegion*  reserved_rgn = _reserved_regions.find(rgn);
+  ReservedMemoryRegion*  reserved_rgn = _reserved_regions->find(rgn);
   if (reserved_rgn != NULL) {
     assert(reserved_rgn->contain_address(addr), "Containment");
     if (reserved_rgn->flag() != flag) {
@@ -354,8 +372,10 @@
   const NativeCallStack& stack) {
   assert(addr != NULL, "Invalid address");
   assert(size > 0, "Invalid size");
+  assert(_reserved_regions != NULL, "Sanity check");
+
   ReservedMemoryRegion  rgn(addr, size);
-  ReservedMemoryRegion* reserved_rgn = _reserved_regions.find(rgn);
+  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
 
   assert(reserved_rgn != NULL, "No reserved region");
   assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
@@ -365,8 +385,10 @@
 bool VirtualMemoryTracker::remove_uncommitted_region(address addr, size_t size) {
   assert(addr != NULL, "Invalid address");
   assert(size > 0, "Invalid size");
+  assert(_reserved_regions != NULL, "Sanity check");
+
   ReservedMemoryRegion  rgn(addr, size);
-  ReservedMemoryRegion* reserved_rgn = _reserved_regions.find(rgn);
+  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
   assert(reserved_rgn != NULL, "No reserved region");
   assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
   return reserved_rgn->remove_uncommitted_region(addr, size);
@@ -375,9 +397,10 @@
 bool VirtualMemoryTracker::remove_released_region(address addr, size_t size) {
   assert(addr != NULL, "Invalid address");
   assert(size > 0, "Invalid size");
+  assert(_reserved_regions != NULL, "Sanity check");
 
   ReservedMemoryRegion  rgn(addr, size);
-  ReservedMemoryRegion* reserved_rgn = _reserved_regions.find(rgn);
+  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
 
   assert(reserved_rgn != NULL, "No reserved region");
 
@@ -390,7 +413,7 @@
   VirtualMemorySummary::record_released_memory(size, reserved_rgn->flag());
 
   if (reserved_rgn->same_region(addr, size)) {
-    return _reserved_regions.remove(rgn);
+    return _reserved_regions->remove(rgn);
   } else {
     assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
     if (reserved_rgn->base() == addr ||
@@ -405,7 +428,7 @@
 
       // use original region for lower region
       reserved_rgn->exclude_region(addr, top - addr);
-      LinkedListNode<ReservedMemoryRegion>* new_rgn = _reserved_regions.add(high_rgn);
+      LinkedListNode<ReservedMemoryRegion>* new_rgn = _reserved_regions->add(high_rgn);
       if (new_rgn == NULL) {
         return false;
       } else {
@@ -418,8 +441,9 @@
 
 
 bool VirtualMemoryTracker::walk_virtual_memory(VirtualMemoryWalker* walker) {
+  assert(_reserved_regions != NULL, "Sanity check");
   ThreadCritical tc;
-  LinkedListNode<ReservedMemoryRegion>* head = _reserved_regions.head();
+  LinkedListNode<ReservedMemoryRegion>* head = _reserved_regions->head();
   while (head != NULL) {
     const ReservedMemoryRegion* rgn = head->peek();
     if (!walker->do_allocation_site(rgn)) {
@@ -439,7 +463,10 @@
     assert(from == NMT_summary || from == NMT_detail, "Just check");
     // Clean up virtual memory tracking data structures.
     ThreadCritical tc;
-    _reserved_regions.clear();
+    if (_reserved_regions != NULL) {
+      delete _reserved_regions;
+      _reserved_regions = NULL;
+    }
   }
 
   return true;
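The switch of _reserved_regions from a static object to a heap pointer follows the same initialization-order theme: a static SortedLinkedList member would be built during C++ static initialization, which can race with NMT's own startup, whereas late_initialize() pins construction to a known point. A compact sketch with hypothetical stand-in types:

    #include <new>
    #include <cstddef>

    template <typename T> class SortedList {
     public:
      bool add(const T&) { return true; }  // placeholder for sorted insert
    };

    struct Region {};

    class Tracker {
     public:
      // Called once tracking is known to be at least summary level.
      static bool late_initialize() {
        _regions = new (std::nothrow) SortedList<Region>();
        return _regions != NULL;
      }
      static bool add(const Region& r) {
        return _regions != NULL && _regions->add(r);  // guarded, as asserted above
      }
     private:
      static SortedList<Region>* _regions;
    };

    SortedList<Region>* Tracker::_regions = NULL;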
--- a/hotspot/src/share/vm/services/virtualMemoryTracker.hpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/services/virtualMemoryTracker.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -320,7 +320,7 @@
 
 
   ReservedMemoryRegion(address base, size_t size) :
-    VirtualMemoryRegion(base, size), _stack(emptyStack), _flag(mtNone),
+    VirtualMemoryRegion(base, size), _stack(NativeCallStack::EMPTY_STACK), _flag(mtNone),
     _all_committed(false) { }
 
   // Copy constructor
@@ -414,6 +414,9 @@
  public:
   static bool initialize(NMT_TrackingLevel level);
 
+  // Late phase initialization
+  static bool late_initialize(NMT_TrackingLevel level);
+
   static bool add_reserved_region (address base_addr, size_t size, const NativeCallStack& stack,
     MEMFLAGS flag = mtNone, bool all_committed = false);
 
@@ -428,7 +431,7 @@
   static bool transition(NMT_TrackingLevel from, NMT_TrackingLevel to);
 
  private:
-  static SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base> _reserved_regions;
+  static SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>* _reserved_regions;
 };
 
 
--- a/hotspot/src/share/vm/utilities/exceptions.cpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/utilities/exceptions.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -85,9 +85,13 @@
 #endif // ASSERT
 
   if (thread->is_VM_thread()
-      || thread->is_Compiler_thread() ) {
+      || thread->is_Compiler_thread()
+      || DumpSharedSpaces ) {
     // We do not care what kind of exception we get for the vm-thread or a thread which
     // is compiling.  We just install a dummy exception object
+    //
+    // We also cannot throw a proper exception when dumping, because we cannot run
+    // Java bytecodes now. A dummy exception will suffice.
     thread->set_pending_exception(Universe::vm_exception(), file, line);
     return true;
   }
@@ -108,9 +112,13 @@
   }
 
   if (thread->is_VM_thread()
-      || thread->is_Compiler_thread() ) {
+      || thread->is_Compiler_thread()
+      || DumpSharedSpaces ) {
     // We do not care what kind of exception we get for the vm-thread or a thread which
     // is compiling.  We just install a dummy exception object
+    //
+    // We also cannot throw a proper exception when dumping, because we cannot run
+    // Java bytecodes now. A dummy exception will suffice.
     thread->set_pending_exception(Universe::vm_exception(), file, line);
     return true;
   }
--- a/hotspot/src/share/vm/utilities/nativeCallStack.cpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/utilities/nativeCallStack.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -27,6 +27,7 @@
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/nativeCallStack.hpp"
 
+const NativeCallStack NativeCallStack::EMPTY_STACK(0, false);
 
 NativeCallStack::NativeCallStack(int toSkip, bool fillStack) :
   _hash_value(0) {
--- a/hotspot/src/share/vm/utilities/nativeCallStack.hpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/utilities/nativeCallStack.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -52,6 +52,9 @@
  *    from it.
  */
 class NativeCallStack : public StackObj {
+ public:
+  static const NativeCallStack EMPTY_STACK;
+
  private:
   address   _stack[NMT_TrackingStackDepth];
   int       _hash_value;
--- a/hotspot/src/share/vm/utilities/ostream.cpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/utilities/ostream.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -352,6 +352,7 @@
 xmlStream*   xtty;
 outputStream* tty;
 outputStream* gclog_or_tty;
+CDS_ONLY(fileStream* classlist_file;) // Lists only the classes that can be stored into the CDS archive
 extern Mutex* tty_lock;
 
 #define EXTRACHARLEN   32
@@ -463,7 +464,8 @@
   return buf;
 }
 
-// log_name comes from -XX:LogFile=log_name or -Xloggc:log_name
+// log_name comes from -XX:LogFile=log_name, -Xloggc:log_name or
+// -XX:DumpLoadedClassList=<file_name>
 // in log_name, %p => pid1234 and
 //              %t => YYYY-MM-DD_HH-MM-SS
 static const char* make_log_name(const char* log_name, const char* force_directory) {
@@ -1103,6 +1105,16 @@
     gclog_or_tty = gclog;
   }
 
+#if INCLUDE_CDS
+  // For -XX:DumpLoadedClassList=<file> option
+  if (DumpLoadedClassList != NULL) {
+    const char* list_name = make_log_name(DumpLoadedClassList, NULL);
+    classlist_file = new(ResourceObj::C_HEAP, mtInternal)
+                         fileStream(list_name);
+    FREE_C_HEAP_ARRAY(char, list_name, mtInternal);
+  }
+#endif
+
   // If we haven't lazily initialized the logfile yet, do it now,
   // to avoid the possibility of lazy initialization during a VM
   // crash, which can affect the stability of the fatal error handler.
@@ -1115,6 +1127,11 @@
   static bool ostream_exit_called = false;
   if (ostream_exit_called)  return;
   ostream_exit_called = true;
+#if INCLUDE_CDS
+  if (classlist_file != NULL) {
+    delete classlist_file;
+  }
+#endif
   if (gclog_or_tty != tty) {
       delete gclog_or_tty;
   }
--- a/hotspot/src/share/vm/utilities/ostream.hpp	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/src/share/vm/utilities/ostream.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -214,6 +214,8 @@
   void flush();
 };
 
+CDS_ONLY(extern fileStream*   classlist_file;)
+
 // unlike fileStream, fdStream does unbuffered I/O by calling
 // open() and write() directly. It is async-safe, but output
 // from multiple thread may be mixed together. Used by fatal
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/utilities/stringUtils.cpp	Wed Jul 05 19:58:32 2017 +0200
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "utilities/stringUtils.hpp"
+
+int StringUtils::replace_no_expand(char* string, const char* from, const char* to) {
+  int replace_count = 0;
+  size_t from_len = strlen(from);
+  size_t to_len = strlen(to);
+  assert(from_len >= to_len, "must not expand input");
+
+  for (char* dst = string; *dst && (dst = strstr(dst, from)) != NULL;) {
+    char* left_over = dst + from_len;
+    memmove(dst, to, to_len);                       // does not copy trailing 0 of <to>
+    dst += to_len;                                  // skip over the replacement.
+    memmove(dst, left_over, strlen(left_over) + 1); // copies the trailing 0 of <left_over>
+    ++replace_count;
+  }
+
+  return replace_count;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/utilities/stringUtils.hpp	Wed Jul 05 19:58:32 2017 +0200
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_UTILITIES_STRINGUTILS_HPP
+#define SHARE_VM_UTILITIES_STRINGUTILS_HPP
+
+#include "memory/allocation.hpp"
+
+class StringUtils : AllStatic {
+public:
+  // Replace the substring <from> with another string <to>. <to> must be
+  // no longer than <from>. The input string is modified in-place.
+  //
+  // Replacement is done in a single pass left-to-right. So replace_no_expand("aaa", "aa", "a")
+  // will result in "aa", not "a".
+  //
+  // Returns the count of substrings that have been replaced.
+  static int replace_no_expand(char* string, const char* from, const char* to);
+};
+
+#endif // SHARE_VM_UTILITIES_STRINGUTILS_HPP
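The single-pass, left-to-right behavior documented above ("aaa" with from="aa", to="a" yields "aa", not "a") can be traced with a few libc calls; this standalone sketch performs exactly one replacement step, which is all one pass finds in "aaa":

    #include <cstring>
    #include <cstdio>

    int main() {
      char s[] = "aaa";
      char* dst = strstr(s, "aa");   // first match, at offset 0
      memmove(dst, "a", 1);          // write the replacement
      memmove(dst + 1, dst + 2, 2);  // shift the tail, including '\0'
      // scanning resumes after the replacement; only "a" remains there
      printf("%s\n", s);             // prints "aa"
      return 0;
    }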
--- a/hotspot/test/TEST.groups	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/test/TEST.groups	Wed Jul 05 19:58:32 2017 +0200
@@ -328,6 +328,21 @@
   sanity/ExecuteInternalVMTests.java
 
 hotspot_runtime = \
+  runtime/ \
+ -runtime/6888954/vmerrors.sh \
+ -runtime/RedefineObject/TestRedefineObject.java \
+ -runtime/8003720/Test8003720.java \
+ -runtime/Metaspace/FragmentMetaspace.java \
+ -runtime/Metaspace/FragmentMetaspaceSimple.java \
+ -runtime/Thread/TestThreadDumpMonitorContention.java \
+ -runtime/SharedArchiveFile/SharedBaseAddress.java \
+ -runtime/memory/ReserveMemory.java \
+ -runtime/Unsafe/RangeCheck.java \
+ -runtime/SharedArchiveFile/CdsSameObjectAlignment.java \
+ -runtime/SharedArchiveFile/DefaultUseWithClient.java \
+ -runtime/Thread/CancellableThreadTest.java
+
+hotspot_runtime_closed = \
   sanity/ExecuteInternalVMTests.java
 
 hotspot_serviceability = \
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/IntegerArithmetic/TestIntegerComparison.java	Wed Jul 05 19:58:32 2017 +0200
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestIntegerComparison
+ * @bug 8043284 8042786
+ * @summary "Tests optimizations of signed and unsigned integer comparison."
+ * @run main/othervm -server -Xcomp -XX:+IgnoreUnrecognizedVMOptions -XX:-TieredCompilation -XX:CompileOnly=TestIntegerComparison::testSigned,TestIntegerComparison::testUnsigned TestIntegerComparison
+ */
+public class TestIntegerComparison {
+  /**
+   * Tests optimization of signed integer comparison (see BoolNode::Ideal).
+   * The body of the if statement is unreachable and should not be compiled.
+   * @param c Character (value in the integer range [0, 65535])
+   */
+  public static void testSigned(char c) {
+    // The following addition may overflow. The result is in one
+    // of the two ranges [IntMax] and [IntMin, IntMin + CharMax - 1].
+    int result = c + Integer.MAX_VALUE;
+    // CmpINode has to consider both result ranges instead of only
+    // the general [IntMin, IntMax] range to be able to prove that
+    // result is always unequal to CharMax.
+    if (result == Character.MAX_VALUE) {
+      // Unreachable
+      throw new RuntimeException("Should not reach here!");
+    }
+  }
+
+  /**
+   * Tests optimization of unsigned integer comparison (see CmpUNode::Value).
+   * The body of the if statement is unreachable and should not be compiled.
+   * @param c Character (value in the integer range [0, 65535])
+   */
+  public static void testUnsigned(char c) {
+    /*
+     * The following if statement consisting of two CmpIs is replaced
+     * by a CmpU during optimization (see 'IfNode::fold_compares').
+     *
+     * The signed (lo < i) and (i < hi) are replaced by the unsigned
+     * (i - (lo+1) < hi - (lo+1)). In this case the unsigned comparison
+     * equals (result - 2) < 98 leading to the following CmpUNode:
+     *
+     * CmpU (AddI result, -2) 98
+     *
+     * With the value of result this is simplified to:
+     *
+     * CmpU (AddI c, -(CharMax - IntMin)) 98
+     *
+     * The subtraction may underflow. The result is in one of the two
+     * ranges [IntMin], [IntMax - CharMax + 1]. Both ranges have to be
+     * considered instead of only the general [IntMin, IntMax] to prove
+     * that due to the overflow the signed comparison result < 98 is
+     * always false.
+     */
+    int result = c - (Character.MAX_VALUE - Integer.MIN_VALUE) + 2;
+    if (1 < result && result < 100) {
+      // Unreachable
+      throw new RuntimeException("Should not reach here!");
+    }
+  }
+
+  /**
+   * Tests optimizations of signed and unsigned integer comparison.
+   */
+  public static void main(String[] args) {
+    // We use characters to get a limited integer range for free
+    for (int i = Character.MIN_VALUE; i <= Character.MAX_VALUE; ++i) {
+      testSigned((char) i);
+      testUnsigned((char) i);
+    }
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/arraycopy/TestMissingControl.java	Wed Jul 05 19:58:32 2017 +0200
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8055153
+ * @summary missing control on LoadRange and LoadKlass when array copy macro node is expanded
+ * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement -XX:-TieredCompilation TestMissingControl
+ *
+ */
+public class TestMissingControl {
+
+    static int[] m1(int[] a2) {
+        int[] a1 = new int[10];
+        System.arraycopy(a1, 0, a2, 0, 10);
+        return a1;
+    }
+
+    static class A {
+    }
+
+    static Object m2(Object[] a2) {
+        A[] a1 = new A[10];
+        System.arraycopy(a1, 0, a2, 0, 10);
+        return a1;
+    }
+
+    static void test1() {
+        int[] a2 = new int[10];
+        int[] a3 = new int[5];
+
+        // compile m1 with arraycopy intrinsified
+        for (int i = 0; i < 20000; i++) {
+            m1(a2);
+        }
+
+        // make m1 trap
+        for (int i = 0; i < 10; i++) {
+            try {
+                m1(a3);
+            } catch (IndexOutOfBoundsException ioobe) {
+            }
+        }
+
+        // recompile m1
+        for (int i = 0; i < 20000; i++) {
+            m1(a2);
+        }
+
+        try {
+            m1(null);
+        } catch (NullPointerException npe) {}
+    }
+
+    static void test2() {
+        A[] a2 = new A[10];
+        A[] a3 = new A[5];
+
+        // compile m2 with arraycopy intrinsified
+        for (int i = 0; i < 20000; i++) {
+            m2(a2);
+        }
+
+        // make m2 trap
+        for (int i = 0; i < 10; i++) {
+            try {
+                m2(a3);
+            } catch (IndexOutOfBoundsException ioobe) {
+            }
+        }
+
+        // recompile m2
+        for (int i = 0; i < 20000; i++) {
+            m2(a2);
+        }
+
+        try {
+            m2(null);
+        } catch (NullPointerException npe) {}
+    }
+
+    public static void main(String[] args) {
+        test1();
+        test2();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/classUnloading/anonymousClass/TestAnonymousClassUnloading.java	Wed Jul 05 19:58:32 2017 +0200
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import sun.hotspot.WhiteBox;
+import sun.misc.Unsafe;
+import sun.misc.IOUtils;
+
+import java.lang.reflect.Method;
+import java.net.URL;
+import java.net.URLConnection;
+
+/*
+ * @test TestAnonymousClassUnloading
+ * @bug 8054402
+ * @summary "Tests unloading of anonymous classes."
+ * @library /testlibrary /testlibrary/whitebox
+ * @compile TestAnonymousClassUnloading.java
+ * @run main ClassFileInstaller TestAnonymousClassUnloading sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:-BackgroundCompilation TestAnonymousClassUnloading
+ */
+public class TestAnonymousClassUnloading {
+    private static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox();
+    private static final Unsafe UNSAFE = Unsafe.getUnsafe();
+    private static int COMP_LEVEL_SIMPLE = 1;
+    private static int COMP_LEVEL_FULL_OPTIMIZATION = 4;
+
+    /**
+     * We override hashCode here to be able to access this implementation
+     * via an Object reference (we cannot cast to TestAnonymousClassUnloading).
+     */
+    @Override
+    public int hashCode() {
+        return 42;
+    }
+
+    /**
+     * Does some work by using the anonymousClass.
+     * @param anonymousClass Class performing some work (will be unloaded)
+     */
+    private static void doWork(Class<?> anonymousClass) throws InstantiationException, IllegalAccessException {
+        // Create a new instance
+        Object anon = anonymousClass.newInstance();
+        // We would like to call a method of anonymousClass here but we cannot cast because the class
+        // was loaded by a different class loader. One solution would be to use reflection but since
+        // we want C2 to implement the call as an IC we call Object::hashCode() here which actually
+        // calls anonymousClass::hashCode(). C2 will then implement this call as an IC.
+        if (anon.hashCode() != 42) {
+            new RuntimeException("Work not done");
+        }
+    }
+
+    /**
+     * Makes sure that method is compiled by forcing compilation if not yet compiled.
+     * @param m Method to be checked
+     */
+    private static void makeSureIsCompiled(Method m) {
+        // Make sure background compilation is disabled
+        if (WHITE_BOX.getBooleanVMFlag("BackgroundCompilation")) {
+            throw new RuntimeException("Background compilation enabled");
+        }
+
+        // Check if already compiled
+        if (!WHITE_BOX.isMethodCompiled(m)) {
+            // If not, try to compile it with C2
+            if (!WHITE_BOX.enqueueMethodForCompilation(m, COMP_LEVEL_FULL_OPTIMIZATION)) {
+                // C2 compiler not available, try to compile with C1
+                WHITE_BOX.enqueueMethodForCompilation(m, COMP_LEVEL_SIMPLE);
+            }
+            // Because background compilation is disabled, method should now be compiled
+            if (!WHITE_BOX.isMethodCompiled(m)) {
+                throw new RuntimeException(m + " not compiled");
+            }
+        }
+    }
+
+    /**
+     * This test creates stale Klass* metadata referenced by a compiled IC.
+     *
+     * The following steps are performed:
+     * (1) An anonymous version of TestAnonymousClassUnloading is loaded by a custom class loader
+     * (2) The method doWork that calls a method of the anonymous class is compiled. The call
+     *     is implemented as an IC referencing Klass* metadata of the anonymous class.
+     * (3) Unloading of the anonymous class is enforced. The IC now references dead metadata.
+     */
+    public static void main(String[] args) throws Exception {
+        // (1) Load an anonymous version of this class using the corresponding Unsafe method
+        URL classUrl = TestAnonymousClassUnloading.class.getResource("TestAnonymousClassUnloading.class");
+        URLConnection connection = classUrl.openConnection();
+        byte[] classBytes = IOUtils.readFully(connection.getInputStream(), connection.getContentLength(), true);
+        Class<?> anonymousClass = UNSAFE.defineAnonymousClass(TestAnonymousClassUnloading.class, classBytes, null);
+
+        // (2) Make sure all paths of doWork are profiled and compiled
+        for (int i = 0; i < 100000; ++i) {
+            doWork(anonymousClass);
+        }
+
+        // Make sure doWork is compiled now
+        Method doWork = TestAnonymousClassUnloading.class.getDeclaredMethod("doWork", Class.class);
+        makeSureIsCompiled(doWork);
+
+        // (3) Throw away reference to anonymousClass to allow unloading
+        anonymousClass = null;
+
+        // Force garbage collection to trigger unloading of anonymousClass
+        // Dead metadata reference to anonymousClass triggers JDK-8054402
+        WHITE_BOX.fullGC();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/exceptions/TestRecursiveReplacedException.java	Wed Jul 05 19:58:32 2017 +0200
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8054224
+ * @summary Recursive method compiled by C1 is unable to catch StackOverflowError
+ * @run main/othervm -Xcomp -XX:CompileOnly=Test.run -XX:+TieredCompilation -XX:TieredStopAtLevel=2 -Xss256K TestRecursiveReplacedException
+ *
+ */
+
+public class TestRecursiveReplacedException {
+
+    public static void main(String args[]) {
+        new TestRecursiveReplacedException().run();
+    }
+
+    public void run() {
+        try {
+            run();
+        } catch (Throwable t) {
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/membars/TestMemBarAcquire.java	Wed Jul 05 19:58:32 2017 +0200
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestMemBarAcquire
+ * @bug 8048879
+ * @summary "Tests optimization of MemBarAcquireNodes"
+ * @run main/othervm -XX:-TieredCompilation -XX:-BackgroundCompilation TestMemBarAcquire
+ */
+public class TestMemBarAcquire {
+  private static volatile Object defaultObj = new Object();
+  private Object obj;
+
+  public TestMemBarAcquire(Object param) {
+    // Volatile load. A MemBarAcquireNode is added after the
+    // load to prevent subsequent loads from floating up past it.
+    // A StoreNode is added to store the result of the load in 'obj'.
+    this.obj = defaultObj;
+    // Overwrites 'obj' and therefore makes the previous StoreNode
+    // and the corresponding LoadNode useless. However, the
+    // LoadNode is still connected to the MemBarAcquireNode
+    // that should now release the reference.
+    this.obj = param;
+  }
+
+  public static void main(String[] args) throws Exception {
+    // Make sure TestMemBarAcquire::<init> is compiled
+    for (int i = 0; i < 100000; ++i) {
+      TestMemBarAcquire p = new TestMemBarAcquire(new Object());
+    }
+  }
+}
+
--- a/hotspot/test/gc/class_unloading/TestCMSClassUnloadingEnabledHWM.java	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/test/gc/class_unloading/TestCMSClassUnloadingEnabledHWM.java	Wed Jul 05 19:58:32 2017 +0200
@@ -46,6 +46,7 @@
   private static OutputAnalyzer run(boolean enableUnloading) throws Exception {
     ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
       "-Xbootclasspath/a:.",
+      "-XX:+UnlockDiagnosticVMOptions",
       "-XX:+WhiteBoxAPI",
       "-XX:MetaspaceSize=" + MetaspaceSize,
       "-Xmn" + YoungGenSize,
--- a/hotspot/test/gc/class_unloading/TestG1ClassUnloadingHWM.java	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/test/gc/class_unloading/TestG1ClassUnloadingHWM.java	Wed Jul 05 19:58:32 2017 +0200
@@ -46,6 +46,7 @@
   private static OutputAnalyzer run(boolean enableUnloading) throws Exception {
     ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
       "-Xbootclasspath/a:.",
+      "-XX:+UnlockDiagnosticVMOptions",
       "-XX:+WhiteBoxAPI",
       "-XX:MetaspaceSize=" + MetaspaceSize,
       "-Xmn" + YoungGenSize,
--- a/hotspot/test/gc/g1/TestEagerReclaimHumongousRegions2.java	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/test/gc/g1/TestEagerReclaimHumongousRegions2.java	Wed Jul 05 19:58:32 2017 +0200
@@ -46,6 +46,8 @@
 }
 
 class ReclaimRegionFast {
+    public static final long MAX_MILLIS_FOR_RUN = 50 * 1000; // The maximum runtime for the actual test.
+
     public static final int M = 1024*1024;
 
     public static LinkedList<Object> garbageList = new LinkedList<Object>();
@@ -83,7 +85,14 @@
 
         Object ref_from_stack = large1;
 
+        long start_millis = System.currentTimeMillis();
+
         for (int i = 0; i < 20; i++) {
+            long current_millis = System.currentTimeMillis();
+            if ((current_millis - start_millis) > MAX_MILLIS_FOR_RUN) {
+              System.out.println("Finishing test because maximum runtime exceeded");
+              break;
+            }
             // A set of large objects that will be reclaimed eagerly - and hopefully marked.
             large1 = new int[M - 20];
             large2 = new int[M - 20];
--- a/hotspot/test/runtime/7158988/FieldMonitor.java	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/test/runtime/7158988/FieldMonitor.java	Wed Jul 05 19:58:32 2017 +0200
@@ -26,6 +26,7 @@
  * @bug 7158988
  * @key regression
  * @summary verify jvm does not crash while debugging
+ * @ignore 8055145
  * @run compile TestPostFieldModification.java
  * @run main/othervm FieldMonitor
  * @author axel.siebenborn@sap.com
--- a/hotspot/test/runtime/CompressedOops/CompressedClassPointers.java	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/test/runtime/CompressedOops/CompressedClassPointers.java	Wed Jul 05 19:58:32 2017 +0200
@@ -26,6 +26,7 @@
  * @bug 8024927
  * @summary Testing address of compressed class pointer space as best as possible.
  * @library /testlibrary
+ * @ignore 8055164
  */
 
 import com.oracle.java.testlibrary.*;
--- a/hotspot/test/runtime/NMT/AutoshutdownNMT.java	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/test/runtime/NMT/AutoshutdownNMT.java	Wed Jul 05 19:58:32 2017 +0200
@@ -26,7 +26,6 @@
  * @key nmt
  * @summary Test for deprecated message if -XX:-AutoShutdownNMT is specified
  * @library /testlibrary
- * @ignore
  */
 
 import com.oracle.java.testlibrary.*;
--- a/hotspot/test/runtime/NMT/BaselineWithParameter.java	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/test/runtime/NMT/BaselineWithParameter.java	Wed Jul 05 19:58:32 2017 +0200
@@ -27,7 +27,6 @@
  * @key nmt jcmd regression
  * @summary Regression test for invoking a jcmd with baseline=false, result was that the target VM crashed
  * @library /testlibrary
- * @ignore
  * @run main/othervm -XX:NativeMemoryTracking=detail BaselineWithParameter
  */
 
--- a/hotspot/test/runtime/NMT/CommandLineDetail.java	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/test/runtime/NMT/CommandLineDetail.java	Wed Jul 05 19:58:32 2017 +0200
@@ -26,7 +26,6 @@
  * @key nmt
  * @summary Running with NMT detail should not result in an error
  * @library /testlibrary
- * @ignore
  */
 
 import com.oracle.java.testlibrary.*;
--- a/hotspot/test/runtime/NMT/CommandLineEmptyArgument.java	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/test/runtime/NMT/CommandLineEmptyArgument.java	Wed Jul 05 19:58:32 2017 +0200
@@ -26,7 +26,7 @@
  * @key nmt
  * @summary Empty argument to NMT should result in an informative error message
  * @library /testlibrary
- * @ignore
+ * @ignore 8055051
  */
 
 import com.oracle.java.testlibrary.*;
--- a/hotspot/test/runtime/NMT/CommandLineInvalidArgument.java	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/test/runtime/NMT/CommandLineInvalidArgument.java	Wed Jul 05 19:58:32 2017 +0200
@@ -26,7 +26,6 @@
  * @key nmt
  * @summary Invalid argument to NMT should result in an informative error message
  * @library /testlibrary
- * @ignore
  */
 
 import com.oracle.java.testlibrary.*;
--- a/hotspot/test/runtime/NMT/CommandLineSummary.java	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/test/runtime/NMT/CommandLineSummary.java	Wed Jul 05 19:58:32 2017 +0200
@@ -26,7 +26,6 @@
  * @key nmt
  * @summary Running with NMT summary should not result in an error
  * @library /testlibrary
- * @ignore
  */
 
 import com.oracle.java.testlibrary.*;
--- a/hotspot/test/runtime/NMT/CommandLineTurnOffNMT.java	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/test/runtime/NMT/CommandLineTurnOffNMT.java	Wed Jul 05 19:58:32 2017 +0200
@@ -26,7 +26,6 @@
  * @key nmt
  * @summary Turning off NMT should not result in an error
  * @library /testlibrary
- * @ignore
  */
 
 import com.oracle.java.testlibrary.*;
--- a/hotspot/test/runtime/NMT/JcmdBaselineDetail.java	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/test/runtime/NMT/JcmdBaselineDetail.java	Wed Jul 05 19:58:32 2017 +0200
@@ -26,7 +26,6 @@
  * @key nmt jcmd
  * @summary Verify that jcmd correctly reports that baseline succeeds with NMT enabled with detailed tracking.
  * @library /testlibrary
- * @ignore
  * @run main/othervm -XX:NativeMemoryTracking=detail JcmdBaselineDetail
  */
 
--- a/hotspot/test/runtime/NMT/JcmdScale.java	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/test/runtime/NMT/JcmdScale.java	Wed Jul 05 19:58:32 2017 +0200
@@ -26,7 +26,6 @@
  * @key nmt jcmd
  * @summary Test the NMT scale parameter
  * @library /testlibrary
- * @ignore
  * @run main/othervm -XX:NativeMemoryTracking=summary JcmdScale
  */
 
--- a/hotspot/test/runtime/NMT/JcmdScaleDetail.java	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/test/runtime/NMT/JcmdScaleDetail.java	Wed Jul 05 19:58:32 2017 +0200
@@ -26,7 +26,6 @@
  * @key nmt jcmd
  * @summary Test the NMT scale parameter with detail tracking level
  * @library /testlibrary
- * @ignore
  * @run main/othervm -XX:NativeMemoryTracking=detail JcmdScaleDetail
  */
 
--- a/hotspot/test/runtime/NMT/JcmdSummaryDiff.java	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/test/runtime/NMT/JcmdSummaryDiff.java	Wed Jul 05 19:58:32 2017 +0200
@@ -27,7 +27,6 @@
  * @key nmt jcmd
  * @library /testlibrary /testlibrary/whitebox
  * @build JcmdSummaryDiff
- * @ignore
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=summary JcmdSummaryDiff
  */
--- a/hotspot/test/runtime/NMT/JcmdWithNMTDisabled.java	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/test/runtime/NMT/JcmdWithNMTDisabled.java	Wed Jul 05 19:58:32 2017 +0200
@@ -26,7 +26,6 @@
  * @key nmt jcmd
  * @summary Verify that jcmd correctly reports that NMT is not enabled
  * @library /testlibrary
- * @ignore
  * @run main JcmdWithNMTDisabled 1
  */
 
--- a/hotspot/test/runtime/NMT/MallocRoundingReportTest.java	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/test/runtime/NMT/MallocRoundingReportTest.java	Wed Jul 05 19:58:32 2017 +0200
@@ -27,7 +27,6 @@
  * @key nmt jcmd
  * @library /testlibrary /testlibrary/whitebox
  * @build MallocRoundingReportTest
- * @ignore
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail MallocRoundingReportTest
  *
--- a/hotspot/test/runtime/NMT/MallocTestType.java	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/test/runtime/NMT/MallocTestType.java	Wed Jul 05 19:58:32 2017 +0200
@@ -27,7 +27,6 @@
  * @key nmt jcmd
  * @library /testlibrary /testlibrary/whitebox
  * @build MallocTestType
- * @ignore
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
  *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail MallocTestType
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/NMT/NMTWithCDS.java	Wed Jul 05 19:58:32 2017 +0200
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8055061
+ * @key nmt
+ * @library /testlibrary
+ * @run main NMTWithCDS
+ */
+import com.oracle.java.testlibrary.*;
+
+public class NMTWithCDS {
+
+  public static void main(String[] args) throws Exception {
+    ProcessBuilder pb;
+    pb = ProcessTools.createJavaProcessBuilder("-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump");
+    OutputAnalyzer output = new OutputAnalyzer(pb.start());
+    try {
+      output.shouldContain("Loading classes to share");
+      output.shouldHaveExitValue(0);
+
+      pb = ProcessTools.createJavaProcessBuilder(
+        "-XX:NativeMemoryTracking=detail", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:on", "-version");
+      output = new OutputAnalyzer(pb.start());
+      output.shouldContain("sharing");
+      output.shouldHaveExitValue(0);
+
+    } catch (RuntimeException e) {
+      // Report 'passed' if CDS was turned off.
+      output.shouldContain("Unable to use shared archive");
+      output.shouldHaveExitValue(1);
+    }
+  }
+}
--- a/hotspot/test/runtime/NMT/PrintNMTStatistics.java	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/test/runtime/NMT/PrintNMTStatistics.java	Wed Jul 05 19:58:32 2017 +0200
@@ -28,7 +28,6 @@
  * @summary Make sure PrintNMTStatistics works on normal JVM exit
  * @library /testlibrary /testlibrary/whitebox
  * @build PrintNMTStatistics
- * @ignore
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
  *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main PrintNMTStatistics
--- a/hotspot/test/runtime/NMT/PrintNMTStatisticsWithNMTDisabled.java	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/test/runtime/NMT/PrintNMTStatisticsWithNMTDisabled.java	Wed Jul 05 19:58:32 2017 +0200
@@ -26,7 +26,6 @@
  * @key nmt
  * @summary Trying to enable PrintNMTStatistics should result in a warning
  * @library /testlibrary
- * @ignore
  */
 
 import com.oracle.java.testlibrary.*;
--- a/hotspot/test/runtime/NMT/ReleaseCommittedMemory.java	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/test/runtime/NMT/ReleaseCommittedMemory.java	Wed Jul 05 19:58:32 2017 +0200
@@ -28,7 +28,6 @@
  * @key nmt regression
  * @library /testlibrary /testlibrary/whitebox
  * @build ReleaseCommittedMemory
- * @ignore
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
  *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail ReleaseCommittedMemory
--- a/hotspot/test/runtime/NMT/ReleaseNoCommit.java	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/test/runtime/NMT/ReleaseNoCommit.java	Wed Jul 05 19:58:32 2017 +0200
@@ -27,7 +27,6 @@
  * @key nmt regression
  * @library /testlibrary /testlibrary/whitebox
  * @build ReleaseNoCommit
- * @ignore
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=summary ReleaseNoCommit
  */
--- a/hotspot/test/runtime/NMT/ShutdownTwice.java	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/test/runtime/NMT/ShutdownTwice.java	Wed Jul 05 19:58:32 2017 +0200
@@ -26,7 +26,6 @@
  * @key nmt jcmd
  * @summary Run shutdown twice
  * @library /testlibrary
- * @ignore
  * @run main/othervm -XX:NativeMemoryTracking=detail ShutdownTwice
  */
 
--- a/hotspot/test/runtime/NMT/SummaryAfterShutdown.java	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/test/runtime/NMT/SummaryAfterShutdown.java	Wed Jul 05 19:58:32 2017 +0200
@@ -26,7 +26,6 @@
  * @key nmt jcmd
  * @summary Verify that jcmd correctly reports that NMT is not enabled after a shutdown
  * @library /testlibrary
- * @ignore
  * @run main/othervm -XX:NativeMemoryTracking=detail SummaryAfterShutdown
  */
 
--- a/hotspot/test/runtime/NMT/SummarySanityCheck.java	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/test/runtime/NMT/SummarySanityCheck.java	Wed Jul 05 19:58:32 2017 +0200
@@ -27,7 +27,6 @@
  * @summary Sanity check the output of NMT
  * @library /testlibrary /testlibrary/whitebox
  * @build SummarySanityCheck
- * @ignore
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
  *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:NativeMemoryTracking=summary -XX:+WhiteBoxAPI SummarySanityCheck
--- a/hotspot/test/runtime/NMT/ThreadedMallocTestType.java	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/test/runtime/NMT/ThreadedMallocTestType.java	Wed Jul 05 19:58:32 2017 +0200
@@ -26,7 +26,6 @@
  * @key nmt jcmd
  * @library /testlibrary /testlibrary/whitebox
  * @build ThreadedMallocTestType
- * @ignore
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
  *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail ThreadedMallocTestType
--- a/hotspot/test/runtime/NMT/ThreadedVirtualAllocTestType.java	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/test/runtime/NMT/ThreadedVirtualAllocTestType.java	Wed Jul 05 19:58:32 2017 +0200
@@ -26,7 +26,6 @@
  * @key nmt jcmd
  * @library /testlibrary /testlibrary/whitebox
  * @build ThreadedVirtualAllocTestType
- * @ignore
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
  *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail ThreadedVirtualAllocTestType
--- a/hotspot/test/runtime/NMT/VirtualAllocTestType.java	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/test/runtime/NMT/VirtualAllocTestType.java	Wed Jul 05 19:58:32 2017 +0200
@@ -26,7 +26,6 @@
  * @summary Test Reserve/Commit/Uncommit/Release of virtual memory and that we track it correctly
  * @key nmt jcmd
  * @library /testlibrary /testlibrary/whitebox
- * @ignore
  * @build VirtualAllocTestType
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
  *                              sun.hotspot.WhiteBox$WhiteBoxPermission
--- a/hotspot/test/runtime/jsig/Test8017498.sh	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/test/runtime/jsig/Test8017498.sh	Wed Jul 05 19:58:32 2017 +0200
@@ -31,6 +31,7 @@
 ## @bug 8022301
 ## @bug 8025519
 ## @summary sigaction(sig) results in process hang/timed-out if sig is much greater than SIGRTMAX
+## @ignore 8041727
 ## @run shell/timeout=60 Test8017498.sh
 ##
 
--- a/hotspot/test/sanity/WhiteBox.java	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/test/sanity/WhiteBox.java	Wed Jul 05 19:58:32 2017 +0200
@@ -29,7 +29,6 @@
  * @library /testlibrary
  * @compile WhiteBox.java
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
- *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI sun.hotspot.WhiteBox
  * @clean sun.hotspot.WhiteBox
  */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/testlibrary/com/oracle/java/testlibrary/BuildHelper.java	Wed Jul 05 19:58:32 2017 +0200
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package com.oracle.java.testlibrary;
+
+import java.io.File;
+import java.io.FileReader;
+import java.util.Properties;
+
+public class BuildHelper {
+
+    /**
+     * Commercial builds should have the BUILD_TYPE set to commercial
+     * within the release file, found at the root of the JDK.
+     */
+    public static boolean isCommercialBuild() throws Exception {
+        String buildType = getReleaseProperty("BUILD_TYPE", "notFound");
+        return buildType.equals("commercial");
+    }
+
+
+    /**
+     * Return the value for the given property key, or defaultValue if the property is not found.
+     * If present, double quotes are trimmed.
+     */
+    public static String getReleaseProperty(String key, String defaultValue) throws Exception {
+        Properties properties = getReleaseProperties();
+        String value = properties.getProperty(key, defaultValue);
+        return trimDoubleQuotes(value);
+    }
+
+    /**
+     * Return the value for the given property key, or null if the property is not found.
+     * If present, double quotes are trimmed.
+     */
+    public static String getReleaseProperty(String key) throws Exception {
+        return getReleaseProperty(key, null);
+    }
+
+    /**
+     * Get properties from the release file
+     */
+    public static Properties getReleaseProperties() throws Exception {
+        Properties properties = new Properties();
+        properties.load(new FileReader(getReleaseFile()));
+        return properties;
+    }
+
+    /**
+     * Every JDK has a release file in its root.
+     * @return A handle to the release file.
+     */
+    public static File getReleaseFile() throws Exception {
+        String jdkPath = getJDKRoot();
+        File releaseFile = new File(jdkPath, "release");
+        if ( ! releaseFile.canRead() ) {
+            throw new Exception("Release file is not readable, or it is absent: " +
+                    releaseFile.getCanonicalPath());
+        }
+        return releaseFile;
+    }
+
+    /**
+     * Returns the path to the JDK under test.
+     * This path is obtained through the test.jdk property, normally set by jtreg.
+     */
+    public static String getJDKRoot() {
+        String jdkPath = System.getProperty("test.jdk");
+        if (jdkPath == null) {
+            throw new RuntimeException("System property 'test.jdk' not set. This property is normally set by jtreg. "
+                    + "When running the test separately, set this property using '-Dtest.jdk=/path/to/jdk'.");
+        }
+        return jdkPath;
+    }
+
+    /**
+     * Trim double quotes from the beginning and the end of the given string.
+     * @param original string to trim.
+     * @return a new trimmed string.
+     */
+    public static String trimDoubleQuotes(String original) {
+        if (original == null) { return null; }
+        String trimmed = original.replaceAll("^\"+|\"+$", "");
+        return trimmed;
+    }
+}
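A minimal usage sketch for the BuildHelper class above, e.g. from a jtreg test that should only run commercial-specific checks on commercial builds (the caller class is made up for illustration):

    import com.oracle.java.testlibrary.BuildHelper;

    // Hypothetical caller, for illustration only.
    public class BuildTypeCheck {
        public static void main(String[] args) throws Exception {
            // Reads BUILD_TYPE from the release file at the JDK root;
            // requires -Dtest.jdk=/path/to/jdk (normally set by jtreg).
            if (BuildHelper.isCommercialBuild()) {
                System.out.println("Commercial build: running commercial-only checks");
            } else {
                System.out.println("OpenJDK build: skipping commercial-only checks");
            }
        }
    }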
--- a/hotspot/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java	Fri Aug 29 11:58:43 2014 -0700
+++ b/hotspot/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java	Wed Jul 05 19:58:32 2017 +0200
@@ -73,6 +73,8 @@
   // Memory
   public native long getObjectAddress(Object o);
   public native int  getHeapOopSize();
+  public native boolean isObjectInOldGen(Object o);
+  public native long getObjectSize(Object o);
 
   // Runtime
   // Make sure class name is in the correct format
@@ -150,6 +152,9 @@
   public native long allocateMetaspace(ClassLoader classLoader, long size);
   public native void freeMetaspace(ClassLoader classLoader, long addr, long size);
 
+  // force Young GC
+  public native void youngGC();
+
   // force Full GC
   public native void fullGC();
 
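A minimal sketch of how a GC test might use the newly added WhiteBox methods; the allocation pattern and expectations here are illustrative assumptions, not an actual test:

    import sun.hotspot.WhiteBox;

    // Hypothetical usage, for illustration only. Requires the usual WhiteBox
    // setup: -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
    public class WhiteBoxGCSketch {
        private static final WhiteBox WB = WhiteBox.getWhiteBox();

        public static void main(String[] args) {
            Object o = new byte[1024];
            System.out.println("object size: " + WB.getObjectSize(o));
            WB.youngGC();
            System.out.println("in old gen after young GC: " + WB.isObjectInOldGen(o));
            // After a full GC, a live object has typically been promoted or
            // compacted into the old generation (collector-specific behavior).
            WB.fullGC();
            System.out.println("in old gen after full GC: " + WB.isObjectInOldGen(o));
        }
    }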
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/CheckModules.gmk	Wed Jul 05 19:58:32 2017 +0200
@@ -0,0 +1,46 @@
+#
+# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.  Oracle designates this
+# particular file as subject to the "Classpath" exception as provided
+# by Oracle in the LICENSE file that accompanied this code.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+
+# Default target declared first
+default: all
+
+include $(SPEC)
+include MakeBase.gmk
+
+JDEPS_MODULES_XML := $(JDK_OUTPUTDIR)/modules/jdk.dev/com/sun/tools/jdeps/resources/jdeps-modules.xml
+
+#
+# Verify access across module boundaries
+#
+checkdeps:
+	$(ECHO) "Checking dependencies across JDK modules"
+	$(JAVA) -Xbootclasspath/p:$(INTERIM_LANGTOOLS_JAR) \
+		-Djdeps.modules.xml=$(JDEPS_MODULES_XML) \
+		com.sun.tools.jdeps.Main \
+		-verify:access -mp $(JDK_OUTPUTDIR)/modules
+
+all: checkdeps
+
+.PHONY: all
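What the checkdeps recipe boils down to, as a hedged sketch: invoking the jdeps main class directly against the exploded modules directory. The paths are placeholders, and the interim langtools classes must be on the boot class path, as in the make recipe above:

    // Hypothetical stand-alone equivalent of the recipe, for illustration only.
    public class CheckDepsSketch {
        public static void main(String[] args) throws Exception {
            System.setProperty("jdeps.modules.xml",
                    "/path/to/modules/jdk.dev/com/sun/tools/jdeps/resources/jdeps-modules.xml");
            // -verify:access checks accesses across module boundaries.
            com.sun.tools.jdeps.Main.main(
                    "-verify:access", "-mp", "/path/to/build/jdk/modules");
        }
    }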
--- a/make/CompileJavaModules.gmk	Fri Aug 29 11:58:43 2014 -0700
+++ b/make/CompileJavaModules.gmk	Wed Jul 05 19:58:32 2017 +0200
@@ -28,8 +28,9 @@
 
 include $(SPEC)
 include MakeBase.gmk
+include Modules.gmk
 include JavaCompilation.gmk
-include SetupJava.gmk
+include SetupJavaCompilers.gmk
 
 # Hook to include the corresponding custom file, if present.
 $(eval $(call IncludeCustomExtension, , CompileJavaModules.gmk))
@@ -399,36 +400,6 @@
 
 ################################################################################
 
-ifneq ($(OPENJDK_TARGET_OS), solaris)
-  jdk.attach_EXCLUDE_FILES += \
-      sun/tools/attach/SolarisAttachProvider.java \
-      sun/tools/attach/SolarisVirtualMachine.java \
-      #
-endif
-
-ifneq ($(OPENJDK_TARGET_OS), linux)
-  jdk.attach_EXCLUDE_FILES += \
-      sun/tools/attach/LinuxAttachProvider.java \
-      sun/tools/attach/LinuxVirtualMachine.java \
-      #
-endif
-
-ifneq ($(OPENJDK_TARGET_OS), macosx)
-  jdk.attach_EXCLUDE_FILES += \
-      sun/tools/attach/BsdAttachProvider.java \
-      sun/tools/attach/BsdVirtualMachine.java \
-      #
-endif
-
-ifneq ($(OPENJDK_TARGET_OS),aix)
-  jdk.attach_EXCLUDE_FILES += \
-      sun/tools/attach/AixAttachProvider.java \
-      sun/tools/attach/AixVirtualMachine.java \
-      #
-endif
-
-################################################################################
-
 jdk.jvmstat_COPY := aliasmap
 
 ################################################################################
--- a/make/Javadoc.gmk	Fri Aug 29 11:58:43 2014 -0700
+++ b/make/Javadoc.gmk	Wed Jul 05 19:58:32 2017 +0200
@@ -52,7 +52,6 @@
 BUILD_NUMBER=$(JDK_BUILD_NUMBER)
 
 JAVADOC_CMD = $(JAVA) \
-    -Xmx1024m \
     -Djava.awt.headless=true \
     $(NEW_JAVADOC) \
     -bootclasspath $(JDK_OUTPUTDIR)/classes
--- a/make/Main.gmk	Fri Aug 29 11:58:43 2014 -0700
+++ b/make/Main.gmk	Wed Jul 05 19:58:32 2017 +0200
@@ -37,6 +37,7 @@
 
 # Load the vital tools for all the makefiles.
 include $(SRC_ROOT)/make/common/MakeBase.gmk
+include $(SRC_ROOT)/make/common/Modules.gmk
 
 # Load common profile names definitions
 include $(JDK_TOPDIR)/make/ProfileNames.gmk
@@ -234,8 +235,7 @@
 	+($(CD) $(NASHORN_TOPDIR)/make && $(MAKE) $(MAKE_ARGS) -f BuildNashorn.gmk all)
 
 # Creates the jar files (rt.jar resources.jar etc)
-# this depends on all modules built
-main-jars: exploded-image
+main-jars:
 	+($(CD) $(JDK_TOPDIR)/make && $(MAKE) $(MAKE_ARGS) -f CreateJars.gmk)
 
 # Creates the images (j2sdk-image j2re-image etc)
@@ -297,14 +297,10 @@
 ################################################################################
 # Verification targets
 
-# generate modules.xml in the exploded image
-modules-xml:
-	+($(CD) $(JDK_TOPDIR)/make && $(MAKE) $(MAKE_ARGS) -f ModulesXml.gmk gen-modules-xml)
+verify-modules:
+	+($(CD) $(SRC_ROOT)/make && $(MAKE) $(MAKE_ARGS) -f CheckModules.gmk)
 
-verify-modules:
-	+($(CD) $(JDK_TOPDIR)/make && $(MAKE) $(MAKE_ARGS) -f ModulesXml.gmk checkdeps)
-
-ALL_TARGETS += modules-xml verify-modules
+ALL_TARGETS += verify-modules
 
 ################################################################################
 # Install targets
@@ -355,6 +351,9 @@
 
   $(LAUNCHER_TARGETS): java.base-libs
 
+  # The demos are currently linking to libjvm and libjava, just like all other
+  # jdk libs, even though they don't need to. To avoid warnings, make sure they
+  # aren't built until after libjava and libjvm are available to link to.
   demos: $(JAVA_TARGETS)
 
   # Declare dependency from <module>-java to <module>-gensrc
@@ -381,16 +380,18 @@
 
   # This dependency needs to be explicitly declared. jdk.jdi-gensrc generates a 
   # header file used by jdk.jdwp libs.
-  jdk.jdwp-libs: jdk.jdi-gensrc
+  jdk.jdwp.agent-libs: jdk.jdi-gensrc
 
   # Explicitly add dependencies for special targets
   java.base-java: unpack-sec
 
+  jdk.dev-gendata: java rmic
+
   security-jars: java
 
   nashorn-jar: jdk.scripting.nashorn-java
 
-  main-jars: java rmic security-jars nashorn-jar policy-jars import-hotspot
+  main-jars: java rmic security-jars nashorn-jar policy-jars import-hotspot gendata
 
   # On windows, the jars target needs to wait for jgss libs to be built.
   # Should ideally split out the sec-bin zip file generation to avoid
@@ -418,9 +419,7 @@
 
   test: exploded-image
 
-  modules-xml: build-tools-jdk java
-
-  verify-modules: exploded-image modules-xml
+  verify-modules: exploded-image
 
 endif
 
@@ -457,7 +456,10 @@
 ALL_MODULE_TARGETS := $(sort $(GENSRC_MODULES) $(JAVA_MODULES) \
     $(GENDATA_MODULES) $(LIB_MODULES) $(LAUNCHER_MODULES) $(COPY_MODULES))
 
-exploded-image: $(ALL_MODULE_TARGETS) modules-xml
+exploded-image: $(ALL_MODULE_TARGETS)
+# The old 'jdk' target most closely matches the new exploded-image. Keep an
+# alias for ease of use.
+jdk: exploded-image
 
 jars: main-jars nashorn-jar security-jars policy-jars
 
@@ -469,7 +471,7 @@
 docs: docs-javadoc docs-jvmtidoc
 
 ALL_TARGETS += gensrc gendata copy java rmic libs launchers \
-    $(ALL_MODULE_TARGETS) exploded-image jars \
+    $(ALL_MODULE_TARGETS) exploded-image jdk jars \
     $(ALL_PROFILES) profiles docs
 
 ################################################################################
@@ -489,6 +491,12 @@
 # If running a clean target, disable parallel execution
 ifneq ($(findstring clean, $(MAKECMDGOALS)), )
   .NOTPARALLEL:
+  # It's not recommended to run other targets together with clean on the same
+  # make command line. Try to detect this and issue a warning.
+  ifneq ($(filter-out clean%, $(MAKECMDGOALS)), )
+    $(warning Mixing clean targets with normal build targets will not work well \
+        and is not recommended.)
+  endif
 endif
 
 CLEAN_COMPONENTS += langtools corba hotspot jdk nashorn images \
--- a/make/MakeHelpers.gmk	Fri Aug 29 11:58:43 2014 -0700
+++ b/make/MakeHelpers.gmk	Wed Jul 05 19:58:32 2017 +0200
@@ -303,4 +303,32 @@
 	@$(PRINTF) " done\n"
 endef
 
+################################################################################
+
+MAKE_DIR_LIST := $(JDK_TOPDIR)/make
+
+# Find all modules that have a makefile for a certain build phase
+# Param 1: Make subdir to look in
+# Param 2: File prefix to look for
+FindModulesWithMakefileFor = $(sort $(foreach d, $(MAKE_DIR_LIST), \
+    $(patsubst $d/$(strip $1)/$(strip $2)-%.gmk,%, \
+    $(wildcard $d/$(strip $1)/$(strip $2)-*.gmk))))
+
+# Declare a recipe for calling such a makefile
+# Param 1: Module name
+# Param 2: Suffix for rule
+# Param 3: Make subdir
+# Param 4: Makefile prefix
+define DeclareRecipeForModuleMakefile
+  $$(strip $1)-$$(strip $2):
+	+($(CD) $$(dir $$(firstword $$(wildcard $$(addsuffix /$$(strip $3)/$$(strip $4)-$$(strip $1).gmk, \
+	        $(MAKE_DIR_LIST))))) \
+	    && $(MAKE) $(MAKE_ARGS) \
+	    -f $$(strip $4)-$$(strip $1).gmk \
+	    $$(addprefix -I, $$(wildcard $(MAKE_DIR_LIST) \
+	        $$(addsuffix /$$(strip $3), $(MAKE_DIR_LIST)))))
+endef
+
+################################################################################
+
 endif # _MAKEHELPERS_GMK
--- a/make/common/IdlCompilation.gmk	Fri Aug 29 11:58:43 2014 -0700
+++ b/make/common/IdlCompilation.gmk	Wed Jul 05 19:58:32 2017 +0200
@@ -60,7 +60,7 @@
 	$(RM) -rf $3/$$($4_TMPDIR)
 	$(MKDIR) -p $(dir $5)
 	$(ECHO) $(LOG_INFO) Compiling IDL $(patsubst $2/%,%,$4)
-	LC_ALL=C $8 -td $3/$$($4_TMPDIR) \
+	$8 -td $3/$$($4_TMPDIR) \
 	    -i $2/org/omg/CORBA \
 	    -i $2/org/omg/PortableInterceptor \
 	    -i $2/org/omg/PortableServer \
--- a/make/common/JavaCompilation.gmk	Fri Aug 29 11:58:43 2014 -0700
+++ b/make/common/JavaCompilation.gmk	Wed Jul 05 19:58:32 2017 +0200
@@ -564,7 +564,7 @@
     endif
 
     # Using sjavac to compile.
-    $1_COMPILE_TARGETS := $$($1_BIN)/javac_state
+    $1_COMPILE_TARGETS := $$($1_BIN)/_the.$1_batch
 
     # Create SJAVAC variable form JAVAC variable. Expects $1_JAVAC to be
     # "bootclasspathprepend -cp .../javac.jar com.sun.tools.javac.Main"
@@ -574,8 +574,12 @@
     # Set the $1_REMOTE to spawn a background javac server.
     $1_REMOTE:=--server:portfile=$$($1_SJAVAC_PORTFILE),id=$1,sjavac=$$(subst $$(SPACE),%20,$$(subst $$(COMMA),%2C,$$(strip $$($1_SERVER_JVM) $$($1_SJAVAC))))
 
-    $$($1_BIN)/javac_state: $$($1_SRCS) $$($1_DEPENDS)
+    $$($1_BIN)/_the.$1_batch: $$($1_SRCS) $$($1_DEPENDS)
 	$(MKDIR) -p $$(@D)
+        # As a workaround for sjavac not tracking API changes coming from the
+        # classpath, force a full recompile if an external dependency (anything
+        # other than a source change) triggered this compilation.
+	$$(if $$(filter-out $$($1_SRCS), $$?), $(FIND) $$(@D) -name "*.class" $(FIND_DELETE))
 	$$(call ListPathsSafely,$1_SRCS,\n, >> $$($1_BIN)/_the.$1_batch.tmp)
 	$(ECHO) Compiling $1
 	($$($1_JVM) $$($1_SJAVAC) \
@@ -590,8 +594,16 @@
 	    $$($1_HEADERS_ARG) \
 	    -d $$($1_BIN) && \
 	$(MV) $$($1_BIN)/_the.$1_batch.tmp $$($1_BIN)/_the.$1_batch)
-        # sjavac doesn't touch this if nothing has changed
-	$(TOUCH) $$@
+        # Create a pubapi file that only changes when the pubapi changes. Dependent
+        # compilations can use this file to only get recompiled when pubapi has changed.
+        # Grep returns 1 if no matching lines are found. Do not fail for this.
+	$(GREP) -e "^I" $$($1_BIN)/javac_state > $$($1_BIN)/_the.$1_pubapi.tmp \
+	    || test "$$$$?" = "1"
+	if [ ! -f $$($1_BIN)/_the.$1_pubapi ] \
+	    || [ "`$(DIFF) $$($1_BIN)/_the.$1_pubapi $$($1_BIN)/_the.$1_pubapi.tmp`" != "" ]; then \
+	  $(MV) $$($1_BIN)/_the.$1_pubapi.tmp $$($1_BIN)/_the.$1_pubapi; \
+	fi
+
   else
     # Using plain javac to batch compile everything.
     $1_COMPILE_TARGETS := $$($1_BIN)/_the.$1_batch
@@ -681,7 +693,7 @@
 # param 1 is for example BUILD_MYPACKAGE
 # param 2 is the output directory (BIN)
 define SetupJavaCompilationCompileTarget
-  $(if $(findsring yes, $(ENABLE_SJAVAC)), $(strip $2)/javac_state, \
+  $(if $(findstring yes, $(ENABLE_SJAVAC)), $(strip $2)/_the.$(strip $1)_pubapi, \
       $(strip $2)/_the.$(strip $1)_batch)
 endef
 endif
--- a/make/common/MakeBase.gmk	Fri Aug 29 11:58:43 2014 -0700
+++ b/make/common/MakeBase.gmk	Wed Jul 05 19:58:32 2017 +0200
@@ -25,7 +25,6 @@
 
 ################################################################
 #
-# Check that GNU make and cygwin are recent enough.
 # Setup common utility functions.
 #
 ################################################################
@@ -533,71 +532,6 @@
 endef
 
 ################################################################################
-# Module list macros
-
-ALL_TOP_SRC_DIRS := \
-    $(JDK_TOPDIR)/src \
-    $(LANGTOOLS_TOPDIR)/src \
-    $(CORBA_TOPDIR)/src \
-    $(JAXP_TOPDIR)/src \
-    $(JAXWS_TOPDIR)/src \
-    $(NASHORN_TOPDIR)/src \
-    #
-
-# There are snmp classes in the open but they are not included in OpenJDK
-JAVA_MODULES_FILTER := jdk.snmp
-
-# Find all modules with java sources by looking in the source dirs
-define FindJavaModules
-  $(filter-out $(JAVA_MODULES_FILTER), $(sort $(notdir \
-      $(patsubst %/,%, $(dir $(patsubst %/,%, $(dir $(patsubst %/,%, $(dir \
-      $(wildcard $(patsubst %,%/*/share/classes/*, $(ALL_TOP_SRC_DIRS)) \
-          $(patsubst %,%/*/$(OPENJDK_TARGET_OS_API_DIR)/classes/*, $(ALL_TOP_SRC_DIRS)) \
-          $(patsubst %,%/*/$(OPENJDK_TARGET_OS)/classes/*, $(ALL_TOP_SRC_DIRS))))))))))))
-endef
-
-MODULES_LIST_FILE := $(SRC_ROOT)/make/common/modules.list
-
-# Param 1: Module to find deps for
-define FindDepsForModule
-  $(if $(filter-out java.base, $1), java.base $(filter-out jdk.scripting.nashorn, $(filter-out java.base, $(wordlist 2, 100, $(shell $(GREP) '^$(strip $1):' $(MODULES_LIST_FILE))))))
-endef
-
-# Find all modules with source for the target platform.
-define FindAllModules
-  $(sort $(filter-out closed demo sample, $(notdir $(patsubst %/,%, $(dir \
-      $(wildcard $(patsubst %, %/*/share, $(ALL_TOP_SRC_DIRS)) \
-      $(patsubst %, %/*/$(OPENJDK_TARGET_OS), $(ALL_TOP_SRC_DIRS)) \
-      $(patsubst %, %/*/$(OPENJDK_TARGET_OS_API_DIR), $(ALL_TOP_SRC_DIRS))))))))
-endef
-
-################################################################################
-
-MAKE_DIR_LIST := $(JDK_TOPDIR)/make
-
-# Find all modules that has a makefile for a certain build phase
-# Param 1: Make subdir to look in
-# Param 2: File prefix to look for
-FindModulesWithMakefileFor = $(sort $(foreach d, $(MAKE_DIR_LIST), \
-    $(patsubst $d/$(strip $1)/$(strip $2)-%.gmk,%, \
-    $(wildcard $d/$(strip $1)/$(strip $2)-*.gmk))))
-
-# Declare a recipe for calling such a makefile
-# Param 1: Module name
-# Param 2: Suffix for rule
-# Param 3: Make subdir
-# Param 4: Makefile prefix
-define DeclareRecipeForModuleMakefile
-  $$(strip $1)-$$(strip $2):
-	+($(CD) $$(dir $$(firstword $$(wildcard $$(addsuffix /$$(strip $3)/$$(strip $4)-$$(strip $1).gmk, \
-	        $(MAKE_DIR_LIST))))) \
-	    && $(MAKE) $(MAKE_ARGS) \
-	    -f $$(strip $4)-$$(strip $1).gmk \
-	    $$(addprefix -I, $$(wildcard $(MAKE_DIR_LIST) \
-	        $$(addsuffix /$$(strip $3), $(MAKE_DIR_LIST)))))
-endef
-
-################################################################################
 
 # Hook to include the corresponding custom file, if present.
 $(eval $(call IncludeCustomExtension, , common/MakeBase.gmk))
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/common/Modules.gmk	Wed Jul 05 19:58:32 2017 +0200
@@ -0,0 +1,79 @@
+#
+# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.  Oracle designates this
+# particular file as subject to the "Classpath" exception as provided
+# by Oracle in the LICENSE file that accompanied this code.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+
+ifndef _MODULES_GMK
+_MODULES_GMK := 1
+
+################################################################################
+# Module list macros
+
+ALL_TOP_SRC_DIRS := \
+    $(JDK_TOPDIR)/src \
+    $(LANGTOOLS_TOPDIR)/src \
+    $(CORBA_TOPDIR)/src \
+    $(JAXP_TOPDIR)/src \
+    $(JAXWS_TOPDIR)/src \
+    $(NASHORN_TOPDIR)/src \
+    #
+
+# There are snmp classes in the open but they are not included in OpenJDK
+JAVA_MODULES_FILTER := jdk.snmp
+
+# Find all modules with java sources by looking in the source dirs
+define FindJavaModules
+  $(filter-out $(JAVA_MODULES_FILTER), $(sort $(notdir \
+      $(patsubst %/,%, $(dir $(patsubst %/,%, $(dir $(patsubst %/,%, $(dir \
+      $(wildcard $(patsubst %,%/*/share/classes/*, $(ALL_TOP_SRC_DIRS)) \
+          $(patsubst %,%/*/$(OPENJDK_TARGET_OS_API_DIR)/classes/*, $(ALL_TOP_SRC_DIRS)) \
+          $(patsubst %,%/*/$(OPENJDK_TARGET_OS)/classes/*, $(ALL_TOP_SRC_DIRS))))))))))))
+endef
+
+MODULES_LIST_FILE := $(SRC_ROOT)/make/common/modules.list
+MODULE_DEPS_MAKEFILE := $(OUTPUT_ROOT)/module-deps.gmk
+
+$(MODULE_DEPS_MAKEFILE): $(MODULES_LIST_FILE)
+	$(CAT) $^ | $(SED) -e 's/^\([^:]*\):/DEPS_\1 :=/g' > $@
+
+-include $(MODULE_DEPS_MAKEFILE)
+
+# Param 1: Module to find deps for
+define FindDepsForModule
+  $(DEPS_$(strip $1))
+endef
+
+# Find all modules with source for the target platform.
+define FindAllModules
+  $(sort $(filter-out closed demo sample, $(notdir $(patsubst %/,%, $(dir \
+      $(wildcard $(patsubst %, %/*/share, $(ALL_TOP_SRC_DIRS)) \
+      $(patsubst %, %/*/$(OPENJDK_TARGET_OS), $(ALL_TOP_SRC_DIRS)) \
+      $(patsubst %, %/*/$(OPENJDK_TARGET_OS_API_DIR), $(ALL_TOP_SRC_DIRS))))))))
+endef
+
+################################################################################
+
+# Hook to include the corresponding custom file, if present.
+$(eval $(call IncludeCustomExtension, , common/Modules.gmk))
+
+endif # _MODULES_GMK
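For illustration, the per-line transformation the $(SED) rule above performs when generating module-deps.gmk, expressed as an equivalent Java regex (the module names are made up):

    // Hypothetical modules.list line:
    String line = "jdk.compiler: java.base";
    // The same rewrite as: sed -e 's/^\([^:]*\):/DEPS_\1 :=/g'
    String generated = line.replaceAll("^([^:]*):", "DEPS_$1 :=");
    // generated == "DEPS_jdk.compiler := java.base"; once module-deps.gmk is
    // -include'd, FindDepsForModule can expand $(DEPS_jdk.compiler).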
--- a/make/common/SetupJava.gmk	Fri Aug 29 11:58:43 2014 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,88 +0,0 @@
-#
-# Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation.  Oracle designates this
-# particular file as subject to the "Classpath" exception as provided
-# by Oracle in the LICENSE file that accompanied this code.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#
-
-ifndef _SETUP_GMK
-_SETUP_GMK := 1
-
-include JavaCompilation.gmk
-
-DISABLE_WARNINGS := -Xlint:all,-deprecation,-unchecked,-rawtypes,-cast,-serial,-dep-ann,-static,-fallthrough,-try,-varargs,-empty,-finally
-
-# To build with all warnings enabled, do the following:
-# make JAVAC_WARNINGS="-Xlint:all -Xmaxwarns 10000"
-JAVAC_WARNINGS := -Xlint:all,-deprecation -Werror
-
-# Any java code executed during a JDK build to build other parts of the JDK must be 
-# executed by the bootstrap JDK (probably with -Xbootclasspath/p: ) and for this 
-# purpose must be built with -target PREVIOUS for bootstrapping purposes, which 
-# requires restricting to language level and api of previous JDK.
-#
-# The generate old bytecode javac setup uses the new compiler to compile for the
-# boot jdk to generate tools that need to be run with the boot jdk.
-# Thus we force the target bytecode to the previous JDK version.
-$(eval $(call SetupJavaCompiler,GENERATE_OLDBYTECODE, \
-    JVM := $(JAVA_SMALL), \
-    JAVAC := $(NEW_JAVAC), \
-    FLAGS := $(BOOT_JDK_SOURCETARGET) -bootclasspath $(BOOT_RTJAR) $(DISABLE_WARNINGS), \
-    SERVER_DIR := $(SJAVAC_SERVER_DIR), \
-    SERVER_JVM := $(SJAVAC_SERVER_JAVA)))
-
-# The generate new bytecode javac setup uses the new compiler to compile for the
-# new jdk. This new bytecode might only be possible to run using the new jvm.
-$(eval $(call SetupJavaCompiler,GENERATE_JDKBYTECODE, \
-    JVM := $(JAVA), \
-    JAVAC := $(NEW_JAVAC), \
-    FLAGS := -source 9 -target 9 \
-        -encoding ascii -XDignore.symbol.file=true $(JAVAC_WARNINGS) \
-        $(GENERATE_JDKBYTECODE_EXTRA_FLAGS), \
-    SERVER_DIR := $(SJAVAC_SERVER_DIR), \
-    SERVER_JVM := $(SJAVAC_SERVER_JAVA)))
-
-# The generate new bytecode javac setup uses the new compiler to compile for the
-# new jdk. This new bytecode might only be possible to run using the new jvm.
-$(eval $(call SetupJavaCompiler,GENERATE_JDKBYTECODE_NOWARNINGS, \
-    JVM := $(JAVA), \
-    JAVAC := $(NEW_JAVAC), \
-    FLAGS := -source 9 -target 9 \
-        -encoding ascii -XDignore.symbol.file=true $(DISABLE_WARNINGS) \
-        $(GENERATE_JDKBYTECODE_EXTRA_FLAGS), \
-    SERVER_DIR := $(SJAVAC_SERVER_DIR), \
-    SERVER_JVM := $(SJAVAC_SERVER_JAVA)))
-
-JDK_BOOTCLASSPATH := $(subst $(SPACE),$(PATH_SEP),\
-    $(filter-out $(JDK_OUTPUTDIR)/modules/_%, $(wildcard $(JDK_OUTPUTDIR)/modules/*)))
-
-# After the jdk is built, we want to build demos using only the recently
-# generated jdk classes and nothing else, no jdk source, etc etc.
-# I.e. the rt.jar, but since rt.jar has not yet been generated
-# (it will be in "make images") therefore we use classes instead.
-$(eval $(call SetupJavaCompiler,GENERATE_USINGJDKBYTECODE, \
-    JVM := $(JAVA_SMALL), \
-    JAVAC := $(NEW_JAVAC), \
-    FLAGS := -bootclasspath "$(JDK_BOOTCLASSPATH)" $(DISABLE_WARNINGS), \
-    SERVER_DIR := $(SJAVAC_SERVER_DIR), \
-    SERVER_JVM := $(SJAVAC_SERVER_JAVA)))
-
-endif # _SETUP_GMK
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/common/SetupJavaCompilers.gmk	Wed Jul 05 19:58:32 2017 +0200
@@ -0,0 +1,88 @@
+#
+# Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.  Oracle designates this
+# particular file as subject to the "Classpath" exception as provided
+# by Oracle in the LICENSE file that accompanied this code.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+
+ifndef _SETUP_GMK
+_SETUP_GMK := 1
+
+include JavaCompilation.gmk
+
+DISABLE_WARNINGS := -Xlint:all,-deprecation,-unchecked,-rawtypes,-cast,-serial,-dep-ann,-static,-fallthrough,-try,-varargs,-empty,-finally
+
+# To build with all warnings enabled, do the following:
+# make JAVAC_WARNINGS="-Xlint:all -Xmaxwarns 10000"
+JAVAC_WARNINGS := -Xlint:all,-deprecation -Werror
+
+# Any Java code executed during a JDK build to build other parts of the JDK must
+# be executed by the bootstrap JDK (typically with -Xbootclasspath/p:) and must
+# therefore be built with -target PREVIOUS, which requires restricting it to the
+# language level and API of the previous JDK.
+#
+# The generate old bytecode javac setup uses the new compiler to compile for the
+# boot jdk to generate tools that need to be run with the boot jdk.
+# Thus we force the target bytecode to the previous JDK version.
+$(eval $(call SetupJavaCompiler,GENERATE_OLDBYTECODE, \
+    JVM := $(JAVA_SMALL), \
+    JAVAC := $(NEW_JAVAC), \
+    FLAGS := $(BOOT_JDK_SOURCETARGET) -bootclasspath $(BOOT_RTJAR) $(DISABLE_WARNINGS), \
+    SERVER_DIR := $(SJAVAC_SERVER_DIR), \
+    SERVER_JVM := $(SJAVAC_SERVER_JAVA)))
+
+# The generate new bytecode javac setup uses the new compiler to compile for the
+# new jdk. This new bytecode might only be possible to run using the new jvm.
+$(eval $(call SetupJavaCompiler,GENERATE_JDKBYTECODE, \
+    JVM := $(JAVA), \
+    JAVAC := $(NEW_JAVAC), \
+    FLAGS := -source 9 -target 9 \
+        -encoding ascii -XDignore.symbol.file=true $(JAVAC_WARNINGS) \
+        $(GENERATE_JDKBYTECODE_EXTRA_FLAGS), \
+    SERVER_DIR := $(SJAVAC_SERVER_DIR), \
+    SERVER_JVM := $(SJAVAC_SERVER_JAVA)))
+
+# The generate new bytecode javac setup uses the new compiler to compile for the
+# new jdk. This new bytecode might only be possible to run using the new jvm.
+$(eval $(call SetupJavaCompiler,GENERATE_JDKBYTECODE_NOWARNINGS, \
+    JVM := $(JAVA), \
+    JAVAC := $(NEW_JAVAC), \
+    FLAGS := -source 9 -target 9 \
+        -encoding ascii -XDignore.symbol.file=true $(DISABLE_WARNINGS) \
+        $(GENERATE_JDKBYTECODE_EXTRA_FLAGS), \
+    SERVER_DIR := $(SJAVAC_SERVER_DIR), \
+    SERVER_JVM := $(SJAVAC_SERVER_JAVA)))
+
+JDK_BOOTCLASSPATH := $(subst $(SPACE),$(PATH_SEP),\
+    $(filter-out $(JDK_OUTPUTDIR)/modules/_%, $(wildcard $(JDK_OUTPUTDIR)/modules/*)))
+
+# After the jdk is built, we want to build demos using only the recently
+# generated jdk classes and nothing else, no jdk source, etc etc.
+# I.e. the rt.jar, but since rt.jar has not yet been generated
+# (it will be in "make images") therefore we use classes instead.
+$(eval $(call SetupJavaCompiler,GENERATE_USINGJDKBYTECODE, \
+    JVM := $(JAVA_SMALL), \
+    JAVAC := $(NEW_JAVAC), \
+    FLAGS := -bootclasspath "$(JDK_BOOTCLASSPATH)" $(DISABLE_WARNINGS), \
+    SERVER_DIR := $(SJAVAC_SERVER_DIR), \
+    SERVER_JVM := $(SJAVAC_SERVER_JAVA)))
+
+endif # _SETUP_GMK
--- a/make/jprt.properties	Fri Aug 29 11:58:43 2014 -0700
+++ b/make/jprt.properties	Wed Jul 05 19:58:32 2017 +0200
@@ -466,15 +466,16 @@
   windows_i586_6.1-fastdebug-c2-hotspot_internalvmtests,		\
   windows_x64_6.1-fastdebug-c2-hotspot_internalvmtests
 
-my.make.rule.test.targets.hotspot.reg.group=					\
-  solaris_sparcv9_5.11-{product|fastdebug}-c2-GROUP,				\
-  solaris_x64_5.11-{product|fastdebug}-c2-GROUP,				\
-  linux_i586_2.6-{product|fastdebug}-c2-GROUP,					\
-  linux_x64_2.6-{product|fastdebug}-c2-GROUP,					\
-  windows_i586_6.1-{product|fastdebug}-c2-GROUP,				\
-  windows_x64_6.1-{product|fastdebug}-c2-GROUP,					\
-  linux_i586_2.6-{product|fastdebug}-c1-GROUP,					\
-  windows_i586_6.1-{product|fastdebug}-c1-GROUP
+my.make.rule.test.targets.hotspot.reg.group=				\
+  solaris_sparcv9_5.11-fastdebug-c2-GROUP,				\
+  solaris_x64_5.11-fastdebug-c2-GROUP,					\
+  linux_i586_2.6-fastdebug-c2-GROUP,					\
+  linux_x64_2.6-fastdebug-c2-GROUP,					\
+  macosx_x64_10.7-fastdebug-c2-GROUP,					\
+  windows_i586_6.1-fastdebug-c2-GROUP,					\
+  windows_x64_6.1-fastdebug-c2-GROUP,					\
+  linux_i586_2.6-fastdebug-c1-GROUP,					\
+  windows_i586_6.1-fastdebug-c1-GROUP
 
 my.make.rule.test.targets.hotspot=						\
   ${my.make.rule.test.targets.hotspot.clienttests},				\
@@ -483,6 +484,7 @@
   ${my.make.rule.test.targets.hotspot.reg.group:GROUP=hotspot_wbapitest},	\
   ${my.make.rule.test.targets.hotspot.reg.group:GROUP=hotspot_compiler},	\
   ${my.make.rule.test.targets.hotspot.reg.group:GROUP=hotspot_gc},		\
-  ${my.make.rule.test.targets.hotspot.reg.group:GROUP=hotspot_runtime},	\
+  ${my.make.rule.test.targets.hotspot.reg.group:GROUP=hotspot_runtime},		\
+  ${my.make.rule.test.targets.hotspot.reg.group:GROUP=hotspot_runtime_closed},	\
   ${my.make.rule.test.targets.hotspot.reg.group:GROUP=hotspot_serviceability},	\
   ${my.additional.make.rule.test.targets.hotspot}