Merge
author duke
date Wed, 05 Jul 2017 19:40:49 +0200
changeset 24378 7467e0df1327
parent 24377 2e754d3dbdf2
parent 24360 a0f6522e5758
child 24382 3569424d797a
Merge
--- a/.hgtags-top-repo	Thu May 15 10:41:25 2014 -0700
+++ b/.hgtags-top-repo	Wed Jul 05 19:40:49 2017 +0200
@@ -255,3 +255,4 @@
 fa13f2b926f8426876ec03e7903f3ee0ee150f2e jdk9-b10
 ab55a18a95e1990a588929d5d29db3eb9985fea0 jdk9-b11
 59f6350295f9681fe5956d8bc889bf341914c6cb jdk9-b12
+5800456add07e1a68170a229fb5e27376f8875e5 jdk9-b13
--- a/common/autoconf/boot-jdk.m4	Thu May 15 10:41:25 2014 -0700
+++ b/common/autoconf/boot-jdk.m4	Wed Jul 05 19:40:49 2017 +0200
@@ -331,8 +331,8 @@
 
   # Finally, set some other options...
 
-  # When compiling code to be executed by the Boot JDK, force jdk7 compatibility.
-  BOOT_JDK_SOURCETARGET="-source 7 -target 7"
+  # When compiling code to be executed by the Boot JDK, force jdk8 compatibility.
+  BOOT_JDK_SOURCETARGET="-source 8 -target 8"
   AC_SUBST(BOOT_JDK_SOURCETARGET)
   AC_SUBST(JAVAC_FLAGS)
 ])
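
A minimal sketch of what this setting amounts to when build tools are compiled for the Boot JDK (the tool name and output directory below are hypothetical; the real command line is assembled by the build makefiles from JAVAC_FLAGS and BOOT_JDK_SOURCETARGET):

  # Compile a build tool so a JDK 8 Boot JDK can run it.
  $BOOT_JDK/bin/javac -source 8 -target 8 -d buildtools/classes SomeBuildTool.java
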
--- a/common/autoconf/generated-configure.sh	Thu May 15 10:41:25 2014 -0700
+++ b/common/autoconf/generated-configure.sh	Wed Jul 05 19:40:49 2017 +0200
@@ -4243,7 +4243,7 @@
 #CUSTOM_AUTOCONF_INCLUDE
 
 # Do not change or remove the following line, it is needed for consistency checks:
-DATE_WHEN_GENERATED=1398861894
+DATE_WHEN_GENERATED=1399969244
 
 ###############################################################################
 #
@@ -26004,8 +26004,8 @@
 
   # Finally, set some other options...
 
-  # When compiling code to be executed by the Boot JDK, force jdk7 compatibility.
-  BOOT_JDK_SOURCETARGET="-source 7 -target 7"
+  # When compiling code to be executed by the Boot JDK, force jdk8 compatibility.
+  BOOT_JDK_SOURCETARGET="-source 8 -target 8"
 
 
 
--- a/common/bin/hgforest.sh	Thu May 15 10:41:25 2014 -0700
+++ b/common/bin/hgforest.sh	Wed Jul 05 19:40:49 2017 +0200
@@ -1,5 +1,4 @@
 #!/bin/sh
-
 #
 # Copyright (c) 2009, 2014, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -23,25 +22,40 @@
 # questions.
 #
 
-# Shell script for a fast parallel forest command
+# Shell script for a fast parallel forest/trees command
 
-global_opts=""
-status_output="/dev/stdout"
-qflag="false"
-vflag="false"
-sflag="false"
+usage() {
+      echo "usage: $0 [-h|--help] [-q|--quiet] [-v|--verbose] [-s|--sequential] [--] <command> [commands...]" > ${status_output}
+      echo "Environment variables which modify behaviour:"
+      echo "   HGFOREST_QUIET      : (boolean) If 'true' then standard output is redirected to /dev/null"
+      echo "   HGFOREST_VERBOSE    : (boolean) If 'true' then Mercurial asked to produce verbose output"
+      echo "   HGFOREST_SEQUENTIAL : (boolean) If 'true' then repos are processed sequentially. Disables concurrency"
+      echo "   HGFOREST_GLOBALOPTS : (string, must begin with space) Additional Mercurial global options"
+      echo "   HGFOREST_REDIRECT   : (file path) Redirect standard output to specified file"
+      echo "   HGFOREST_FIFOS      : (boolean) Default behaviour for FIFO detection. Does not override FIFOs disabled"
+      echo "   HGFOREST_CONCURRENCY: (positive integer) Number of repos to process concurrently"
+      echo "   HGFOREST_DEBUG      : (boolean) If 'true' then temp files are retained"
+      exit 1
+}
+
+global_opts="${HGFOREST_GLOBALOPTS:-}"
+status_output="${HGFOREST_REDIRECT:-/dev/stdout}"
+qflag="${HGFOREST_QUIET:-false}"
+vflag="${HGFOREST_VERBOSE:-false}"
+sflag="${HGFOREST_SEQUENTIAL:-false}"
 while [ $# -gt 0 ]
 do
   case $1 in
+    -h | --help )
+      usage
+      ;;
+
     -q | --quiet )
       qflag="true"
-      global_opts="${global_opts} -q"
-      status_output="/dev/null"
       ;;
 
     -v | --verbose )
       vflag="true"
-      global_opts="${global_opts} -v"
       ;;
 
     -s | --sequential )
@@ -63,39 +77,60 @@
   shift
 done
 
-
-command="$1"; shift
-command_args="$@"
+# silence standard output?
+if [ ${qflag} = "true" ] ; then
+  global_opts="${global_opts} -q"
+  status_output="/dev/null"
+fi
 
-usage() {
-      echo "usage: $0 [-q|--quiet] [-v|--verbose] [-s|--sequential] [--] <command> [commands...]" > ${status_output}
-      exit 1
-}
+# verbose output?
+if [ ${vflag} = "true" ] ; then
+  global_opts="${global_opts} -v"
+fi
 
-if [ "x" = "x$command" ] ; then
+# Make sure we have a command.
+if [ $# -lt 1 -o -z "${1:-}" ] ; then
   echo "ERROR: No command to hg supplied!"
   usage
 fi
 
-# Check if we can use fifos for monitoring sub-process completion.
-on_windows=`uname -s | egrep -ic -e 'cygwin|msys'`
-if [ ${on_windows} = "1" ]; then
-  # cygwin has (2014-04-18) broken (single writer only) FIFOs
-  # msys has (2014-04-18) no FIFOs.
-  have_fifos="false"
-else
-  have_fifos="true"
-fi
+command="$1"; shift
+command_args="${@:-}"
 
 # Clean out the temporary directory that stores the pid files.
 tmp=/tmp/forest.$$
 rm -f -r ${tmp}
 mkdir -p ${tmp}
 
+
+if [ "${HGFOREST_DEBUG:-false}" = "true" ] ; then
+  echo "DEBUG: temp files are in: ${tmp}"
+fi
+
+# Check if we can use fifos for monitoring sub-process completion.
+echo "1" > ${tmp}/read
+while_subshell=1
+while read line; do
+  while_subshell=0
+  break;
+done < ${tmp}/read
+rm ${tmp}/read
+
+on_windows=`uname -s | egrep -ic -e 'cygwin|msys'`
+
+if [ ${while_subshell} = "1" -o ${on_windows} = "1" ]; then
+  # cygwin has (2014-04-18) broken (single writer only) FIFOs
+  # msys has (2014-04-18) no FIFOs.
+  # older shells create a sub-shell for redirect to while
+  have_fifos="false"
+else
+  have_fifos="${HGFOREST_FIFOS:-true}"
+fi
+
 safe_interrupt () {
   if [ -d ${tmp} ]; then
     if [ "`ls ${tmp}/*.pid`" != "" ]; then
-      echo "Waiting for processes ( `cat ${tmp}/*.pid | tr '\n' ' '`) to terminate nicely!" > ${status_output}
+      echo "Waiting for processes ( `cat ${tmp}/.*.pid ${tmp}/*.pid 2> /dev/null | tr '\n' ' '`) to terminate nicely!" > ${status_output}
       sleep 1
       # Pipe stderr to dev/null to silence kill, that complains when trying to kill
       # a subprocess that has already exited.
@@ -110,10 +145,12 @@
 
 nice_exit () {
   if [ -d ${tmp} ]; then
-    if [ "`ls ${tmp}`" != "" ]; then
+    if [ "`ls -A ${tmp} 2> /dev/null`" != "" ]; then
       wait
     fi
-    rm -f -r ${tmp}
+    if [ "${HGFOREST_DEBUG:-false}" != "true" ] ; then
+      rm -f -r ${tmp}
+    fi
   fi
 }
 
@@ -128,17 +165,20 @@
 repos=""
 repos_extra=""
 if [ "${command}" = "clone" -o "${command}" = "fclone" -o "${command}" = "tclone" ] ; then
+  # we must be a clone
   if [ ! -f .hg/hgrc ] ; then
     echo "ERROR: Need initial repository to use this script" > ${status_output}
     exit 1
   fi
 
+  # the clone must know where it came from (have a default pull path).
   pull_default=`hg paths default`
   if [ "${pull_default}" = "" ] ; then
     echo "ERROR: Need initial clone with 'hg paths default' defined" > ${status_output}
     exit 1
   fi
 
+  # determine which sub repos need to be cloned.
   for i in ${subrepos} ; do
     if [ ! -f ${i}/.hg/hgrc ] ; then
       repos="${repos} ${i}"
@@ -147,12 +187,15 @@
 
   pull_default_tail=`echo ${pull_default} | sed -e 's@^.*://[^/]*/\(.*\)@\1@'`
 
-  if [ "${command_args}" != "" ] ; then
+  if [ -n "${command_args}" ] ; then
+    # if there is an "extra sources" path then reparent "extra" repos to that path
     if [ "x${pull_default}" = "x${pull_default_tail}" ] ; then
       echo "ERROR: Need initial clone from non-local source" > ${status_output}
       exit 1
     fi
     pull_extra="${command_args}/${pull_default_tail}"
+
+    # determine which extra subrepos need to be cloned.
     for i in ${subrepos_extra} ; do
       if [ ! -f ${i}/.hg/hgrc ] ; then
         repos_extra="${repos_extra} ${i}"
@@ -160,7 +203,7 @@
     done
   else
     if [ "x${pull_default}" = "x${pull_default_tail}" ] ; then
-      # local source repo. Copy the extras ones that exist there.
+      # local source repo. Clone the "extra" subrepos that exist there.
       for i in ${subrepos_extra} ; do
         if [ -f ${pull_default}/${i}/.hg/hgrc -a ! -f ${i}/.hg/hgrc ] ; then
           # sub-repo there in source but not here
@@ -169,13 +212,17 @@
       done
     fi
   fi
-  at_a_time=2
+
   # Any repos to deal with?
   if [ "${repos}" = "" -a "${repos_extra}" = "" ] ; then
     echo "No repositories to process." > ${status_output}
     exit
   fi
+
+  # Repos to process concurrently. Clone does better with low concurrency.
+  at_a_time="${HGFOREST_CONCURRENCY:-2}"
 else
+  # Process command for all of the present repos
   for i in . ${subrepos} ${subrepos_extra} ; do
     if [ -d ${i}/.hg ] ; then
       repos="${repos} ${i}"
@@ -189,6 +236,7 @@
   fi
 
   # any of the repos locked?
+  locked=""
   for i in ${repos} ; do
     if [ -h ${i}/.hg/store/lock -o -f ${i}/.hg/store/lock ] ; then
       locked="${i} ${locked}"
@@ -198,34 +246,39 @@
     echo "ERROR: These repositories are locked: ${locked}" > ${status_output}
     exit 1
   fi
-  at_a_time=8
+
+  # Repos to process concurrently.
+  at_a_time="${HGFOREST_CONCURRENCY:-8}"
 fi
 
 # Echo out what repositories we do a command on.
 echo "# Repositories: ${repos} ${repos_extra}" > ${status_output}
 
 if [ "${command}" = "serve" ] ; then
-  # "serve" is run for all the repos.
+  # "serve" is run for all the repos as one command.
   (
     (
+      cwd=`pwd`
+      serving=`basename ${cwd}`
       (
         echo "[web]"
-        echo "description = $(basename $(pwd))"
+        echo "description = ${serving}"
         echo "allow_push = *"
         echo "push_ssl = False"
 
         echo "[paths]"
-        for i in ${repos} ${repos_extra} ; do
+        for i in ${repos} ; do
           if [ "${i}" != "." ] ; then
-            echo "/$(basename $(pwd))/${i} = ${i}"
+            echo "/${serving}/${i} = ${i}"
           else
-            echo "/$(basename $(pwd)) = $(pwd)"
+            echo "/${serving} = ${cwd}"
           fi
         done
       ) > ${tmp}/serve.web-conf
 
-      echo "serving root repo $(basename $(pwd))"
+      echo "serving root repo ${serving}" > ${status_output}
 
+      echo "hg${global_opts} serve" > ${status_output}
       (PYTHONUNBUFFERED=true hg${global_opts} serve -A ${status_output} -E ${status_output} --pid-file ${tmp}/serve.pid --web-conf ${tmp}/serve.web-conf; echo "$?" > ${tmp}/serve.pid.rc ) 2>&1 &
     ) 2>&1 | sed -e "s@^@serve:   @" > ${status_output}
   ) &
@@ -234,81 +287,93 @@
 
   # n is the number of subprocess started or which might still be running.
   n=0
-  if [ $have_fifos = "true" ]; then
+  if [ ${have_fifos} = "true" ]; then
     # if we have fifos use them to detect command completion.
     mkfifo ${tmp}/fifo
     exec 3<>${tmp}/fifo
-    if [ "${sflag}" = "true" ] ; then
-      # force sequential
-      at_a_time=1
-    fi
   fi
 
+  # iterate over all of the subrepos.
   for i in ${repos} ${repos_extra} ; do
     n=`expr ${n} '+' 1`
     repopidfile=`echo ${i} | sed -e 's@./@@' -e 's@/@_@g'`
     reponame=`echo ${i} | sed -e :a -e 's/^.\{1,20\}$/ &/;ta'`
     pull_base="${pull_default}"
-    for j in $repos_extra ; do
-      if [ "$i" = "$j" ] ; then
-          pull_base="${pull_extra}"
+
+    # regular repo or "extra" repo?
+    for j in ${repos_extra} ; do
+      if [ "${i}" = "${j}" ] ; then
+        # it's an "extra"
+        pull_base="${pull_extra}"
       fi
     done
+
+    # remove trailing slash
     pull_base="`echo ${pull_base} | sed -e 's@[/]*$@@'`"
+
+    # execute the command on the subrepo
     (
       (
         if [ "${command}" = "clone" -o "${command}" = "fclone" -o "${command}" = "tclone" ] ; then
-          pull_newrepo="${pull_base}/${i}"
-          path="`dirname ${i}`"
-          if [ "${path}" != "." ] ; then
+          # some form of clone
+          clone_newrepo="${pull_base}/${i}"
+          parent_path="`dirname ${i}`"
+          if [ "${parent_path}" != "." ] ; then
             times=0
-            while [ ! -d "${path}" ]   ## nested repo, ensure containing dir exists
-            do
+            while [ ! -d "${parent_path}" ] ; do  ## nested repo, ensure containing dir exists
+              if [ "${sflag}" = "true" ] ; then
+                # Missing parent is fatal during sequential operation.
+                echo "ERROR: Missing parent path: ${parent_path}" > ${status_output}
+                exit 1
+              fi
               times=`expr ${times} '+' 1`
               if [ `expr ${times} '%' 10` -eq 0 ] ; then
-                echo "${path} still not created, waiting..." > ${status_output}
+                echo "${parent_path} still not created, waiting..." > ${status_output}
               fi
               sleep 5
             done
           fi
-          echo "hg${global_opts} clone ${pull_newrepo} ${i}" > ${status_output}
-          (PYTHONUNBUFFERED=true hg${global_opts} clone ${pull_newrepo} ${i}; echo "$?" > ${tmp}/${repopidfile}.pid.rc ) 2>&1 &
+          # run the clone command.
+          echo "hg${global_opts} clone ${clone_newrepo} ${i}" > ${status_output}
+          (PYTHONUNBUFFERED=true hg${global_opts} clone ${clone_newrepo} ${i}; echo "$?" > ${tmp}/${repopidfile}.pid.rc ) 2>&1 &
         else
+          # run the command.
           echo "cd ${i} && hg${global_opts} ${command} ${command_args}" > ${status_output}
           cd ${i} && (PYTHONUNBUFFERED=true hg${global_opts} ${command} ${command_args}; echo "$?" > ${tmp}/${repopidfile}.pid.rc ) 2>&1 &
         fi
 
         echo $! > ${tmp}/${repopidfile}.pid
       ) 2>&1 | sed -e "s@^@${reponame}:   @" > ${status_output}
-      if [ $have_fifos = "true" ]; then
-        echo "${reponame}" >&3
+      # tell the fifo waiter that this subprocess is done.
+      if [ ${have_fifos} = "true" ]; then
+        echo "${i}" >&3
       fi
     ) &
 
-    if [ $have_fifos = "true" ]; then
-      # check on count of running subprocesses and possibly wait for completion
-      if [ ${at_a_time} -lt ${n} ] ; then
-        # read will block until there are completed subprocesses
-        while read repo_done; do
-          n=`expr ${n} '-' 1`
-          if [ ${n} -lt ${at_a_time} ] ; then
-            # we should start more subprocesses
-            break;
-          fi
-        done <&3
-      fi
+    if [ "${sflag}" = "true" ] ; then
+      # complete this task before starting another.
+      wait
     else
-      if [ "${sflag}" = "false" ] ; then
+      if [ "${have_fifos}" = "true" ]; then
+        # check on count of running subprocesses and possibly wait for completion
+        if [ ${n} -ge ${at_a_time} ] ; then
+          # read will block until there are completed subprocesses
+          while read repo_done; do
+            n=`expr ${n} '-' 1`
+            if [ ${n} -lt ${at_a_time} ] ; then
+              # we should start more subprocesses
+              break;
+            fi
+          done <&3
+        fi
+      else
         # Compare completions to starts
-        completed="`(ls -1 ${tmp}/*.pid.rc 2> /dev/null | wc -l) || echo 0`"
-        while [ ${at_a_time} -lt `expr ${n} '-' ${completed}` ] ; do
+        completed="`(ls -a1 ${tmp}/*.pid.rc 2> /dev/null | wc -l) || echo 0`"
+        while [ `expr ${n} '-' ${completed}` -ge ${at_a_time} ] ; do
           # sleep a short time to give time for something to complete
           sleep 1
-          completed="`(ls -1 ${tmp}/*.pid.rc 2> /dev/null | wc -l) || echo 0`"
+          completed="`(ls -a1 ${tmp}/*.pid.rc 2> /dev/null | wc -l) || echo 0`"
         done
-      else
-        # complete this task before starting another.
-        wait
       fi
     fi
   done
@@ -320,11 +385,12 @@
 # Terminate with exit 0 only if all subprocesses were successful
 ec=0
 if [ -d ${tmp} ]; then
-  for rc in ${tmp}/*.pid.rc ; do
+  rcfiles="`(ls -a ${tmp}/*.pid.rc 2> /dev/null) || echo ''`"
+  for rc in ${rcfiles} ; do
     exit_code=`cat ${rc} | tr -d ' \n\r'`
     if [ "${exit_code}" != "0" ] ; then
-      repo="`echo ${rc} | sed -e s@^${tmp}@@ -e 's@/*\([^/]*\)\.pid\.rc$@\1@' -e 's@_@/@g'`"
-      echo "WARNING: ${repo} exited abnormally ($exit_code)" > ${status_output}
+      repo="`echo ${rc} | sed -e 's@^'${tmp}'@@' -e 's@/*\([^/]*\)\.pid\.rc$@\1@' -e 's@_@/@g'`"
+      echo "WARNING: ${repo} exited abnormally (${exit_code})" > ${status_output}
       ec=1
     fi
   done
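
The reworked hgforest.sh keeps its command-line flags and adds the environment-variable equivalents listed in its usage() text above. A short usage sketch, assuming it is run from the root of a forest (repository paths and log file name are illustrative):

  # Clone the remaining forest repos sequentially, with verbose Mercurial output.
  sh common/bin/hgforest.sh -v -s clone

  # Pull and update every present repo, four at a time, logging status to a file.
  HGFOREST_CONCURRENCY=4 HGFOREST_REDIRECT=/tmp/forest.log sh common/bin/hgforest.sh pull -u
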
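The FIFO check added above exploits the fact that some older shells run a redirected while-read loop in a subshell, so assignments made inside the loop do not survive it. A standalone sketch of that probe:

  probe=/tmp/probe.$$
  echo "1" > ${probe}
  in_subshell=1
  while read line; do
    in_subshell=0   # lost if the loop runs in a subshell
    break
  done < ${probe}
  rm ${probe}
  # If in_subshell is still 1 here, the shell forked a subshell for the
  # redirect and hgforest.sh falls back to polling instead of FIFOs.
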
--- a/hotspot/.hgtags	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/.hgtags	Wed Jul 05 19:40:49 2017 +0200
@@ -415,3 +415,4 @@
 ebc44d040cd149d2120d69fe183a3dae7840f4b4 jdk9-b10
 783309c3a1a629a452673399dcfa83ef7eca94d8 jdk9-b11
 1c383bb39e2849ca62cb763f4e182a29b421d60a jdk9-b12
+456ad9c99133803d4e1433124c85a6fd141b9ac9 jdk9-b13
--- a/hotspot/agent/src/os/linux/libproc.h	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/agent/src/os/linux/libproc.h	Wed Jul 05 19:40:49 2017 +0200
@@ -34,19 +34,7 @@
 #include "libproc_md.h"
 #endif
 
-#if defined(sparc) || defined(sparcv9)
-/*
-  If _LP64 is defined ptrace.h should be taken from /usr/include/asm-sparc64
-  otherwise it should be from /usr/include/asm-sparc
-  These two files define pt_regs structure differently
-*/
-#ifdef _LP64
-#include "asm-sparc64/ptrace.h"
-#else
-#include "asm-sparc/ptrace.h"
-#endif
-
-#endif //sparc or sparcv9
+#include <linux/ptrace.h>
 
 /************************************************************************************
 
--- a/hotspot/src/cpu/ppc/vm/cppInterpreter_ppc.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/cppInterpreter_ppc.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -1,3 +1,4 @@
+
 /*
  * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2012, 2014 SAP AG. All rights reserved.
@@ -403,7 +404,7 @@
   BLOCK_COMMENT("compute_interpreter_state {");
 
   // access_flags = method->access_flags();
-  // TODO: PPC port: assert(4 == methodOopDesc::sz_access_flags(), "unexpected field size");
+  // TODO: PPC port: assert(4 == sizeof(AccessFlags), "unexpected field size");
   __ lwa(access_flags, method_(access_flags));
 
   // parameter_count = method->constMethod->size_of_parameters();
@@ -419,10 +420,8 @@
   // TODO: PPC port: assert(2 == ConstMethod::sz_max_stack(), "unexpected field size");
   __ lhz(max_stack, in_bytes(ConstMethod::max_stack_offset()), max_stack);
 
-  if (EnableInvokeDynamic) {
-    // Take into account 'extra_stack_entries' needed by method handles (see method.hpp).
+  // Take into account 'extra_stack_entries' needed by method handles (see method.hpp).
     __ addi(max_stack, max_stack, Method::extra_stack_entries());
-  }
 
   // mem_stack_limit = thread->stack_limit();
   __ ld(mem_stack_limit, thread_(stack_overflow_limit));
@@ -1055,7 +1054,7 @@
   assert(access_flags->is_nonvolatile(),
          "access_flags must be in a non-volatile register");
   // Type check.
-  // TODO: PPC port: assert(4 == methodOopDesc::sz_access_flags(), "unexpected field size");
+  // TODO: PPC port: assert(4 == sizeof(AccessFlags), "unexpected field size");
   __ lwz(access_flags, method_(access_flags));
 
   // We don't want to reload R19_method and access_flags after calls
@@ -1838,7 +1837,7 @@
   // Interpreter state fields.
   const Register msg               = R24_tmp4;
 
-  // MethodOop fields.
+  // Method fields.
   const Register parameter_count   = R25_tmp5;
   const Register result_index      = R26_tmp6;
 
@@ -2023,7 +2022,7 @@
   __ add(R17_tos, R17_tos, parameter_count);
 
   // Result stub address array index
-  // TODO: PPC port: assert(4 == methodOopDesc::sz_result_index(), "unexpected field size");
+  // TODO: PPC port: assert(4 == sizeof(AccessFlags), "unexpected field size");
   __ lwa(result_index, method_(result_index));
 
   __ li(msg, BytecodeInterpreter::method_resume);
@@ -2709,7 +2708,7 @@
   __ ld(R3_ARG1, state_(_result._osr._osr_buf));
   __ mtctr(R12_scratch2);
 
-  // Load method oop, gc may move it during execution of osr'd method.
+  // Load method, gc may move it during execution of osr'd method.
   __ ld(R22_tmp2, state_(_method));
   // Load message 'call_method'.
   __ li(R23_tmp3, BytecodeInterpreter::call_method);
--- a/hotspot/src/cpu/ppc/vm/frame_ppc.inline.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/frame_ppc.inline.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -26,6 +26,8 @@
 #ifndef CPU_PPC_VM_FRAME_PPC_INLINE_HPP
 #define CPU_PPC_VM_FRAME_PPC_INLINE_HPP
 
+#include "code/codeCache.hpp"
+
 // Inline functions for ppc64 frames:
 
 // Find codeblob and set deopt_state.
--- a/hotspot/src/cpu/ppc/vm/interp_masm_ppc_64.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/interp_masm_ppc_64.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -380,7 +380,6 @@
   if (index_size == sizeof(u2)) {
     get_2_byte_integer_at_bcp(bcp_offset, Rdst, Unsigned);
   } else if (index_size == sizeof(u4)) {
-    assert(EnableInvokeDynamic, "giant index used only for JSR 292");
     get_4_byte_integer_at_bcp(bcp_offset, Rdst, Signed);
     assert(ConstantPool::decode_invokedynamic_index(~123) == 123, "else change next line");
     nand(Rdst, Rdst, Rdst); // convert to plain index
--- a/hotspot/src/cpu/ppc/vm/interp_masm_ppc_64.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/interp_masm_ppc_64.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -26,7 +26,7 @@
 #ifndef CPU_PPC_VM_INTERP_MASM_PPC_64_HPP
 #define CPU_PPC_VM_INTERP_MASM_PPC_64_HPP
 
-#include "assembler_ppc.inline.hpp"
+#include "asm/macroAssembler.hpp"
 #include "interpreter/invocationCounter.hpp"
 
 // This file specializes the assembler with interpreter-specific macros.
--- a/hotspot/src/cpu/ppc/vm/interpreterRT_ppc.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/interpreterRT_ppc.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -24,6 +24,7 @@
  */
 
 #include "precompiled.hpp"
+#include "asm/assembler.inline.hpp"
 #include "interpreter/interpreter.hpp"
 #include "interpreter/interpreterRuntime.hpp"
 #include "memory/allocation.inline.hpp"
--- a/hotspot/src/cpu/ppc/vm/interpreter_ppc.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/interpreter_ppc.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -139,32 +139,16 @@
   // Signature is in R3_RET. Signature is callee saved.
   __ mr(signature, R3_RET);
 
-  // Reload method, it may have moved.
-#ifdef CC_INTERP
-  __ ld(R19_method, state_(_method));
-#else
-  __ ld(R19_method, 0, target_sp);
-  __ ld(R19_method, _ijava_state_neg(method), R19_method);
-#endif
-
   // Get the result handler.
   __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::get_result_handler), R16_thread, R19_method);
 
-  // Reload method, it may have moved.
-#ifdef CC_INTERP
-  __ ld(R19_method, state_(_method));
-#else
-  __ ld(R19_method, 0, target_sp);
-  __ ld(R19_method, _ijava_state_neg(method), R19_method);
-#endif
-
   {
     Label L;
     // test if static
     // _access_flags._flags must be at offset 0.
     // TODO PPC port: requires change in shared code.
     //assert(in_bytes(AccessFlags::flags_offset()) == 0,
-    //       "MethodOopDesc._access_flags == MethodOopDesc._access_flags._flags");
+    //       "MethodDesc._access_flags == MethodDesc._access_flags._flags");
     // _access_flags must be a 32 bit value.
     assert(sizeof(AccessFlags) == 4, "wrong size");
     __ lwa(R11_scratch1/*access_flags*/, method_(access_flags));
--- a/hotspot/src/cpu/ppc/vm/jniFastGetField_ppc.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/jniFastGetField_ppc.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -32,7 +32,7 @@
 
 
 address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
-  // we don't have fast jni accessors.
+  // We don't have fast jni accessors.
   return (address) -1;
 }
 
@@ -57,12 +57,12 @@
 }
 
 address JNI_FastGetField::generate_fast_get_long_field() {
-  // we don't have fast jni accessors.
+  // We don't have fast jni accessors.
   return (address) -1;
 }
 
 address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) {
-  // e don't have fast jni accessors.
+  // We don't have fast jni accessors.
   return (address) -1;
 }
 
--- a/hotspot/src/cpu/ppc/vm/ppc.ad	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/ppc.ad	Wed Jul 05 19:40:49 2017 +0200
@@ -898,7 +898,7 @@
   // To keep related declarations/definitions/uses close together,
   // we switch between source %{ }% and source_hpp %{ }% freely as needed.
 
-  // Returns true if Node n is followed by a MemBar node that 
+  // Returns true if Node n is followed by a MemBar node that
   // will do an acquire. If so, this node must not do the acquire
   // operation.
   bool followed_by_acquire(const Node *n);
@@ -908,7 +908,7 @@
 
 // Optimize load-acquire.
 //
-// Check if acquire is unnecessary due to following operation that does 
+// Check if acquire is unnecessary due to following operation that does
 // acquire anyways.
 // Walk the pattern:
 //
@@ -919,12 +919,12 @@
 //  Proj(ctrl)  Proj(mem)
 //       |         |
 //   MemBarRelease/Volatile
-// 
+//
 bool followed_by_acquire(const Node *load) {
   assert(load->is_Load(), "So far implemented only for loads.");
 
   // Find MemBarAcquire.
-  const Node *mba = NULL;         
+  const Node *mba = NULL;
   for (DUIterator_Fast imax, i = load->fast_outs(imax); i < imax; i++) {
     const Node *out = load->fast_out(i);
     if (out->Opcode() == Op_MemBarAcquire) {
@@ -937,7 +937,7 @@
 
   // Find following MemBar node.
   //
-  // The following node must be reachable by control AND memory 
+  // The following node must be reachable by control AND memory
   // edge to assure no other operations are in between the two nodes.
   //
   // So first get the Proj node, mem_proj, to use it to iterate forward.
@@ -1135,6 +1135,7 @@
 
  public:
 
+  // Emit call stub, compiled java to interpreter.
   static void emit_trampoline_stub(MacroAssembler &_masm, int destination_toc_offset, int insts_call_instruction_offset);
 
   // Size of call trampoline stub.
@@ -2755,7 +2756,7 @@
       // inputs for new nodes
       m1->add_req(NULL, n_toc);
       m2->add_req(NULL, m1);
-      
+
       // operands for new nodes
       m1->_opnds[0] = new (C) iRegPdstOper(); // dst
       m1->_opnds[1] = op_src;                 // src
@@ -2763,29 +2764,29 @@
       m2->_opnds[0] = new (C) iRegPdstOper(); // dst
       m2->_opnds[1] = op_src;                 // src
       m2->_opnds[2] = new (C) iRegLdstOper(); // base
-      
+
       // Initialize ins_attrib TOC fields.
       m1->_const_toc_offset = -1;
       m2->_const_toc_offset_hi_node = m1;
-      
+
       // Register allocation for new nodes.
       ra_->set_pair(m1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
       ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
-      
+
       nodes->push(m1);
       nodes->push(m2);
       assert(m2->bottom_type()->isa_ptr(), "must be ptr");
     } else {
       loadConPNode *m2 = new (C) loadConPNode();
-      
+
       // inputs for new nodes
       m2->add_req(NULL, n_toc);
-      
+
       // operands for new nodes
       m2->_opnds[0] = new (C) iRegPdstOper(); // dst
       m2->_opnds[1] = op_src;                 // src
       m2->_opnds[2] = new (C) iRegPdstOper(); // toc
-      
+
       // Register allocation for new nodes.
       ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
 
@@ -2977,17 +2978,17 @@
       n_sub_base->_opnds[1] = op_crx;
       n_sub_base->_opnds[2] = op_src;
       n_sub_base->_bottom_type = _bottom_type;
-   
+
       n_shift->add_req(n_region, n_sub_base);
       n_shift->_opnds[0] = op_dst;
       n_shift->_opnds[1] = op_dst;
       n_shift->_bottom_type = _bottom_type;
-   
+
       ra_->set_pair(n_shift->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
       ra_->set_pair(n_compare->_idx, ra_->get_reg_second(n_crx), ra_->get_reg_first(n_crx));
       ra_->set_pair(n_sub_base->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
       ra_->set_pair(n_move->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
-   
+
       nodes->push(n_move);
       nodes->push(n_compare);
       nodes->push(n_sub_base);
@@ -3064,20 +3065,20 @@
     } else {
       // before Power 7
       cond_add_baseNode *n_add_base = new (C) cond_add_baseNode();
-     
+
       n_add_base->add_req(n_region, n_compare, n_shift);
       n_add_base->_opnds[0] = op_dst;
       n_add_base->_opnds[1] = op_crx;
       n_add_base->_opnds[2] = op_dst;
       n_add_base->_bottom_type = _bottom_type;
-     
+
       assert(ra_->is_oop(this) == true, "A decodeN node must produce an oop!");
       ra_->set_oop(n_add_base, true);
-     
+
       ra_->set_pair(n_shift->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
       ra_->set_pair(n_compare->_idx, ra_->get_reg_second(n_crx), ra_->get_reg_first(n_crx));
       ra_->set_pair(n_add_base->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
-     
+
       nodes->push(n_compare);
       nodes->push(n_shift);
       nodes->push(n_add_base);
@@ -3634,11 +3635,11 @@
     // Req...
     for (uint i = 0; i < req(); ++i) {
       // The expanded node does not need toc any more.
-      // Add the inline cache constant here instead.  This expresses the 
+      // Add the inline cache constant here instead. This expresses the
       // register of the inline cache must be live at the call.
       // Else we would have to adapt JVMState by -1.
       if (i == mach_constant_base_node_input()) {
-        call->add_req(loadConLNodes_IC._last);        
+        call->add_req(loadConLNodes_IC._last);
       } else {
         call->add_req(in(i));
       }
@@ -3666,6 +3667,8 @@
   %}
 
   // Compound version of call dynamic
+  // Toc is only passed so that it can be used in ins_encode statement.
+  // In the code we have to use $constanttablebase.
   enc_class enc_java_dynamic_call(method meth, iRegLdst toc) %{
     // TODO: PPC port $archOpcode(ppc64Opcode_compound);
     MacroAssembler _masm(&cbuf);
@@ -3673,14 +3676,17 @@
 
     Register Rtoc = (ra_) ? $constanttablebase : R2_TOC;
 #if 0
+    int vtable_index = this->_vtable_index;
     if (_vtable_index < 0) {
       // Must be invalid_vtable_index, not nonvirtual_vtable_index.
       assert(_vtable_index == Method::invalid_vtable_index, "correct sentinel value");
       Register ic_reg = as_Register(Matcher::inline_cache_reg_encode());
-      AddressLiteral meta = __ allocate_metadata_address((Metadata *)Universe::non_oop_word());
-
+
+      // Virtual call relocation will point to ic load.
       address virtual_call_meta_addr = __ pc();
-      __ load_const_from_method_toc(ic_reg, meta, Rtoc);
+      // Load a clear inline cache.
+      AddressLiteral empty_ic((address) Universe::non_oop_word());
+      __ load_const_from_method_toc(ic_reg, empty_ic, Rtoc);
       // CALL to fixup routine.  Fixup routine uses ScopeDesc info
       // to determine who we intended to call.
       __ relocate(virtual_call_Relocation::spec(virtual_call_meta_addr));
@@ -3713,7 +3719,6 @@
              "Fix constant in ret_addr_offset()");
     }
 #endif
-    guarantee(0, "Fix handling of toc edge: messes up derived/base pairs.");
     Unimplemented();  // ret_addr_offset not yet fixed. Depends on compressed oops (load klass!).
   %}
 
@@ -5439,7 +5444,7 @@
   ins_pipe(pipe_class_memory);
 %}
 
-// Match loading integer and casting it to unsigned int in 
+// Match loading integer and casting it to unsigned int in
 // long register.
 // LoadI + ConvI2L + AndL 0xffffffff.
 instruct loadUI2L(iRegLdst dst, memory mem, immL_32bits mask) %{
@@ -6081,7 +6086,7 @@
   ins_pipe(pipe_class_default);
 %}
 
-// This needs a match rule so that build_oop_map knows this is 
+// This needs a match rule so that build_oop_map knows this is
 // not a narrow oop.
 instruct loadConNKlass_lo(iRegNdst dst, immNKlass_NM src1, iRegNsrc src2) %{
   match(Set dst src1);
@@ -6705,7 +6710,7 @@
   size(4);
   ins_encode %{
     // This is a Power7 instruction for which no machine description exists.
-    // TODO: PPC port $archOpcode(ppc64Opcode_compound); 
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
     __ isel_0($dst$$Register, $crx$$CondRegister, Assembler::equal, $src1$$Register);
   %}
   ins_pipe(pipe_class_default);
@@ -6850,7 +6855,7 @@
   size(4);
   ins_encode %{
     // This is a Power7 instruction for which no machine description exists.
-    // TODO: PPC port $archOpcode(ppc64Opcode_compound); 
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
     __ isel_0($dst$$Register, $crx$$CondRegister, Assembler::equal, $src1$$Register);
   %}
   ins_pipe(pipe_class_default);
@@ -7067,7 +7072,7 @@
     n1->_bottom_type = _bottom_type;
 
     decodeNKlass_shiftNode *n2 = new (C) decodeNKlass_shiftNode();
-    n2->add_req(n_region, n2);
+    n2->add_req(n_region, n1);
     n2->_opnds[0] = op_dst;
     n2->_opnds[1] = op_dst;
     n2->_bottom_type = _bottom_type;
@@ -7202,7 +7207,7 @@
 //  inline_unsafe_load_store).
 //
 // Add this node again if we found a good solution for inline_unsafe_load_store().
-// Don't forget to look at the implementation of post_store_load_barrier again, 
+// Don't forget to look at the implementation of post_store_load_barrier again,
 // we did other fixes in that method.
 //instruct unnecessary_membar_volatile() %{
 //  match(MemBarVolatile);
@@ -7240,7 +7245,7 @@
     // exists. Anyways, the scheduler should be off on Power7.
     // TODO: PPC port $archOpcode(ppc64Opcode_compound);
     int cc        = $cmp$$cmpcode;
-    __ isel($dst$$Register, $crx$$CondRegister, 
+    __ isel($dst$$Register, $crx$$CondRegister,
             (Assembler::Condition)(cc & 3), /*invert*/((~cc) & 8), $src$$Register);
   %}
   ins_pipe(pipe_class_default);
@@ -7286,7 +7291,7 @@
     // exists. Anyways, the scheduler should be off on Power7.
     // TODO: PPC port $archOpcode(ppc64Opcode_compound);
     int cc        = $cmp$$cmpcode;
-    __ isel($dst$$Register, $crx$$CondRegister, 
+    __ isel($dst$$Register, $crx$$CondRegister,
             (Assembler::Condition)(cc & 3), /*invert*/((~cc) & 8), $src$$Register);
   %}
   ins_pipe(pipe_class_default);
@@ -7332,7 +7337,7 @@
     // exists. Anyways, the scheduler should be off on Power7.
     // TODO: PPC port $archOpcode(ppc64Opcode_compound);
     int cc        = $cmp$$cmpcode;
-    __ isel($dst$$Register, $crx$$CondRegister, 
+    __ isel($dst$$Register, $crx$$CondRegister,
             (Assembler::Condition)(cc & 3), /*invert*/((~cc) & 8), $src$$Register);
   %}
   ins_pipe(pipe_class_default);
@@ -7379,7 +7384,7 @@
     // exists. Anyways, the scheduler should be off on Power7.
     // TODO: PPC port $archOpcode(ppc64Opcode_compound);
     int cc        = $cmp$$cmpcode;
-    __ isel($dst$$Register, $crx$$CondRegister, 
+    __ isel($dst$$Register, $crx$$CondRegister,
             (Assembler::Condition)(cc & 3), /*invert*/((~cc) & 8), $src$$Register);
   %}
   ins_pipe(pipe_class_default);
@@ -7525,8 +7530,8 @@
   ins_encode %{
     // TODO: PPC port $archOpcode(ppc64Opcode_compound);
     // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
-    __ cmpxchgw(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, 
-                MacroAssembler::MemBarFenceAfter, MacroAssembler::cmpxchgx_hint_atomic_update(), 
+    __ cmpxchgw(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
+                MacroAssembler::MemBarFenceAfter, MacroAssembler::cmpxchgx_hint_atomic_update(),
                 $res$$Register, true);
   %}
   ins_pipe(pipe_class_default);
@@ -7932,7 +7937,23 @@
 
 // Turn the sign-bit of a long into a 64-bit mask, 0x0...0 for
 // positive longs and 0xF...F for negative ones.
-instruct signmask64I_regI(iRegIdst dst, iRegIsrc src) %{
+instruct signmask64I_regL(iRegIdst dst, iRegLsrc src) %{
+  // no match-rule, false predicate
+  effect(DEF dst, USE src);
+  predicate(false);
+
+  format %{ "SRADI   $dst, $src, #63" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_sradi);
+    __ sradi($dst$$Register, $src$$Register, 0x3f);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Turn the sign-bit of a long into a 64-bit mask, 0x0...0 for
+// positive longs and 0xF...F for negative ones.
+instruct signmask64L_regL(iRegLdst dst, iRegLsrc src) %{
   // no match-rule, false predicate
   effect(DEF dst, USE src);
   predicate(false);
@@ -8896,7 +8917,7 @@
   size(4);
   ins_encode %{
     // TODO: PPC port $archOpcode(ppc64Opcode_rlwinm);
-    __ rlwinm($dst$$Register, $src1$$Register, 0, 
+    __ rlwinm($dst$$Register, $src1$$Register, 0,
               (31-log2_long((jlong) $src2$$constant)) & 0x1f, (31-log2_long((jlong) $src2$$constant)) & 0x1f);
   %}
   ins_pipe(pipe_class_default);
@@ -9622,14 +9643,14 @@
   ins_cost(DEFAULT_COST*4);
 
   expand %{
-    iRegIdst src1s;
-    iRegIdst src2s;
-    iRegIdst diff;
-    sxtI_reg(src1s, src1); // ensure proper sign extention
-    sxtI_reg(src2s, src2); // ensure proper sign extention
-    subI_reg_reg(diff, src1s, src2s);
+    iRegLdst src1s;
+    iRegLdst src2s;
+    iRegLdst diff;
+    convI2L_reg(src1s, src1); // Ensure proper sign extension.
+    convI2L_reg(src2s, src2); // Ensure proper sign extension.
+    subL_reg_reg(diff, src1s, src2s);
     // Need to consider >=33 bit result, therefore we need signmaskL.
-    signmask64I_regI(dst, diff);
+    signmask64I_regL(dst, diff);
   %}
 %}
 
@@ -10866,7 +10887,7 @@
   format %{ "PartialSubtypeCheck $result = ($subklass instanceOf $superklass) tmp: $tmp_klass, $tmp_arrayptr" %}
   ins_encode %{
     // TODO: PPC port $archOpcode(ppc64Opcode_compound);
-    __ check_klass_subtype_slow_path($subklass$$Register, $superklass$$Register, $tmp_arrayptr$$Register, 
+    __ check_klass_subtype_slow_path($subklass$$Register, $superklass$$Register, $tmp_arrayptr$$Register,
                                      $tmp_klass$$Register, NULL, $result$$Register);
   %}
   ins_pipe(pipe_class_default);
@@ -11181,18 +11202,18 @@
   ins_cost(DEFAULT_COST*6);
 
   expand %{
-    iRegIdst src1s;
-    iRegIdst src2s;
-    iRegIdst diff;
-    iRegIdst sm;
-    iRegIdst doz; // difference or zero
-    sxtI_reg(src1s, src1); // Ensure proper sign extention.
-    sxtI_reg(src2s, src2); // Ensure proper sign extention.
-    subI_reg_reg(diff, src2s, src1s);
+    iRegLdst src1s;
+    iRegLdst src2s;
+    iRegLdst diff;
+    iRegLdst sm;
+    iRegLdst doz; // difference or zero
+    convI2L_reg(src1s, src1); // Ensure proper sign extension.
+    convI2L_reg(src2s, src2); // Ensure proper sign extension.
+    subL_reg_reg(diff, src2s, src1s);
     // Need to consider >=33 bit result, therefore we need signmaskL.
-    signmask64I_regI(sm, diff);
-    andI_reg_reg(doz, diff, sm); // <=0
-    addI_reg_reg(dst, doz, src1s);
+    signmask64L_regL(sm, diff);
+    andL_reg_reg(doz, diff, sm); // <=0
+    addI_regL_regL(dst, doz, src1s);
   %}
 %}
 
@@ -11201,19 +11222,18 @@
   ins_cost(DEFAULT_COST*6);
 
   expand %{
-    immI_minus1 m1 %{ -1 %}
-    iRegIdst src1s;
-    iRegIdst src2s;
-    iRegIdst diff;
-    iRegIdst sm;
-    iRegIdst doz; // difference or zero
-    sxtI_reg(src1s, src1); // Ensure proper sign extention.
-    sxtI_reg(src2s, src2); // Ensure proper sign extention.
-    subI_reg_reg(diff, src2s, src1s);
+    iRegLdst src1s;
+    iRegLdst src2s;
+    iRegLdst diff;
+    iRegLdst sm;
+    iRegLdst doz; // difference or zero
+    convI2L_reg(src1s, src1); // Ensure proper sign extension.
+    convI2L_reg(src2s, src2); // Ensure proper sign extension.
+    subL_reg_reg(diff, src2s, src1s);
     // Need to consider >=33 bit result, therefore we need signmaskL.
-    signmask64I_regI(sm, diff);
-    andcI_reg_reg(doz, sm, m1, diff); // >=0
-    addI_reg_reg(dst, doz, src1s);
+    signmask64L_regL(sm, diff);
+    andcL_reg_reg(doz, diff, sm); // >=0
+    addI_regL_regL(dst, doz, src1s);
   %}
 %}
 
--- a/hotspot/src/cpu/ppc/vm/templateInterpreter_ppc.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/templateInterpreter_ppc.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -81,24 +81,18 @@
 #if 0
 // Call special ClassCastException constructor taking object to cast
 // and target class as arguments.
-address TemplateInterpreterGenerator::generate_ClassCastException_verbose_handler(const char* name) {
+address TemplateInterpreterGenerator::generate_ClassCastException_verbose_handler() {
   address entry = __ pc();
 
-  // Target class oop is in register R6_ARG4 by convention!
-
   // Expression stack must be empty before entering the VM if an
   // exception happened.
   __ empty_expression_stack();
-  // Setup parameters.
+
   // Thread will be loaded to R3_ARG1.
-  __ load_const_optimized(R4_ARG2, (address) name);
-  __ mr(R5_ARG3, R17_tos);
-  // R6_ARG4 contains specified class.
-  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ClassCastException_verbose));
-#ifdef ASSERT
+  // Target class oop is in register R5_ARG3 by convention!
+  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ClassCastException_verbose, R17_tos, R5_ARG3));
   // Above call must not return here since exception pending.
-  __ should_not_reach_here();
-#endif
+  DEBUG_ONLY(__ should_not_reach_here();)
   return entry;
 }
 #endif
@@ -1538,14 +1532,32 @@
     __ stw(R0, in_bytes(JavaThread::popframe_condition_offset()), R16_thread);
 
     // Get out of the current method and re-execute the call that called us.
-    __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ return_pc, R11_scratch1, R12_scratch2);
+    __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ noreg, R11_scratch1, R12_scratch2);
     __ restore_interpreter_state(R11_scratch1);
     __ ld(R12_scratch2, _ijava_state_neg(top_frame_sp), R11_scratch1);
     __ resize_frame_absolute(R12_scratch2, R11_scratch1, R0);
-    __ mtlr(return_pc);
     if (ProfileInterpreter) {
       __ set_method_data_pointer_for_bcp();
     }
+#if INCLUDE_JVMTI
+    Label L_done;
+
+    __ lbz(R11_scratch1, 0, R14_bcp);
+    __ cmpwi(CCR0, R11_scratch1, Bytecodes::_invokestatic);
+    __ bne(CCR0, L_done);
+
+    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
+    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
+    __ ld(R4_ARG2, 0, R18_locals);
+    __ call_VM(R11_scratch1, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null),
+               R4_ARG2, R19_method, R14_bcp);
+
+    __ cmpdi(CCR0, R11_scratch1, 0);
+    __ beq(CCR0, L_done);
+
+    __ std(R11_scratch1, wordSize, R15_esp);
+    __ bind(L_done);
+#endif // INCLUDE_JVMTI
     __ dispatch_next(vtos);
   }
   // end of JVMTI PopFrame support
--- a/hotspot/src/cpu/ppc/vm/templateTable_ppc_64.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/templateTable_ppc_64.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -64,7 +64,7 @@
   assert_different_registers(Rtmp1, Rtmp2, Rtmp3, Rval, Rbase);
 
   switch (barrier) {
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
     case BarrierSet::G1SATBCT:
     case BarrierSet::G1SATBCTLogging:
       {
@@ -104,7 +104,7 @@
         __ bind(Ldone);
       }
       break;
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
     case BarrierSet::CardTableModRef:
     case BarrierSet::CardTableExtension:
       {
@@ -259,17 +259,17 @@
   switch (value) {
     default: ShouldNotReachHere();
     case 0: {
-      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0);
+      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0, true);
       __ lfs(F15_ftos, simm16_offset, R11_scratch1);
       break;
     }
     case 1: {
-      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0);
+      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0, true);
       __ lfs(F15_ftos, simm16_offset, R11_scratch1);
       break;
     }
     case 2: {
-      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&two, R0);
+      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&two, R0, true);
       __ lfs(F15_ftos, simm16_offset, R11_scratch1);
       break;
     }
@@ -282,12 +282,12 @@
   static double one  = 1.0;
   switch (value) {
     case 0: {
-      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0);
+      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0, true);
       __ lfd(F15_ftos, simm16_offset, R11_scratch1);
       break;
     }
     case 1: {
-      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0);
+      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0, true);
       __ lfd(F15_ftos, simm16_offset, R11_scratch1);
       break;
     }
@@ -3453,16 +3453,6 @@
                  Rscratch1 = R11_scratch1,
                  Rscratch2 = R12_scratch2;
 
-  if (!EnableInvokeDynamic) {
-    // We should not encounter this bytecode if !EnableInvokeDynamic.
-    // The verifier will stop it. However, if we get past the verifier,
-    // this will stop the thread in a reasonable way, without crashing the JVM.
-    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));
-    // The call_VM checks for exception, so we should never return here.
-    __ should_not_reach_here();
-    return;
-  }
-
   prepare_invoke(byte_no, Rmethod, Rret_addr, Rscratch1, noreg, Rflags, Rscratch2);
 
   // Profile this call.
@@ -3486,12 +3476,6 @@
                  Rscratch1 = R11_scratch1,
                  Rscratch2 = R12_scratch2;
 
-  if (!EnableInvokeDynamic) {
-    // Rewriter does not generate this bytecode.
-    __ should_not_reach_here();
-    return;
-  }
-
   prepare_invoke(byte_no, Rmethod, Rret_addr, Rscratch1, Rrecv, Rflags, Rscratch2);
   __ verify_method_ptr(Rmethod);
   __ null_check_throw(Rrecv, -1, Rscratch2);
@@ -3728,9 +3712,9 @@
   transition(atos, atos);
 
   Label Ldone, Lis_null, Lquicked, Lresolved;
-  Register Roffset         = R5_ARG3,
+  Register Roffset         = R6_ARG4,
            RobjKlass       = R4_ARG2,
-           RspecifiedKlass = R6_ARG4, // Generate_ClassCastException_verbose_handler will expect this register.
+           RspecifiedKlass = R5_ARG3, // Generate_ClassCastException_verbose_handler will read value from this register.
            Rcpool          = R11_scratch1,
            Rtags           = R12_scratch2;
 
--- a/hotspot/src/cpu/sparc/vm/assembler_sparc.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/cpu/sparc/vm/assembler_sparc.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -123,8 +123,13 @@
     fpop2_op3    = 0x35,
     impdep1_op3  = 0x36,
     aes3_op3     = 0x36,
+    alignaddr_op3  = 0x36,
+    faligndata_op3 = 0x36,
     flog3_op3    = 0x36,
+    edge_op3     = 0x36,
+    fsrc_op3     = 0x36,
     impdep2_op3  = 0x37,
+    stpartialf_op3 = 0x37,
     jmpl_op3     = 0x38,
     rett_op3     = 0x39,
     trap_op3     = 0x3a,
@@ -175,17 +180,23 @@
 
   enum opfs {
     // selected opfs
+    edge8n_opf         = 0x01,
+
     fmovs_opf          = 0x01,
     fmovd_opf          = 0x02,
 
     fnegs_opf          = 0x05,
     fnegd_opf          = 0x06,
 
+    alignaddr_opf      = 0x18,
+
     fadds_opf          = 0x41,
     faddd_opf          = 0x42,
     fsubs_opf          = 0x45,
     fsubd_opf          = 0x46,
 
+    faligndata_opf     = 0x48,
+
     fmuls_opf          = 0x49,
     fmuld_opf          = 0x4a,
     fdivs_opf          = 0x4d,
@@ -348,6 +359,8 @@
     ASI_PRIMARY            = 0x80,
     ASI_PRIMARY_NOFAULT    = 0x82,
     ASI_PRIMARY_LITTLE     = 0x88,
+    // 8x8-bit partial store
+    ASI_PST8_PRIMARY       = 0xC0,
     // Block initializing store
     ASI_ST_BLKINIT_PRIMARY = 0xE2,
     // Most-Recently-Used (MRU) BIS variant
@@ -585,6 +598,9 @@
   // instruction only in VIS1
   static void vis1_only() { assert( VM_Version::has_vis1(), "This instruction only works on SPARC with VIS1"); }
 
+  // instruction only in VIS2
+  static void vis2_only() { assert( VM_Version::has_vis2(), "This instruction only works on SPARC with VIS2"); }
+
   // instruction only in VIS3
   static void vis3_only() { assert( VM_Version::has_vis3(), "This instruction only works on SPARC with VIS3"); }
 
@@ -1164,6 +1180,20 @@
   inline void wrfprs( Register d) { v9_only(); emit_int32( op(arith_op) | rs1(d) | op3(wrreg_op3) | u_field(6, 29, 25)); }
 
 
+  //  VIS1 instructions
+
+  void alignaddr( Register s1, Register s2, Register d ) { vis1_only(); emit_int32( op(arith_op) | rd(d) | op3(alignaddr_op3) | rs1(s1) | opf(alignaddr_opf) | rs2(s2)); }
+
+  void faligndata( FloatRegister s1, FloatRegister s2, FloatRegister d ) { vis1_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(faligndata_op3) | fs1(s1, FloatRegisterImpl::D) | opf(faligndata_opf) | fs2(s2, FloatRegisterImpl::D)); }
+
+  void fsrc2( FloatRegisterImpl::Width w, FloatRegister s2, FloatRegister d ) { vis1_only(); emit_int32( op(arith_op) | fd(d, w) | op3(fsrc_op3) | opf(0x7A - w) | fs2(s2, w)); }
+
+  void stpartialf( Register s1, Register s2, FloatRegister d, int ia = -1 ) { vis1_only(); emit_int32( op(ldst_op) | fd(d, FloatRegisterImpl::D) | op3(stpartialf_op3) | rs1(s1) | imm_asi(ia) | rs2(s2)); }
+
+  //  VIS2 instructions
+
+  void edge8n( Register s1, Register s2, Register d ) { vis2_only(); emit_int32( op(arith_op) | rd(d) | op3(edge_op3) | rs1(s1) | opf(edge8n_opf) | rs2(s2)); }
+
   // VIS3 instructions
 
   void movstosw( FloatRegister s, Register d ) { vis3_only();  emit_int32( op(arith_op) | rd(d) | op3(mftoi_op3) | opf(mstosw_opf) | fs2(s, FloatRegisterImpl::S)); }
--- a/hotspot/src/cpu/sparc/vm/frame_sparc.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/cpu/sparc/vm/frame_sparc.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -259,8 +259,8 @@
 
   // next two fns read and write Lmonitors value,
  private:
-  BasicObjectLock* interpreter_frame_monitors()           const  { return *interpreter_frame_monitors_addr(); }
-  void interpreter_frame_set_monitors(BasicObjectLock* monitors) {        *interpreter_frame_monitors_addr() = monitors; }
+  BasicObjectLock* interpreter_frame_monitors() const;
+  void interpreter_frame_set_monitors(BasicObjectLock* monitors);
 #else
  public:
   inline interpreterState get_interpreterState() const {
--- a/hotspot/src/cpu/sparc/vm/frame_sparc.inline.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/cpu/sparc/vm/frame_sparc.inline.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -226,6 +226,13 @@
   return (Method**)sp_addr_at( Lmethod->sp_offset_in_saved_window());
 }
 
+inline BasicObjectLock* frame::interpreter_frame_monitors() const {
+  return *interpreter_frame_monitors_addr();
+}
+
+inline void frame::interpreter_frame_set_monitors(BasicObjectLock* monitors) {
+  *interpreter_frame_monitors_addr() = monitors;
+}
 
 // Constant pool cache
 
--- a/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -727,7 +727,6 @@
   if (index_size == sizeof(u2)) {
     get_2_byte_integer_at_bcp(bcp_offset, temp, index, Unsigned);
   } else if (index_size == sizeof(u4)) {
-    assert(EnableInvokeDynamic, "giant index used only for JSR 292");
     get_4_byte_integer_at_bcp(bcp_offset, temp, index);
     assert(ConstantPool::decode_invokedynamic_index(~123) == 123, "else change next line");
     xor3(index, -1, index);  // convert to plain index
--- a/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -83,7 +83,7 @@
  private:
 
 #ifdef PRODUCT
-#define inc_counter_np(a,b,c) (0)
+#define inc_counter_np(a,b,c)
 #else
 #define inc_counter_np(counter, t1, t2) \
   BLOCK_COMMENT("inc_counter " #counter); \
@@ -1055,7 +1055,7 @@
                                               Label& L_loop, bool use_prefetch, bool use_bis);
 
   void disjoint_copy_core(Register from, Register to, Register count, int log2_elem_size,
-                          int iter_size, CopyLoopFunc copy_loop_func) {
+                          int iter_size, StubGenerator::CopyLoopFunc copy_loop_func) {
     Label L_copy;
 
     assert(log2_elem_size <= 3, "the following code should be changed");
@@ -1206,7 +1206,7 @@
     __ inc(from, 8);
     __ sllx(O3, left_shift,  O3);
 
-    disjoint_copy_core(from, to, count, log2_elem_size, 16, copy_16_bytes_shift_loop);
+    disjoint_copy_core(from, to, count, log2_elem_size, 16, &StubGenerator::copy_16_bytes_shift_loop);
 
     __ inccc(count, count_dec>>1 ); // + 8 bytes
     __ brx(Assembler::negative, true, Assembler::pn, L_copy_last_bytes);
@@ -2085,7 +2085,7 @@
       __ dec(count, 4);   // The cmp at the beginning guaranty count >= 4
       __ sllx(O3, 32,  O3);
 
-      disjoint_copy_core(from, to, count, 2, 16, copy_16_bytes_loop);
+      disjoint_copy_core(from, to, count, 2, 16, &StubGenerator::copy_16_bytes_loop);
 
       __ br(Assembler::always, false, Assembler::pt, L_copy_4_bytes);
       __ delayed()->inc(count, 4); // restore 'count'
@@ -2366,7 +2366,7 @@
     // count >= 0 (original count - 8)
     __ mov(from, from64);
 
-    disjoint_copy_core(from64, to64, count, 3, 64, copy_64_bytes_loop);
+    disjoint_copy_core(from64, to64, count, 3, 64, &StubGenerator::copy_64_bytes_loop);
 
       // Restore O4(offset0), O5(offset8)
       __ sub(from64, from, offset0);
@@ -3305,9 +3305,12 @@
   }
 
   address generate_aescrypt_encryptBlock() {
+    // required since we read expanded key 'int' array starting first element without alignment considerations
+    assert((arrayOopDesc::base_offset_in_bytes(T_INT) & 7) == 0,
+           "the following code assumes that first element of an int array is aligned to 8 bytes");
     __ align(CodeEntryAlignment);
-    StubCodeMark mark(this, "StubRoutines", "aesencryptBlock");
-    Label L_doLast128bit, L_storeOutput;
+    StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock");
+    Label L_load_misaligned_input, L_load_expanded_key, L_doLast128bit, L_storeOutput, L_store_misaligned_output;
     address start = __ pc();
     Register from = O0; // source byte array
     Register to = O1;   // destination byte array
@@ -3317,15 +3320,33 @@
     // read expanded key length
     __ ldsw(Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)), keylen, 0);
 
-    // load input into F54-F56; F30-F31 used as temp
-    __ ldf(FloatRegisterImpl::S, from, 0, F30);
-    __ ldf(FloatRegisterImpl::S, from, 4, F31);
-    __ fmov(FloatRegisterImpl::D, F30, F54);
-    __ ldf(FloatRegisterImpl::S, from, 8, F30);
-    __ ldf(FloatRegisterImpl::S, from, 12, F31);
-    __ fmov(FloatRegisterImpl::D, F30, F56);
-
-    // load expanded key
+    // Method to address arbitrary alignment for load instructions:
+    // Check last 3 bits of 'from' address to see if it is aligned to 8-byte boundary
+    // If zero/aligned then continue with double FP load instructions
+    // If not zero/mis-aligned then alignaddr will set GSR.align with number of bytes to skip during faligndata
+    // alignaddr will also convert arbitrary aligned 'from' address to nearest 8-byte aligned address
+    // load 3 * 8-byte components (to read 16 bytes input) in 3 different FP regs starting at this aligned address
+    // faligndata will then extract (based on GSR.align value) the appropriate 8 bytes from the 2 source regs
+
+    // check for 8-byte alignment since source byte array may have an arbitrary alignment if offset mod 8 is non-zero
+    __ andcc(from, 7, G0);
+    __ br(Assembler::notZero, true, Assembler::pn, L_load_misaligned_input);
+    __ delayed()->alignaddr(from, G0, from);
+
+    // aligned case: load input into F54-F56
+    __ ldf(FloatRegisterImpl::D, from, 0, F54);
+    __ ldf(FloatRegisterImpl::D, from, 8, F56);
+    __ ba_short(L_load_expanded_key);
+
+    __ BIND(L_load_misaligned_input);
+    __ ldf(FloatRegisterImpl::D, from, 0, F54);
+    __ ldf(FloatRegisterImpl::D, from, 8, F56);
+    __ ldf(FloatRegisterImpl::D, from, 16, F58);
+    __ faligndata(F54, F56, F54);
+    __ faligndata(F56, F58, F56);
+
+    __ BIND(L_load_expanded_key);
+    // Since we load the expanded key buffer starting at its first element, 8-byte alignment is guaranteed
     for ( int i = 0;  i <= 38; i += 2 ) {
       __ ldf(FloatRegisterImpl::D, key, i*4, as_FloatRegister(i));
     }
@@ -3365,8 +3386,7 @@
     __ ldf(FloatRegisterImpl::D, key, 232, F50);
     __ aes_eround01(F52, F54, F56, F58); //round 13
     __ aes_eround23(F46, F54, F56, F60);
-    __ br(Assembler::always, false, Assembler::pt, L_storeOutput);
-    __ delayed()->nop();
+    __ ba_short(L_storeOutput);
 
     __ BIND(L_doLast128bit);
     __ ldf(FloatRegisterImpl::D, key, 160, F48);
@@ -3377,23 +3397,62 @@
     __ aes_eround01_l(F48, F58, F60, F54); //last round
     __ aes_eround23_l(F50, F58, F60, F56);
 
-    // store output into the destination array, F0-F1 used as temp
-    __ fmov(FloatRegisterImpl::D, F54, F0);
-    __ stf(FloatRegisterImpl::S, F0, to, 0);
-    __ stf(FloatRegisterImpl::S, F1, to, 4);
-    __ fmov(FloatRegisterImpl::D, F56, F0);
-    __ stf(FloatRegisterImpl::S, F0, to, 8);
+    // Method to address arbitrary alignment for store instructions:
+    // Check last 3 bits of 'dest' address to see if it is aligned to 8-byte boundary
+    // If zero/aligned then continue with double FP store instructions
+    // If not zero/mis-aligned then edge8n will generate an edge mask in the result reg (O3 in the case below)
+    // Example: If dest address is 0x07 and the nearest 8-byte aligned address is 0x00 then the edge mask will be 00000001
+    // Compute (8-n) where n is the # of bytes skipped by the partial store (stpartialf) inst per the edge mask, n=7 in this case
+    // We get the value of n from the andcc that checks 'dest' alignment. n is available in O5 in the case below.
+    // Set GSR.align to (8-n) using alignaddr
+    // Circular-byte-shift the store values by n places so that the original bytes are at the correct position for stpartialf
+    // Set the arbitrarily aligned 'dest' address to the nearest 8-byte aligned address
+    // Store (partial) the original first (8-n) bytes starting at the original 'dest' address
+    // Negate the edge mask so that the subsequent stpartialf can store the original (8-n+1)th through 8th bytes at the appropriate address
+    // We need to execute this process for both of the 8-byte result values
+
+    // check for 8-byte alignment since dest byte array may have arbitrary alignment if offset mod 8 is non-zero
+    __ andcc(to, 7, O5);
+    __ br(Assembler::notZero, true, Assembler::pn, L_store_misaligned_output);
+    __ delayed()->edge8n(to, G0, O3);
+
+    // aligned case: store output into the destination array
+    __ stf(FloatRegisterImpl::D, F54, to, 0);
     __ retl();
-    __ delayed()->stf(FloatRegisterImpl::S, F1, to, 12);
+    __ delayed()->stf(FloatRegisterImpl::D, F56, to, 8);
+
+    __ BIND(L_store_misaligned_output);
+    __ add(to, 8, O4);
+    __ mov(8, O2);
+    __ sub(O2, O5, O2);
+    __ alignaddr(O2, G0, O2);
+    __ faligndata(F54, F54, F54);
+    __ faligndata(F56, F56, F56);
+    __ and3(to, -8, to);
+    __ and3(O4, -8, O4);
+    __ stpartialf(to, O3, F54, Assembler::ASI_PST8_PRIMARY);
+    __ stpartialf(O4, O3, F56, Assembler::ASI_PST8_PRIMARY);
+    __ add(to, 8, to);
+    __ add(O4, 8, O4);
+    __ orn(G0, O3, O3);
+    __ stpartialf(to, O3, F54, Assembler::ASI_PST8_PRIMARY);
+    __ retl();
+    __ delayed()->stpartialf(O4, O3, F56, Assembler::ASI_PST8_PRIMARY);
 
     return start;
   }
 
   address generate_aescrypt_decryptBlock() {
+    assert((arrayOopDesc::base_offset_in_bytes(T_INT) & 7) == 0,
+           "the following code assumes that first element of an int array is aligned to 8 bytes");
+    // required since we also read the original key 'byte' array in the decryption stubs
+    assert((arrayOopDesc::base_offset_in_bytes(T_BYTE) & 7) == 0,
+           "the following code assumes that first element of a byte array is aligned to 8 bytes");
     __ align(CodeEntryAlignment);
-    StubCodeMark mark(this, "StubRoutines", "aesdecryptBlock");
+    StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock");
     address start = __ pc();
-    Label L_expand192bit, L_expand256bit, L_common_transform;
+    Label L_load_misaligned_input, L_load_original_key, L_expand192bit, L_expand256bit, L_reload_misaligned_input;
+    Label L_256bit_transform, L_common_transform, L_store_misaligned_output;
     Register from = O0; // source byte array
     Register to = O1;   // destination byte array
     Register key = O2;  // expanded key array
@@ -3403,15 +3462,29 @@
     // read expanded key array length
     __ ldsw(Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)), keylen, 0);
 
-    // load input into F52-F54; F30,F31 used as temp
-    __ ldf(FloatRegisterImpl::S, from, 0, F30);
-    __ ldf(FloatRegisterImpl::S, from, 4, F31);
-    __ fmov(FloatRegisterImpl::D, F30, F52);
-    __ ldf(FloatRegisterImpl::S, from, 8, F30);
-    __ ldf(FloatRegisterImpl::S, from, 12, F31);
-    __ fmov(FloatRegisterImpl::D, F30, F54);
-
+    // save 'from' since we may need to recheck alignment in case of 256-bit decryption
+    __ mov(from, G1);
+
+    // check for 8-byte alignment since source byte array may have an arbitrary alignment if offset mod 8 is non-zero
+    __ andcc(from, 7, G0);
+    __ br(Assembler::notZero, true, Assembler::pn, L_load_misaligned_input);
+    __ delayed()->alignaddr(from, G0, from);
+
+    // aligned case: load input into F52-F54
+    __ ldf(FloatRegisterImpl::D, from, 0, F52);
+    __ ldf(FloatRegisterImpl::D, from, 8, F54);
+    __ ba_short(L_load_original_key);
+
+    __ BIND(L_load_misaligned_input);
+    __ ldf(FloatRegisterImpl::D, from, 0, F52);
+    __ ldf(FloatRegisterImpl::D, from, 8, F54);
+    __ ldf(FloatRegisterImpl::D, from, 16, F56);
+    __ faligndata(F52, F54, F52);
+    __ faligndata(F54, F56, F54);
+
+    __ BIND(L_load_original_key);
     // load original key from SunJCE expanded decryption key
+    // Since we load the original key buffer starting at its first element, 8-byte alignment is guaranteed
     for ( int i = 0;  i <= 3; i++ ) {
       __ ldf(FloatRegisterImpl::S, original_key, i*4, as_FloatRegister(i));
     }
@@ -3432,8 +3505,7 @@
     // perform 128-bit key specific inverse cipher transformation
     __ fxor(FloatRegisterImpl::D, F42, F54, F54);
     __ fxor(FloatRegisterImpl::D, F40, F52, F52);
-    __ br(Assembler::always, false, Assembler::pt, L_common_transform);
-    __ delayed()->nop();
+    __ ba_short(L_common_transform);
 
     __ BIND(L_expand192bit);
 
@@ -3457,8 +3529,7 @@
     __ aes_dround01(F44, F52, F54, F56);
     __ aes_dround23(F42, F56, F58, F54);
     __ aes_dround01(F40, F56, F58, F52);
-    __ br(Assembler::always, false, Assembler::pt, L_common_transform);
-    __ delayed()->nop();
+    __ ba_short(L_common_transform);
 
     __ BIND(L_expand256bit);
 
@@ -3478,14 +3549,31 @@
     __ aes_kexpand2(F50, F56, F58);
 
     for ( int i = 0;  i <= 6; i += 2 ) {
-      __ fmov(FloatRegisterImpl::D, as_FloatRegister(58-i), as_FloatRegister(i));
+      __ fsrc2(FloatRegisterImpl::D, as_FloatRegister(58-i), as_FloatRegister(i));
     }
 
-    // load input into F52-F54
+    // reload original 'from' address
+    __ mov(G1, from);
+
+    // re-check 8-byte alignment
+    __ andcc(from, 7, G0);
+    __ br(Assembler::notZero, true, Assembler::pn, L_reload_misaligned_input);
+    __ delayed()->alignaddr(from, G0, from);
+
+    // aligned case: load input into F52-F54
     __ ldf(FloatRegisterImpl::D, from, 0, F52);
     __ ldf(FloatRegisterImpl::D, from, 8, F54);
+    __ ba_short(L_256bit_transform);
+
+    __ BIND(L_reload_misaligned_input);
+    __ ldf(FloatRegisterImpl::D, from, 0, F52);
+    __ ldf(FloatRegisterImpl::D, from, 8, F54);
+    __ ldf(FloatRegisterImpl::D, from, 16, F56);
+    __ faligndata(F52, F54, F52);
+    __ faligndata(F54, F56, F54);
 
     // perform 256-bit key specific inverse cipher transformation
+    __ BIND(L_256bit_transform);
     __ fxor(FloatRegisterImpl::D, F0, F54, F54);
     __ fxor(FloatRegisterImpl::D, F2, F52, F52);
     __ aes_dround23(F4, F52, F54, F58);
@@ -3515,43 +3603,71 @@
       }
     }
 
-    // store output to destination array, F0-F1 used as temp
-    __ fmov(FloatRegisterImpl::D, F52, F0);
-    __ stf(FloatRegisterImpl::S, F0, to, 0);
-    __ stf(FloatRegisterImpl::S, F1, to, 4);
-    __ fmov(FloatRegisterImpl::D, F54, F0);
-    __ stf(FloatRegisterImpl::S, F0, to, 8);
+    // check for 8-byte alignment since dest byte array may have arbitrary alignment if offset mod 8 is non-zero
+    __ andcc(to, 7, O5);
+    __ br(Assembler::notZero, true, Assembler::pn, L_store_misaligned_output);
+    __ delayed()->edge8n(to, G0, O3);
+
+    // aligned case: store output into the destination array
+    __ stf(FloatRegisterImpl::D, F52, to, 0);
     __ retl();
-    __ delayed()->stf(FloatRegisterImpl::S, F1, to, 12);
+    __ delayed()->stf(FloatRegisterImpl::D, F54, to, 8);
+
+    __ BIND(L_store_misaligned_output);
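+    // misaligned store: same edge-mask + partial-store (stpartialf) scheme as documented
+    // in generate_aescrypt_encryptBlock above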
+    __ add(to, 8, O4);
+    __ mov(8, O2);
+    __ sub(O2, O5, O2);
+    __ alignaddr(O2, G0, O2);
+    __ faligndata(F52, F52, F52);
+    __ faligndata(F54, F54, F54);
+    __ and3(to, -8, to);
+    __ and3(O4, -8, O4);
+    __ stpartialf(to, O3, F52, Assembler::ASI_PST8_PRIMARY);
+    __ stpartialf(O4, O3, F54, Assembler::ASI_PST8_PRIMARY);
+    __ add(to, 8, to);
+    __ add(O4, 8, O4);
+    __ orn(G0, O3, O3);
+    __ stpartialf(to, O3, F52, Assembler::ASI_PST8_PRIMARY);
+    __ retl();
+    __ delayed()->stpartialf(O4, O3, F54, Assembler::ASI_PST8_PRIMARY);
 
     return start;
   }
 
   address generate_cipherBlockChaining_encryptAESCrypt() {
+    assert((arrayOopDesc::base_offset_in_bytes(T_INT) & 7) == 0,
+           "the following code assumes that first element of an int array is aligned to 8 bytes");
+    assert((arrayOopDesc::base_offset_in_bytes(T_BYTE) & 7) == 0,
+           "the following code assumes that first element of a byte array is aligned to 8 bytes");
     __ align(CodeEntryAlignment);
     StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_encryptAESCrypt");
-    Label L_cbcenc128, L_cbcenc192, L_cbcenc256;
+    Label L_cbcenc128, L_load_misaligned_input_128bit, L_128bit_transform, L_store_misaligned_output_128bit;
+    Label L_check_loop_end_128bit, L_cbcenc192, L_load_misaligned_input_192bit, L_192bit_transform;
+    Label L_store_misaligned_output_192bit, L_check_loop_end_192bit, L_cbcenc256, L_load_misaligned_input_256bit;
+    Label L_256bit_transform, L_store_misaligned_output_256bit, L_check_loop_end_256bit;
     address start = __ pc();
-    Register from = O0; // source byte array
-    Register to = O1;   // destination byte array
-    Register key = O2;  // expanded key array
-    Register rvec = O3; // init vector
-    const Register len_reg = O4; // cipher length
-    const Register keylen = O5;  // reg for storing expanded key array length
-
-    // save cipher len to return in the end
-    __ mov(len_reg, L1);
+    Register from = I0; // source byte array
+    Register to = I1;   // destination byte array
+    Register key = I2;  // expanded key array
+    Register rvec = I3; // init vector
+    const Register len_reg = I4; // cipher length
+    const Register keylen = I5;  // reg for storing expanded key array length
+
+    // save cipher len before save_frame, to return in the end
+    __ mov(O4, L0);
+    __ save_frame(0);
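+    // save_frame shifts the register window, so the incoming arguments are now read from
+    // the I registers (see the register declarations above)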
 
     // read expanded key length
     __ ldsw(Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)), keylen, 0);
 
-    // load init vector
+    // load initial vector, 8-byte alignment is guaranteed
     __ ldf(FloatRegisterImpl::D, rvec, 0, F60);
     __ ldf(FloatRegisterImpl::D, rvec, 8, F62);
+    // load key, 8-byte alignment is guaranteed
     __ ldx(key,0,G1);
-    __ ldx(key,8,G2);
-
-    // start loading expanded key
+    __ ldx(key,8,G5);
+
+    // start loading expanded key, 8-byte alignment is guaranteed
     for ( int i = 0, j = 16;  i <= 38; i += 2, j += 8 ) {
       __ ldf(FloatRegisterImpl::D, key, j, as_FloatRegister(i));
     }
@@ -3571,15 +3687,35 @@
     }
 
     // 256-bit original key size
-    __ br(Assembler::always, false, Assembler::pt, L_cbcenc256);
-    __ delayed()->nop();
+    __ ba_short(L_cbcenc256);
 
     __ align(OptoLoopAlignment);
     __ BIND(L_cbcenc128);
+    // check for 8-byte alignment since source byte array may have an arbitrary alignment if offset mod 8 is non-zero
+    __ andcc(from, 7, G0);
+    __ br(Assembler::notZero, true, Assembler::pn, L_load_misaligned_input_128bit);
+    __ delayed()->mov(from, L1); // save original 'from' address before alignaddr
+
+    // aligned case: load input into G3 and G4
     __ ldx(from,0,G3);
     __ ldx(from,8,G4);
+    __ ba_short(L_128bit_transform);
+
+    __ BIND(L_load_misaligned_input_128bit);
+    // can clobber F48, F50 and F52 as they are not used in 128 and 192-bit key encryption
+    __ alignaddr(from, G0, from);
+    __ ldf(FloatRegisterImpl::D, from, 0, F48);
+    __ ldf(FloatRegisterImpl::D, from, 8, F50);
+    __ ldf(FloatRegisterImpl::D, from, 16, F52);
+    __ faligndata(F48, F50, F48);
+    __ faligndata(F50, F52, F50);
+    __ movdtox(F48, G3);
+    __ movdtox(F50, G4);
+    __ mov(L1, from);
+
+    __ BIND(L_128bit_transform);
     __ xor3(G1,G3,G3);
-    __ xor3(G2,G4,G4);
+    __ xor3(G5,G4,G4);
     __ movxtod(G3,F56);
     __ movxtod(G4,F58);
     __ fxor(FloatRegisterImpl::D, F60, F56, F60);
@@ -3598,24 +3734,81 @@
       }
     }
 
+    // check for 8-byte alignment since dest byte array may have arbitrary alignment if offset mod 8 is non-zero
+    __ andcc(to, 7, L1);
+    __ br(Assembler::notZero, true, Assembler::pn, L_store_misaligned_output_128bit);
+    __ delayed()->edge8n(to, G0, L2);
+
+    // aligned case: store output into the destination array
     __ stf(FloatRegisterImpl::D, F60, to, 0);
     __ stf(FloatRegisterImpl::D, F62, to, 8);
+    __ ba_short(L_check_loop_end_128bit);
+
+    __ BIND(L_store_misaligned_output_128bit);
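+    // misaligned store: rotate the result with faligndata, then do edge-masked partial stores
+    // (same scheme as in generate_aescrypt_encryptBlock); the 192-bit and 256-bit paths below
+    // follow the identical pattern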
+    __ add(to, 8, L3);
+    __ mov(8, L4);
+    __ sub(L4, L1, L4);
+    __ alignaddr(L4, G0, L4);
+    // save cipher text before circular right shift
+    // as it needs to be stored as iv for next block (see code before next retl)
+    __ movdtox(F60, L6);
+    __ movdtox(F62, L7);
+    __ faligndata(F60, F60, F60);
+    __ faligndata(F62, F62, F62);
+    __ mov(to, L5);
+    __ and3(to, -8, to);
+    __ and3(L3, -8, L3);
+    __ stpartialf(to, L2, F60, Assembler::ASI_PST8_PRIMARY);
+    __ stpartialf(L3, L2, F62, Assembler::ASI_PST8_PRIMARY);
+    __ add(to, 8, to);
+    __ add(L3, 8, L3);
+    __ orn(G0, L2, L2);
+    __ stpartialf(to, L2, F60, Assembler::ASI_PST8_PRIMARY);
+    __ stpartialf(L3, L2, F62, Assembler::ASI_PST8_PRIMARY);
+    __ mov(L5, to);
+    __ movxtod(L6, F60);
+    __ movxtod(L7, F62);
+
+    __ BIND(L_check_loop_end_128bit);
     __ add(from, 16, from);
     __ add(to, 16, to);
     __ subcc(len_reg, 16, len_reg);
     __ br(Assembler::notEqual, false, Assembler::pt, L_cbcenc128);
     __ delayed()->nop();
+    // re-init initial vector for next block, 8-byte alignment is guaranteed
     __ stf(FloatRegisterImpl::D, F60, rvec, 0);
     __ stf(FloatRegisterImpl::D, F62, rvec, 8);
+    __ restore();
     __ retl();
-    __ delayed()->mov(L1, O0);
+    __ delayed()->mov(L0, O0);
 
     __ align(OptoLoopAlignment);
     __ BIND(L_cbcenc192);
+    // check for 8-byte alignment since source byte array may have an arbitrary alignment if offset mod 8 is non-zero
+    __ andcc(from, 7, G0);
+    __ br(Assembler::notZero, true, Assembler::pn, L_load_misaligned_input_192bit);
+    __ delayed()->mov(from, L1); // save original 'from' address before alignaddr
+
+    // aligned case: load input into G3 and G4
     __ ldx(from,0,G3);
     __ ldx(from,8,G4);
+    __ ba_short(L_192bit_transform);
+
+    __ BIND(L_load_misaligned_input_192bit);
+    // can clobber F48, F50 and F52 as they are not used in 128 and 192-bit key encryption
+    __ alignaddr(from, G0, from);
+    __ ldf(FloatRegisterImpl::D, from, 0, F48);
+    __ ldf(FloatRegisterImpl::D, from, 8, F50);
+    __ ldf(FloatRegisterImpl::D, from, 16, F52);
+    __ faligndata(F48, F50, F48);
+    __ faligndata(F50, F52, F50);
+    __ movdtox(F48, G3);
+    __ movdtox(F50, G4);
+    __ mov(L1, from);
+
+    __ BIND(L_192bit_transform);
     __ xor3(G1,G3,G3);
-    __ xor3(G2,G4,G4);
+    __ xor3(G5,G4,G4);
     __ movxtod(G3,F56);
     __ movxtod(G4,F58);
     __ fxor(FloatRegisterImpl::D, F60, F56, F60);
@@ -3634,24 +3827,81 @@
       }
     }
 
+    // check for 8-byte alignment since dest byte array may have arbitrary alignment if offset mod 8 is non-zero
+    __ andcc(to, 7, L1);
+    __ br(Assembler::notZero, true, Assembler::pn, L_store_misaligned_output_192bit);
+    __ delayed()->edge8n(to, G0, L2);
+
+    // aligned case: store output into the destination array
     __ stf(FloatRegisterImpl::D, F60, to, 0);
     __ stf(FloatRegisterImpl::D, F62, to, 8);
+    __ ba_short(L_check_loop_end_192bit);
+
+    __ BIND(L_store_misaligned_output_192bit);
+    __ add(to, 8, L3);
+    __ mov(8, L4);
+    __ sub(L4, L1, L4);
+    __ alignaddr(L4, G0, L4);
+    __ movdtox(F60, L6);
+    __ movdtox(F62, L7);
+    __ faligndata(F60, F60, F60);
+    __ faligndata(F62, F62, F62);
+    __ mov(to, L5);
+    __ and3(to, -8, to);
+    __ and3(L3, -8, L3);
+    __ stpartialf(to, L2, F60, Assembler::ASI_PST8_PRIMARY);
+    __ stpartialf(L3, L2, F62, Assembler::ASI_PST8_PRIMARY);
+    __ add(to, 8, to);
+    __ add(L3, 8, L3);
+    __ orn(G0, L2, L2);
+    __ stpartialf(to, L2, F60, Assembler::ASI_PST8_PRIMARY);
+    __ stpartialf(L3, L2, F62, Assembler::ASI_PST8_PRIMARY);
+    __ mov(L5, to);
+    __ movxtod(L6, F60);
+    __ movxtod(L7, F62);
+
+    __ BIND(L_check_loop_end_192bit);
     __ add(from, 16, from);
     __ subcc(len_reg, 16, len_reg);
     __ add(to, 16, to);
     __ br(Assembler::notEqual, false, Assembler::pt, L_cbcenc192);
     __ delayed()->nop();
+    // re-init initial vector for next block, 8-byte alignment is guaranteed
     __ stf(FloatRegisterImpl::D, F60, rvec, 0);
     __ stf(FloatRegisterImpl::D, F62, rvec, 8);
+    __ restore();
     __ retl();
-    __ delayed()->mov(L1, O0);
+    __ delayed()->mov(L0, O0);
 
     __ align(OptoLoopAlignment);
     __ BIND(L_cbcenc256);
+    // check for 8-byte alignment since source byte array may have an arbitrary alignment if offset mod 8 is non-zero
+    __ andcc(from, 7, G0);
+    __ br(Assembler::notZero, true, Assembler::pn, L_load_misaligned_input_256bit);
+    __ delayed()->mov(from, L1); // save original 'from' address before alignaddr
+
+    // aligned case: load input into G3 and G4
     __ ldx(from,0,G3);
     __ ldx(from,8,G4);
+    __ ba_short(L_256bit_transform);
+
+    __ BIND(L_load_misaligned_input_256bit);
+    // cannot clobber F48, F50 and F52. F56, F58 can be used though
+    __ alignaddr(from, G0, from);
+    __ movdtox(F60, L2); // save F60 before overwriting
+    __ ldf(FloatRegisterImpl::D, from, 0, F56);
+    __ ldf(FloatRegisterImpl::D, from, 8, F58);
+    __ ldf(FloatRegisterImpl::D, from, 16, F60);
+    __ faligndata(F56, F58, F56);
+    __ faligndata(F58, F60, F58);
+    __ movdtox(F56, G3);
+    __ movdtox(F58, G4);
+    __ mov(L1, from);
+    __ movxtod(L2, F60);
+
+    __ BIND(L_256bit_transform);
     __ xor3(G1,G3,G3);
-    __ xor3(G2,G4,G4);
+    __ xor3(G5,G4,G4);
     __ movxtod(G3,F56);
     __ movxtod(G4,F58);
     __ fxor(FloatRegisterImpl::D, F60, F56, F60);
@@ -3670,26 +3920,69 @@
       }
     }
 
+    // check for 8-byte alignment since dest byte array may have arbitrary alignment if offset mod 8 is non-zero
+    __ andcc(to, 7, L1);
+    __ br(Assembler::notZero, true, Assembler::pn, L_store_misaligned_output_256bit);
+    __ delayed()->edge8n(to, G0, L2);
+
+    // aligned case: store output into the destination array
     __ stf(FloatRegisterImpl::D, F60, to, 0);
     __ stf(FloatRegisterImpl::D, F62, to, 8);
+    __ ba_short(L_check_loop_end_256bit);
+
+    __ BIND(L_store_misaligned_output_256bit);
+    __ add(to, 8, L3);
+    __ mov(8, L4);
+    __ sub(L4, L1, L4);
+    __ alignaddr(L4, G0, L4);
+    __ movdtox(F60, L6);
+    __ movdtox(F62, L7);
+    __ faligndata(F60, F60, F60);
+    __ faligndata(F62, F62, F62);
+    __ mov(to, L5);
+    __ and3(to, -8, to);
+    __ and3(L3, -8, L3);
+    __ stpartialf(to, L2, F60, Assembler::ASI_PST8_PRIMARY);
+    __ stpartialf(L3, L2, F62, Assembler::ASI_PST8_PRIMARY);
+    __ add(to, 8, to);
+    __ add(L3, 8, L3);
+    __ orn(G0, L2, L2);
+    __ stpartialf(to, L2, F60, Assembler::ASI_PST8_PRIMARY);
+    __ stpartialf(L3, L2, F62, Assembler::ASI_PST8_PRIMARY);
+    __ mov(L5, to);
+    __ movxtod(L6, F60);
+    __ movxtod(L7, F62);
+
+    __ BIND(L_check_loop_end_256bit);
     __ add(from, 16, from);
     __ subcc(len_reg, 16, len_reg);
     __ add(to, 16, to);
     __ br(Assembler::notEqual, false, Assembler::pt, L_cbcenc256);
     __ delayed()->nop();
+    // re-init initial vector for next block, 8-byte alignment is guaranteed
     __ stf(FloatRegisterImpl::D, F60, rvec, 0);
     __ stf(FloatRegisterImpl::D, F62, rvec, 8);
+    __ restore();
     __ retl();
-    __ delayed()->mov(L1, O0);
+    __ delayed()->mov(L0, O0);
 
     return start;
   }
 
   address generate_cipherBlockChaining_decryptAESCrypt_Parallel() {
+    assert((arrayOopDesc::base_offset_in_bytes(T_INT) & 7) == 0,
+           "the following code assumes that first element of an int array is aligned to 8 bytes");
+    assert((arrayOopDesc::base_offset_in_bytes(T_BYTE) & 7) == 0,
+           "the following code assumes that first element of a byte array is aligned to 8 bytes");
     __ align(CodeEntryAlignment);
     StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt");
     Label L_cbcdec_end, L_expand192bit, L_expand256bit, L_dec_first_block_start;
     Label L_dec_first_block128, L_dec_first_block192, L_dec_next2_blocks128, L_dec_next2_blocks192, L_dec_next2_blocks256;
+    Label L_load_misaligned_input_first_block, L_transform_first_block, L_load_misaligned_next2_blocks128, L_transform_next2_blocks128;
+    Label L_load_misaligned_next2_blocks192, L_transform_next2_blocks192, L_load_misaligned_next2_blocks256, L_transform_next2_blocks256;
+    Label L_store_misaligned_output_first_block, L_check_decrypt_end, L_store_misaligned_output_next2_blocks128;
+    Label L_check_decrypt_loop_end128, L_store_misaligned_output_next2_blocks192, L_check_decrypt_loop_end192;
+    Label L_store_misaligned_output_next2_blocks256, L_check_decrypt_loop_end256;
     address start = __ pc();
     Register from = I0; // source byte array
     Register to = I1;   // destination byte array
@@ -3704,11 +3997,12 @@
     __ save_frame(0); //args are read from I* registers since we save the frame in the beginning
 
     // load original key from SunJCE expanded decryption key
+    // Since we load the original key buffer starting at its first element, 8-byte alignment is guaranteed
     for ( int i = 0;  i <= 3; i++ ) {
       __ ldf(FloatRegisterImpl::S, original_key, i*4, as_FloatRegister(i));
     }
 
-    // load initial vector
+    // load initial vector, 8-byte alignment is guaranteed
     __ ldx(rvec,0,L0);
     __ ldx(rvec,8,L1);
 
@@ -3733,11 +4027,10 @@
     __ movdtox(F42,L3);
 
     __ and3(len_reg, 16, L4);
-    __ br_null(L4, false, Assembler::pt, L_dec_next2_blocks128);
-    __ delayed()->nop();
-
-    __ br(Assembler::always, false, Assembler::pt, L_dec_first_block_start);
-    __ delayed()->nop();
+    __ br_null_short(L4, Assembler::pt, L_dec_next2_blocks128);
+    __ nop();
+
+    __ ba_short(L_dec_first_block_start);
 
     __ BIND(L_expand192bit);
     // load rest of the 192-bit key
@@ -3758,11 +4051,10 @@
     __ movdtox(F50,L3);
 
     __ and3(len_reg, 16, L4);
-    __ br_null(L4, false, Assembler::pt, L_dec_next2_blocks192);
-    __ delayed()->nop();
-
-    __ br(Assembler::always, false, Assembler::pt, L_dec_first_block_start);
-    __ delayed()->nop();
+    __ br_null_short(L4, Assembler::pt, L_dec_next2_blocks192);
+    __ nop();
+
+    __ ba_short(L_dec_first_block_start);
 
     __ BIND(L_expand256bit);
     // load rest of the 256-bit key
@@ -3785,12 +4077,32 @@
     __ movdtox(F58,L3);
 
     __ and3(len_reg, 16, L4);
-    __ br_null(L4, false, Assembler::pt, L_dec_next2_blocks256);
-    __ delayed()->nop();
+    __ br_null_short(L4, Assembler::pt, L_dec_next2_blocks256);
 
     __ BIND(L_dec_first_block_start);
+    // check for 8-byte alignment since source byte array may have an arbitrary alignment if offset mod 8 is non-zero
+    __ andcc(from, 7, G0);
+    __ br(Assembler::notZero, true, Assembler::pn, L_load_misaligned_input_first_block);
+    __ delayed()->mov(from, G1); // save original 'from' address before alignaddr
+
+    // aligned case: load input into L4 and L5
     __ ldx(from,0,L4);
     __ ldx(from,8,L5);
+    __ ba_short(L_transform_first_block);
+
+    __ BIND(L_load_misaligned_input_first_block);
+    __ alignaddr(from, G0, from);
+    // F58, F60, F62 can be clobbered
+    __ ldf(FloatRegisterImpl::D, from, 0, F58);
+    __ ldf(FloatRegisterImpl::D, from, 8, F60);
+    __ ldf(FloatRegisterImpl::D, from, 16, F62);
+    __ faligndata(F58, F60, F58);
+    __ faligndata(F60, F62, F60);
+    __ movdtox(F58, L4);
+    __ movdtox(F60, L5);
+    __ mov(G1, from);
+
+    __ BIND(L_transform_first_block);
     __ xor3(L2,L4,G1);
     __ movxtod(G1,F60);
     __ xor3(L3,L5,G1);
@@ -3833,9 +4145,36 @@
     __ fxor(FloatRegisterImpl::D, F56, F60, F60);
     __ fxor(FloatRegisterImpl::D, F58, F62, F62);
 
+    // check for 8-byte alignment since dest byte array may have arbitrary alignment if offset mod 8 is non-zero
+    __ andcc(to, 7, G1);
+    __ br(Assembler::notZero, true, Assembler::pn, L_store_misaligned_output_first_block);
+    __ delayed()->edge8n(to, G0, G2);
+
+    // aligned case: store output into the destination array
     __ stf(FloatRegisterImpl::D, F60, to, 0);
     __ stf(FloatRegisterImpl::D, F62, to, 8);
-
+    __ ba_short(L_check_decrypt_end);
+
+    __ BIND(L_store_misaligned_output_first_block);
+    __ add(to, 8, G3);
+    __ mov(8, G4);
+    __ sub(G4, G1, G4);
+    __ alignaddr(G4, G0, G4);
+    __ faligndata(F60, F60, F60);
+    __ faligndata(F62, F62, F62);
+    __ mov(to, G1);
+    __ and3(to, -8, to);
+    __ and3(G3, -8, G3);
+    __ stpartialf(to, G2, F60, Assembler::ASI_PST8_PRIMARY);
+    __ stpartialf(G3, G2, F62, Assembler::ASI_PST8_PRIMARY);
+    __ add(to, 8, to);
+    __ add(G3, 8, G3);
+    __ orn(G0, G2, G2);
+    __ stpartialf(to, G2, F60, Assembler::ASI_PST8_PRIMARY);
+    __ stpartialf(G3, G2, F62, Assembler::ASI_PST8_PRIMARY);
+    __ mov(G1, to);
+
+    __ BIND(L_check_decrypt_end);
     __ add(from, 16, from);
     __ add(to, 16, to);
     __ subcc(len_reg, 16, len_reg);
@@ -3852,17 +4191,44 @@
     __ BIND(L_dec_next2_blocks128);
     __ nop();
 
-    // F40:F42 used for first 16-bytes
+    // check for 8-byte alignment since source byte array may have an arbitrary alignment if offset mod 8 is non-zero
+    __ andcc(from, 7, G0);
+    __ br(Assembler::notZero, true, Assembler::pn, L_load_misaligned_next2_blocks128);
+    __ delayed()->mov(from, G1); // save original 'from' address before alignaddr
+
+    // aligned case: load input into G4, G5, L4 and L5
     __ ldx(from,0,G4);
     __ ldx(from,8,G5);
+    __ ldx(from,16,L4);
+    __ ldx(from,24,L5);
+    __ ba_short(L_transform_next2_blocks128);
+
+    __ BIND(L_load_misaligned_next2_blocks128);
+    __ alignaddr(from, G0, from);
+    // F40, F42, F58, F60, F62 can be clobbered
+    __ ldf(FloatRegisterImpl::D, from, 0, F40);
+    __ ldf(FloatRegisterImpl::D, from, 8, F42);
+    __ ldf(FloatRegisterImpl::D, from, 16, F60);
+    __ ldf(FloatRegisterImpl::D, from, 24, F62);
+    __ ldf(FloatRegisterImpl::D, from, 32, F58);
+    __ faligndata(F40, F42, F40);
+    __ faligndata(F42, F60, F42);
+    __ faligndata(F60, F62, F60);
+    __ faligndata(F62, F58, F62);
+    __ movdtox(F40, G4);
+    __ movdtox(F42, G5);
+    __ movdtox(F60, L4);
+    __ movdtox(F62, L5);
+    __ mov(G1, from);
+
+    __ BIND(L_transform_next2_blocks128);
+    // F40:F42 used for first 16-bytes
     __ xor3(L2,G4,G1);
     __ movxtod(G1,F40);
     __ xor3(L3,G5,G1);
     __ movxtod(G1,F42);
 
     // F60:F62 used for next 16-bytes
-    __ ldx(from,16,L4);
-    __ ldx(from,24,L5);
     __ xor3(L2,L4,G1);
     __ movxtod(G1,F60);
     __ xor3(L3,L5,G1);
@@ -3891,9 +4257,6 @@
     __ fxor(FloatRegisterImpl::D, F46, F40, F40);
     __ fxor(FloatRegisterImpl::D, F44, F42, F42);
 
-    __ stf(FloatRegisterImpl::D, F40, to, 0);
-    __ stf(FloatRegisterImpl::D, F42, to, 8);
-
     __ movxtod(G4,F56);
     __ movxtod(G5,F58);
     __ mov(L4,L0);
@@ -3901,32 +4264,93 @@
     __ fxor(FloatRegisterImpl::D, F56, F60, F60);
     __ fxor(FloatRegisterImpl::D, F58, F62, F62);
 
+    // For mis-aligned store of 32 bytes of result we can do:
+    // Circular right-shift all 4 FP registers so that 'head' and 'tail'
+    // parts that need to be stored starting at the mis-aligned address end up in a single FP reg
+    // the other 3 FP regs can thus be stored using regular stores
+    // we then use the edge + partial-store mechanism to store the 'head' and 'tail' parts
+
+    // check for 8-byte alignment since dest byte array may have arbitrary alignment if offset mod 8 is non-zero
+    __ andcc(to, 7, G1);
+    __ br(Assembler::notZero, true, Assembler::pn, L_store_misaligned_output_next2_blocks128);
+    __ delayed()->edge8n(to, G0, G2);
+
+    // aligned case: store output into the destination array
+    __ stf(FloatRegisterImpl::D, F40, to, 0);
+    __ stf(FloatRegisterImpl::D, F42, to, 8);
     __ stf(FloatRegisterImpl::D, F60, to, 16);
     __ stf(FloatRegisterImpl::D, F62, to, 24);
-
+    __ ba_short(L_check_decrypt_loop_end128);
+
+    __ BIND(L_store_misaligned_output_next2_blocks128);
+    __ mov(8, G4);
+    __ sub(G4, G1, G4);
+    __ alignaddr(G4, G0, G4);
+    __ faligndata(F40, F42, F56); // F56 can be clobbered
+    __ faligndata(F42, F60, F42);
+    __ faligndata(F60, F62, F60);
+    __ faligndata(F62, F40, F40);
+    __ mov(to, G1);
+    __ and3(to, -8, to);
+    __ stpartialf(to, G2, F40, Assembler::ASI_PST8_PRIMARY);
+    __ stf(FloatRegisterImpl::D, F56, to, 8);
+    __ stf(FloatRegisterImpl::D, F42, to, 16);
+    __ stf(FloatRegisterImpl::D, F60, to, 24);
+    __ add(to, 32, to);
+    __ orn(G0, G2, G2);
+    __ stpartialf(to, G2, F40, Assembler::ASI_PST8_PRIMARY);
+    __ mov(G1, to);
+
+    __ BIND(L_check_decrypt_loop_end128);
     __ add(from, 32, from);
     __ add(to, 32, to);
     __ subcc(len_reg, 32, len_reg);
     __ br(Assembler::notEqual, false, Assembler::pt, L_dec_next2_blocks128);
     __ delayed()->nop();
-    __ br(Assembler::always, false, Assembler::pt, L_cbcdec_end);
-    __ delayed()->nop();
+    __ ba_short(L_cbcdec_end);
 
     __ align(OptoLoopAlignment);
     __ BIND(L_dec_next2_blocks192);
     __ nop();
 
-    // F48:F50 used for first 16-bytes
+    // check for 8-byte alignment since source byte array may have an arbitrary alignment if offset mod 8 is non-zero
+    __ andcc(from, 7, G0);
+    __ br(Assembler::notZero, true, Assembler::pn, L_load_misaligned_next2_blocks192);
+    __ delayed()->mov(from, G1); // save original 'from' address before alignaddr
+
+    // aligned case: load input into G4, G5, L4 and L5
     __ ldx(from,0,G4);
     __ ldx(from,8,G5);
+    __ ldx(from,16,L4);
+    __ ldx(from,24,L5);
+    __ ba_short(L_transform_next2_blocks192);
+
+    __ BIND(L_load_misaligned_next2_blocks192);
+    __ alignaddr(from, G0, from);
+    // F48, F50, F52, F60, F62 can be clobbered
+    __ ldf(FloatRegisterImpl::D, from, 0, F48);
+    __ ldf(FloatRegisterImpl::D, from, 8, F50);
+    __ ldf(FloatRegisterImpl::D, from, 16, F60);
+    __ ldf(FloatRegisterImpl::D, from, 24, F62);
+    __ ldf(FloatRegisterImpl::D, from, 32, F52);
+    __ faligndata(F48, F50, F48);
+    __ faligndata(F50, F60, F50);
+    __ faligndata(F60, F62, F60);
+    __ faligndata(F62, F52, F62);
+    __ movdtox(F48, G4);
+    __ movdtox(F50, G5);
+    __ movdtox(F60, L4);
+    __ movdtox(F62, L5);
+    __ mov(G1, from);
+
+    __ BIND(L_transform_next2_blocks192);
+    // F48:F50 used for first 16-bytes
     __ xor3(L2,G4,G1);
     __ movxtod(G1,F48);
     __ xor3(L3,G5,G1);
     __ movxtod(G1,F50);
 
     // F60:F62 used for next 16-bytes
-    __ ldx(from,16,L4);
-    __ ldx(from,24,L5);
     __ xor3(L2,L4,G1);
     __ movxtod(G1,F60);
     __ xor3(L3,L5,G1);
@@ -3955,9 +4379,6 @@
     __ fxor(FloatRegisterImpl::D, F54, F48, F48);
     __ fxor(FloatRegisterImpl::D, F52, F50, F50);
 
-    __ stf(FloatRegisterImpl::D, F48, to, 0);
-    __ stf(FloatRegisterImpl::D, F50, to, 8);
-
     __ movxtod(G4,F56);
     __ movxtod(G5,F58);
     __ mov(L4,L0);
@@ -3965,32 +4386,87 @@
     __ fxor(FloatRegisterImpl::D, F56, F60, F60);
     __ fxor(FloatRegisterImpl::D, F58, F62, F62);
 
+    // check for 8-byte alignment since dest byte array may have arbitrary alignment if offset mod 8 is non-zero
+    __ andcc(to, 7, G1);
+    __ br(Assembler::notZero, true, Assembler::pn, L_store_misaligned_output_next2_blocks192);
+    __ delayed()->edge8n(to, G0, G2);
+
+    // aligned case: store output into the destination array
+    __ stf(FloatRegisterImpl::D, F48, to, 0);
+    __ stf(FloatRegisterImpl::D, F50, to, 8);
     __ stf(FloatRegisterImpl::D, F60, to, 16);
     __ stf(FloatRegisterImpl::D, F62, to, 24);
-
+    __ ba_short(L_check_decrypt_loop_end192);
+
+    __ BIND(L_store_misaligned_output_next2_blocks192);
+    __ mov(8, G4);
+    __ sub(G4, G1, G4);
+    __ alignaddr(G4, G0, G4);
+    __ faligndata(F48, F50, F56); // F56 can be clobbered
+    __ faligndata(F50, F60, F50);
+    __ faligndata(F60, F62, F60);
+    __ faligndata(F62, F48, F48);
+    __ mov(to, G1);
+    __ and3(to, -8, to);
+    __ stpartialf(to, G2, F48, Assembler::ASI_PST8_PRIMARY);
+    __ stf(FloatRegisterImpl::D, F56, to, 8);
+    __ stf(FloatRegisterImpl::D, F50, to, 16);
+    __ stf(FloatRegisterImpl::D, F60, to, 24);
+    __ add(to, 32, to);
+    __ orn(G0, G2, G2);
+    __ stpartialf(to, G2, F48, Assembler::ASI_PST8_PRIMARY);
+    __ mov(G1, to);
+
+    __ BIND(L_check_decrypt_loop_end192);
     __ add(from, 32, from);
     __ add(to, 32, to);
     __ subcc(len_reg, 32, len_reg);
     __ br(Assembler::notEqual, false, Assembler::pt, L_dec_next2_blocks192);
     __ delayed()->nop();
-    __ br(Assembler::always, false, Assembler::pt, L_cbcdec_end);
-    __ delayed()->nop();
+    __ ba_short(L_cbcdec_end);
 
     __ align(OptoLoopAlignment);
     __ BIND(L_dec_next2_blocks256);
     __ nop();
 
-    // F0:F2 used for first 16-bytes
+    // check for 8-byte alignment since source byte array may have an arbitrary alignment if offset mod 8 is non-zero
+    __ andcc(from, 7, G0);
+    __ br(Assembler::notZero, true, Assembler::pn, L_load_misaligned_next2_blocks256);
+    __ delayed()->mov(from, G1); // save original 'from' address before alignaddr
+
+    // aligned case: load input into G4, G5, L4 and L5
     __ ldx(from,0,G4);
     __ ldx(from,8,G5);
+    __ ldx(from,16,L4);
+    __ ldx(from,24,L5);
+    __ ba_short(L_transform_next2_blocks256);
+
+    __ BIND(L_load_misaligned_next2_blocks256);
+    __ alignaddr(from, G0, from);
+    // F0, F2, F4, F60, F62 can be clobbered
+    __ ldf(FloatRegisterImpl::D, from, 0, F0);
+    __ ldf(FloatRegisterImpl::D, from, 8, F2);
+    __ ldf(FloatRegisterImpl::D, from, 16, F60);
+    __ ldf(FloatRegisterImpl::D, from, 24, F62);
+    __ ldf(FloatRegisterImpl::D, from, 32, F4);
+    __ faligndata(F0, F2, F0);
+    __ faligndata(F2, F60, F2);
+    __ faligndata(F60, F62, F60);
+    __ faligndata(F62, F4, F62);
+    __ movdtox(F0, G4);
+    __ movdtox(F2, G5);
+    __ movdtox(F60, L4);
+    __ movdtox(F62, L5);
+    __ mov(G1, from);
+
+    __ BIND(L_transform_next2_blocks256);
+    // F0:F2 used for first 16-bytes
     __ xor3(L2,G4,G1);
     __ movxtod(G1,F0);
     __ xor3(L3,G5,G1);
     __ movxtod(G1,F2);
 
     // F60:F62 used for next 16-bytes
-    __ ldx(from,16,L4);
-    __ ldx(from,24,L5);
     __ xor3(L2,L4,G1);
     __ movxtod(G1,F60);
     __ xor3(L3,L5,G1);
@@ -4043,9 +4519,6 @@
     __ fxor(FloatRegisterImpl::D, F6, F0, F0);
     __ fxor(FloatRegisterImpl::D, F4, F2, F2);
 
-    __ stf(FloatRegisterImpl::D, F0, to, 0);
-    __ stf(FloatRegisterImpl::D, F2, to, 8);
-
     __ movxtod(G4,F56);
     __ movxtod(G5,F58);
     __ mov(L4,L0);
@@ -4053,9 +4526,38 @@
     __ fxor(FloatRegisterImpl::D, F56, F60, F60);
     __ fxor(FloatRegisterImpl::D, F58, F62, F62);
 
+    // check for 8-byte alignment since dest byte array may have arbitrary alignment if offset mod 8 is non-zero
+    __ andcc(to, 7, G1);
+    __ br(Assembler::notZero, true, Assembler::pn, L_store_misaligned_output_next2_blocks256);
+    __ delayed()->edge8n(to, G0, G2);
+
+    // aligned case: store output into the destination array
+    __ stf(FloatRegisterImpl::D, F0, to, 0);
+    __ stf(FloatRegisterImpl::D, F2, to, 8);
     __ stf(FloatRegisterImpl::D, F60, to, 16);
     __ stf(FloatRegisterImpl::D, F62, to, 24);
-
+    __ ba_short(L_check_decrypt_loop_end256);
+
+    __ BIND(L_store_misaligned_output_next2_blocks256);
+    __ mov(8, G4);
+    __ sub(G4, G1, G4);
+    __ alignaddr(G4, G0, G4);
+    __ faligndata(F0, F2, F56); // F56 can be clobbered
+    __ faligndata(F2, F60, F2);
+    __ faligndata(F60, F62, F60);
+    __ faligndata(F62, F0, F0);
+    __ mov(to, G1);
+    __ and3(to, -8, to);
+    __ stpartialf(to, G2, F0, Assembler::ASI_PST8_PRIMARY);
+    __ stf(FloatRegisterImpl::D, F56, to, 8);
+    __ stf(FloatRegisterImpl::D, F2, to, 16);
+    __ stf(FloatRegisterImpl::D, F60, to, 24);
+    __ add(to, 32, to);
+    __ orn(G0, G2, G2);
+    __ stpartialf(to, G2, F0, Assembler::ASI_PST8_PRIMARY);
+    __ mov(G1, to);
+
+    __ BIND(L_check_decrypt_loop_end256);
     __ add(from, 32, from);
     __ add(to, 32, to);
     __ subcc(len_reg, 32, len_reg);
@@ -4063,6 +4565,7 @@
     __ delayed()->nop();
 
     __ BIND(L_cbcdec_end);
+    // re-init initial vector for next block, 8-byte alignment is guaranteed
     __ stx(L0, rvec, 0);
     __ stx(L1, rvec, 8);
     __ restore();
--- a/hotspot/src/cpu/sparc/vm/stubRoutines_sparc.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/cpu/sparc/vm/stubRoutines_sparc.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -41,7 +41,7 @@
 enum /* platform_dependent_constants */ {
   // %%%%%%%% May be able to shrink this a lot
   code_size1 = 20000,           // simply increase if too small (assembler will crash if too small)
-  code_size2 = 20000            // simply increase if too small (assembler will crash if too small)
+  code_size2 = 22000            // simply increase if too small (assembler will crash if too small)
 };
 
 class Sparc {
--- a/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -1888,7 +1888,7 @@
   }
 
 #if INCLUDE_JVMTI
-  if (EnableInvokeDynamic) {
+  {
     Label L_done;
 
     __ ldub(Address(Lbcp, 0), G1_scratch);  // Load current bytecode
--- a/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -3209,12 +3209,6 @@
   transition(vtos, vtos);
   assert(byte_no == f1_byte, "use this argument");
 
-  if (!EnableInvokeDynamic) {
-    // rewriter does not generate this bytecode
-    __ should_not_reach_here();
-    return;
-  }
-
   const Register Rret       = Lscratch;
   const Register G4_mtype   = G4_scratch;
   const Register O0_recv    = O0;
@@ -3240,17 +3234,6 @@
   transition(vtos, vtos);
   assert(byte_no == f1_byte, "use this argument");
 
-  if (!EnableInvokeDynamic) {
-    // We should not encounter this bytecode if !EnableInvokeDynamic.
-    // The verifier will stop it.  However, if we get past the verifier,
-    // this will stop the thread in a reasonable way, without crashing the JVM.
-    __ call_VM(noreg, CAST_FROM_FN_PTR(address,
-                     InterpreterRuntime::throw_IncompatibleClassChangeError));
-    // the call_VM checks for exception, so we should never return here.
-    __ should_not_reach_here();
-    return;
-  }
-
   const Register Rret        = Lscratch;
   const Register G4_callsite = G4_scratch;
   const Register Rscratch    = G3_scratch;
--- a/hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -266,9 +266,9 @@
   if (!has_vis1()) // Drop to 0 if no VIS1 support
     UseVIS = 0;
 
-  // T2 and above should have support for AES instructions
+  // SPARC T4 and above should have support for AES instructions
   if (has_aes()) {
-    if (UseVIS > 0) { // AES intrinsics use FXOR instruction which is VIS1
+    if (UseVIS > 2) { // AES intrinsics use MOVxTOd/MOVdTOx which are VIS3
       if (FLAG_IS_DEFAULT(UseAES)) {
         FLAG_SET_DEFAULT(UseAES, true);
       }
@@ -282,7 +282,7 @@
       }
     } else {
         if (UseAES || UseAESIntrinsics) {
-          warning("SPARC AES intrinsics require VIS1 instruction support. Intrinsics will be disabled.");
+          warning("SPARC AES intrinsics require VIS3 instruction support. Intrinsics will be disabled.");
           if (UseAES) {
             FLAG_SET_DEFAULT(UseAES, false);
           }
--- a/hotspot/src/cpu/x86/vm/assembler_x86.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -1766,7 +1766,7 @@
 
 // Move Unaligned 256bit Vector
 void Assembler::vmovdqu(XMMRegister dst, XMMRegister src) {
-  assert(UseAVX, "");
+  assert(UseAVX > 0, "");
   bool vector256 = true;
   int encode = vex_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F3, vector256);
   emit_int8(0x6F);
@@ -1774,7 +1774,7 @@
 }
 
 void Assembler::vmovdqu(XMMRegister dst, Address src) {
-  assert(UseAVX, "");
+  assert(UseAVX > 0, "");
   InstructionMark im(this);
   bool vector256 = true;
   vex_prefix(dst, xnoreg, src, VEX_SIMD_F3, vector256);
@@ -1783,7 +1783,7 @@
 }
 
 void Assembler::vmovdqu(Address dst, XMMRegister src) {
-  assert(UseAVX, "");
+  assert(UseAVX > 0, "");
   InstructionMark im(this);
   bool vector256 = true;
   // swap src<->dst for encoding
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86_32.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_32.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -207,7 +207,6 @@
   if (index_size == sizeof(u2)) {
     load_unsigned_short(reg, Address(rsi, bcp_offset));
   } else if (index_size == sizeof(u4)) {
-    assert(EnableInvokeDynamic, "giant index used only for JSR 292");
     movl(reg, Address(rsi, bcp_offset));
     // Check if the secondary index definition is still ~x, otherwise
     // we have to change the following assembler code to calculate the
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -205,7 +205,6 @@
   if (index_size == sizeof(u2)) {
     load_unsigned_short(index, Address(r13, bcp_offset));
   } else if (index_size == sizeof(u4)) {
-    assert(EnableInvokeDynamic, "giant index used only for JSR 292");
     movl(index, Address(r13, bcp_offset));
     // Check if the secondary index definition is still ~x, otherwise
     // we have to change the following assembler code to calculate the
--- a/hotspot/src/cpu/x86/vm/jni_x86.h	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/cpu/x86/vm/jni_x86.h	Wed Jul 05 19:40:49 2017 +0200
@@ -26,8 +26,14 @@
 #ifndef _JAVASOFT_JNI_MD_H_
 #define _JAVASOFT_JNI_MD_H_
 
-#if defined(SOLARIS) || defined(LINUX) || defined(_ALLBSD_SOURCE)
+#if defined(_WIN32)
+  #define JNIEXPORT __declspec(dllexport)
+  #define JNIIMPORT __declspec(dllimport)
+  #define JNICALL __stdcall
 
+  typedef int jint;
+  typedef __int64 jlong;
+#else
 
 // Note: please do not change these without also changing jni_md.h in the JDK
 // repository
@@ -50,13 +56,6 @@
   typedef long long jlong;
 #endif
 
-#else
-  #define JNIEXPORT __declspec(dllexport)
-  #define JNIIMPORT __declspec(dllimport)
-  #define JNICALL __stdcall
-
-  typedef int jint;
-  typedef __int64 jlong;
 #endif
 
 typedef signed char jbyte;
--- a/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -3152,10 +3152,12 @@
   // if fast computation is not possible, result is NaN. Requires
   // fallback from user of this macro.
   // increase precision for intermediate steps of the computation
+  BLOCK_COMMENT("fast_pow {");
   increase_precision();
   fyl2x();                 // Stack: (Y*log2(X)) ...
   pow_exp_core_encoding(); // Stack: exp(X) ...
   restore_precision();
+  BLOCK_COMMENT("} fast_pow");
 }
 
 void MacroAssembler::fast_exp() {
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -76,12 +76,7 @@
     Interpreter::stackElementWords;
 
 #ifdef ASSERT
-  if (!EnableInvokeDynamic) {
-    // @@@ FIXME: Should we correct interpreter_frame_sender_sp in the calling sequences?
-    // Probably, since deoptimization doesn't work yet.
-    assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable");
-  }
-  assert(caller->sp() == interpreter_frame->sender_sp(), "Frame not properly walkable(2)");
+  assert(caller->sp() == interpreter_frame->sender_sp(), "Frame not properly walkable");
 #endif
 
   interpreter_frame->interpreter_frame_set_method(method);
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -1831,7 +1831,7 @@
   __ movl(Address(thread, JavaThread::popframe_condition_offset()), JavaThread::popframe_inactive);
 
 #if INCLUDE_JVMTI
-  if (EnableInvokeDynamic) {
+  {
     Label L_done;
     const Register local0 = rdi;
 
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -1848,7 +1848,7 @@
           JavaThread::popframe_inactive);
 
 #if INCLUDE_JVMTI
-  if (EnableInvokeDynamic) {
+  {
     Label L_done;
     const Register local0 = r14;
 
--- a/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -3128,12 +3128,6 @@
   const Register rcx_recv   = rcx;
   const Register rdx_flags  = rdx;
 
-  if (!EnableInvokeDynamic) {
-    // rewriter does not generate this bytecode
-    __ should_not_reach_here();
-    return;
-  }
-
   prepare_invoke(byte_no, rbx_method, rax_mtype, rcx_recv);
   __ verify_method_ptr(rbx_method);
   __ verify_oop(rcx_recv);
@@ -3156,17 +3150,6 @@
   transition(vtos, vtos);
   assert(byte_no == f1_byte, "use this argument");
 
-  if (!EnableInvokeDynamic) {
-    // We should not encounter this bytecode if !EnableInvokeDynamic.
-    // The verifier will stop it.  However, if we get past the verifier,
-    // this will stop the thread in a reasonable way, without crashing the JVM.
-    __ call_VM(noreg, CAST_FROM_FN_PTR(address,
-                     InterpreterRuntime::throw_IncompatibleClassChangeError));
-    // the call_VM checks for exception, so we should never return here.
-    __ should_not_reach_here();
-    return;
-  }
-
   const Register rbx_method   = rbx;
   const Register rax_callsite = rax;
 
--- a/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -3179,12 +3179,6 @@
   const Register rcx_recv   = rcx;
   const Register rdx_flags  = rdx;
 
-  if (!EnableInvokeDynamic) {
-    // rewriter does not generate this bytecode
-    __ should_not_reach_here();
-    return;
-  }
-
   prepare_invoke(byte_no, rbx_method, rax_mtype, rcx_recv);
   __ verify_method_ptr(rbx_method);
   __ verify_oop(rcx_recv);
@@ -3207,17 +3201,6 @@
   transition(vtos, vtos);
   assert(byte_no == f1_byte, "use this argument");
 
-  if (!EnableInvokeDynamic) {
-    // We should not encounter this bytecode if !EnableInvokeDynamic.
-    // The verifier will stop it.  However, if we get past the verifier,
-    // this will stop the thread in a reasonable way, without crashing the JVM.
-    __ call_VM(noreg, CAST_FROM_FN_PTR(address,
-                     InterpreterRuntime::throw_IncompatibleClassChangeError));
-    // the call_VM checks for exception, so we should never return here.
-    __ should_not_reach_here();
-    return;
-  }
-
   const Register rbx_method   = rbx;
   const Register rax_callsite = rax;
 
--- a/hotspot/src/cpu/x86/vm/vm_version_x86.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/cpu/x86/vm/vm_version_x86.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -263,6 +263,10 @@
     // and check upper YMM bits after it.
     //
     VM_Version::set_avx_cpuFeatures(); // Enable temporary to pass asserts
+    intx saved_useavx = UseAVX;
+    intx saved_usesse = UseSSE;
+    UseAVX = 1;
+    UseSSE = 2;
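+    // temporarily raise UseAVX/UseSSE so the AVX/SSE instructions emitted while testing
+    // the ymm registers pass the assembler asserts (e.g. UseAVX > 0); restored below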
 
     // load value into all 32 bytes of ymm7 register
     __ movl(rcx, VM_Version::ymm_test_value());
@@ -292,6 +296,8 @@
 #endif
 
     VM_Version::clean_cpuFeatures();
+    UseAVX = saved_useavx;
+    UseSSE = saved_usesse;
 
     //
     // cpuid(0x7) Structured Extended Features
--- a/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -40,6 +40,7 @@
 #include "runtime/deoptimization.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/interfaceSupport.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "runtime/synchronizer.hpp"
--- a/hotspot/src/os/aix/vm/os_aix.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/os/aix/vm/os_aix.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -55,6 +55,7 @@
 #include "runtime/javaCalls.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/objectMonitor.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/osThread.hpp"
 #include "runtime/perfMemory.hpp"
 #include "runtime/sharedRuntime.hpp"
--- a/hotspot/src/os/aix/vm/os_aix.inline.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/os/aix/vm/os_aix.inline.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -26,12 +26,9 @@
 #ifndef OS_AIX_VM_OS_AIX_INLINE_HPP
 #define OS_AIX_VM_OS_AIX_INLINE_HPP
 
-#include "runtime/atomic.hpp"
+#include "runtime/atomic.inline.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/os.hpp"
-#ifdef TARGET_OS_ARCH_aix_ppc
-# include "atomic_aix_ppc.inline.hpp"
-# include "orderAccess_aix_ppc.inline.hpp"
-#endif
 
 // System includes
 
--- a/hotspot/src/os/aix/vm/thread_aix.inline.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/os/aix/vm/thread_aix.inline.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -26,13 +26,10 @@
 #ifndef OS_AIX_VM_THREAD_AIX_INLINE_HPP
 #define OS_AIX_VM_THREAD_AIX_INLINE_HPP
 
-#include "runtime/atomic.hpp"
 #include "runtime/prefetch.hpp"
 #include "runtime/thread.hpp"
 #include "runtime/threadLocalStorage.hpp"
 
-#include "atomic_aix_ppc.inline.hpp"
-#include "orderAccess_aix_ppc.inline.hpp"
 #include "prefetch_aix_ppc.inline.hpp"
 
 // Contains inlined functions for class Thread and ThreadLocalStorage
--- a/hotspot/src/os/bsd/vm/os_bsd.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/os/bsd/vm/os_bsd.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -48,6 +48,7 @@
 #include "runtime/javaCalls.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/objectMonitor.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/osThread.hpp"
 #include "runtime/perfMemory.hpp"
 #include "runtime/sharedRuntime.hpp"
--- a/hotspot/src/os/bsd/vm/os_bsd.inline.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/os/bsd/vm/os_bsd.inline.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -26,15 +26,9 @@
 #define OS_BSD_VM_OS_BSD_INLINE_HPP
 
 #include "runtime/atomic.inline.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/os.hpp"
 
-#ifdef TARGET_OS_ARCH_bsd_x86
-# include "orderAccess_bsd_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_bsd_zero
-# include "orderAccess_bsd_zero.inline.hpp"
-#endif
-
 // System includes
 
 #include <unistd.h>
--- a/hotspot/src/os/bsd/vm/thread_bsd.inline.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/os/bsd/vm/thread_bsd.inline.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -29,18 +29,12 @@
 #error "This file should only be included from thread.inline.hpp"
 #endif
 
-#include "runtime/atomic.hpp"
-#include "runtime/prefetch.hpp"
 #include "runtime/thread.hpp"
 #include "runtime/threadLocalStorage.hpp"
 #ifdef TARGET_OS_ARCH_bsd_x86
-# include "atomic_bsd_x86.inline.hpp"
-# include "orderAccess_bsd_x86.inline.hpp"
 # include "prefetch_bsd_x86.inline.hpp"
 #endif
 #ifdef TARGET_OS_ARCH_bsd_zero
-# include "atomic_bsd_zero.inline.hpp"
-# include "orderAccess_bsd_zero.inline.hpp"
 # include "prefetch_bsd_zero.inline.hpp"
 #endif
 
--- a/hotspot/src/os/linux/vm/os_linux.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/os/linux/vm/os_linux.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -49,6 +49,7 @@
 #include "runtime/javaCalls.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/objectMonitor.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/osThread.hpp"
 #include "runtime/perfMemory.hpp"
 #include "runtime/sharedRuntime.hpp"
@@ -5271,7 +5272,6 @@
 //
 
 static jlong slow_thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
-  static bool proc_task_unchecked = true;
   pid_t  tid = thread->osthread()->thread_id();
   char *s;
   char stat[2048];
@@ -5284,24 +5284,7 @@
   long ldummy;
   FILE *fp;
 
-  snprintf(proc_name, 64, "/proc/%d/stat", tid);
-
-  // The /proc/<tid>/stat aggregates per-process usage on
-  // new Linux kernels 2.6+ where NPTL is supported.
-  // The /proc/self/task/<tid>/stat still has the per-thread usage.
-  // See bug 6328462.
-  // There possibly can be cases where there is no directory
-  // /proc/self/task, so we check its availability.
-  if (proc_task_unchecked && os::Linux::is_NPTL()) {
-    // This is executed only once
-    proc_task_unchecked = false;
-    fp = fopen("/proc/self/task", "r");
-    if (fp != NULL) {
-      snprintf(proc_name, 64, "/proc/self/task/%d/stat", tid);
-      fclose(fp);
-    }
-  }
-
+  snprintf(proc_name, 64, "/proc/self/task/%d/stat", tid);
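+  // /proc/self/task/<tid>/stat holds the per-thread usage; the old runtime fallback for
+  // kernels without /proc/self/task is no longer needed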
   fp = fopen(proc_name, "r");
   if ( fp == NULL ) return -1;
   statlen = fread(stat, 1, 2047, fp);
--- a/hotspot/src/os/linux/vm/os_linux.inline.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/os/linux/vm/os_linux.inline.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -26,24 +26,9 @@
 #define OS_LINUX_VM_OS_LINUX_INLINE_HPP
 
 #include "runtime/atomic.inline.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/os.hpp"
 
-#ifdef TARGET_OS_ARCH_linux_x86
-# include "orderAccess_linux_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_sparc
-# include "orderAccess_linux_sparc.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_zero
-# include "orderAccess_linux_zero.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_arm
-# include "orderAccess_linux_arm.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_ppc
-# include "orderAccess_linux_ppc.inline.hpp"
-#endif
-
 // System includes
 
 #include <unistd.h>
--- a/hotspot/src/os/linux/vm/thread_linux.inline.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/os/linux/vm/thread_linux.inline.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -29,33 +29,22 @@
 #error "This file should only be included from thread.inline.hpp"
 #endif
 
-#include "runtime/atomic.hpp"
 #include "runtime/prefetch.hpp"
 #include "runtime/thread.hpp"
 #include "runtime/threadLocalStorage.hpp"
 #ifdef TARGET_OS_ARCH_linux_x86
-# include "atomic_linux_x86.inline.hpp"
-# include "orderAccess_linux_x86.inline.hpp"
 # include "prefetch_linux_x86.inline.hpp"
 #endif
 #ifdef TARGET_OS_ARCH_linux_sparc
-# include "atomic_linux_sparc.inline.hpp"
-# include "orderAccess_linux_sparc.inline.hpp"
 # include "prefetch_linux_sparc.inline.hpp"
 #endif
 #ifdef TARGET_OS_ARCH_linux_zero
-# include "atomic_linux_zero.inline.hpp"
-# include "orderAccess_linux_zero.inline.hpp"
 # include "prefetch_linux_zero.inline.hpp"
 #endif
 #ifdef TARGET_OS_ARCH_linux_arm
-# include "atomic_linux_arm.inline.hpp"
-# include "orderAccess_linux_arm.inline.hpp"
 # include "prefetch_linux_arm.inline.hpp"
 #endif
 #ifdef TARGET_OS_ARCH_linux_ppc
-# include "atomic_linux_ppc.inline.hpp"
-# include "orderAccess_linux_ppc.inline.hpp"
 # include "prefetch_linux_ppc.inline.hpp"
 #endif
 
--- a/hotspot/src/os/solaris/vm/os_solaris.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/os/solaris/vm/os_solaris.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -48,6 +48,7 @@
 #include "runtime/javaCalls.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/objectMonitor.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/osThread.hpp"
 #include "runtime/perfMemory.hpp"
 #include "runtime/sharedRuntime.hpp"
--- a/hotspot/src/os/solaris/vm/os_solaris.inline.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/os/solaris/vm/os_solaris.inline.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -26,15 +26,9 @@
 #define OS_SOLARIS_VM_OS_SOLARIS_INLINE_HPP
 
 #include "runtime/atomic.inline.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/os.hpp"
 
-#ifdef TARGET_OS_ARCH_solaris_x86
-# include "orderAccess_solaris_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_solaris_sparc
-# include "orderAccess_solaris_sparc.inline.hpp"
-#endif
-
 // System includes
 #include <sys/param.h>
 #include <dlfcn.h>
--- a/hotspot/src/os/solaris/vm/thread_solaris.inline.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/os/solaris/vm/thread_solaris.inline.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -29,18 +29,14 @@
 #error "This file should only be included from thread.inline.hpp"
 #endif
 
-#include "runtime/atomic.hpp"
+#include "runtime/atomic.inline.hpp"
 #include "runtime/prefetch.hpp"
 #include "runtime/thread.hpp"
 #include "runtime/threadLocalStorage.hpp"
 #ifdef TARGET_OS_ARCH_solaris_x86
-# include "atomic_solaris_x86.inline.hpp"
-# include "orderAccess_solaris_x86.inline.hpp"
 # include "prefetch_solaris_x86.inline.hpp"
 #endif
 #ifdef TARGET_OS_ARCH_solaris_sparc
-# include "atomic_solaris_sparc.inline.hpp"
-# include "orderAccess_solaris_sparc.inline.hpp"
 # include "prefetch_solaris_sparc.inline.hpp"
 #endif
 
--- a/hotspot/src/os/windows/vm/os_windows.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/os/windows/vm/os_windows.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -51,6 +51,7 @@
 #include "runtime/javaCalls.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/objectMonitor.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/osThread.hpp"
 #include "runtime/perfMemory.hpp"
 #include "runtime/sharedRuntime.hpp"
--- a/hotspot/src/os/windows/vm/os_windows.inline.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/os/windows/vm/os_windows.inline.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -26,12 +26,9 @@
 #define OS_WINDOWS_VM_OS_WINDOWS_INLINE_HPP
 
 #include "runtime/atomic.inline.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/os.hpp"
 
-#ifdef TARGET_OS_ARCH_windows_x86
-# include "orderAccess_windows_x86.inline.hpp"
-#endif
-
 inline const char* os::file_separator()                { return "\\"; }
 inline const char* os::line_separator()                { return "\r\n"; }
 inline const char* os::path_separator()                { return ";"; }
--- a/hotspot/src/os/windows/vm/thread_windows.inline.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/os/windows/vm/thread_windows.inline.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -29,13 +29,10 @@
 #error "This file should only be included from thread.inline.hpp"
 #endif
 
-#include "runtime/atomic.hpp"
 #include "runtime/prefetch.hpp"
 #include "runtime/thread.hpp"
 #include "runtime/threadLocalStorage.hpp"
 #ifdef TARGET_OS_ARCH_windows_x86
-# include "atomic_windows_x86.inline.hpp"
-# include "orderAccess_windows_x86.inline.hpp"
 # include "prefetch_windows_x86.inline.hpp"
 #endif
 
--- a/hotspot/src/os_cpu/aix_ppc/vm/atomic_aix_ppc.inline.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/os_cpu/aix_ppc/vm/atomic_aix_ppc.inline.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -26,7 +26,6 @@
 #ifndef OS_CPU_AIX_OJDKPPC_VM_ATOMIC_AIX_PPC_INLINE_HPP
 #define OS_CPU_AIX_OJDKPPC_VM_ATOMIC_AIX_PPC_INLINE_HPP
 
-#include "orderAccess_aix_ppc.inline.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/os.hpp"
 #include "vm_version_ppc.hpp"
--- a/hotspot/src/os_cpu/linux_ppc/vm/atomic_linux_ppc.inline.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/os_cpu/linux_ppc/vm/atomic_linux_ppc.inline.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -26,7 +26,6 @@
 #ifndef OS_CPU_LINUX_PPC_VM_ATOMIC_LINUX_PPC_INLINE_HPP
 #define OS_CPU_LINUX_PPC_VM_ATOMIC_LINUX_PPC_INLINE_HPP
 
-#include "orderAccess_linux_ppc.inline.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/os.hpp"
 #include "vm_version_ppc.hpp"
@@ -53,41 +52,41 @@
 
 inline jlong Atomic::load(volatile jlong* src) { return *src; }
 
-/*
-  machine barrier instructions:
-
-  - sync            two-way memory barrier, aka fence
-  - lwsync          orders  Store|Store,
-                             Load|Store,
-                             Load|Load,
-                    but not Store|Load
-  - eieio           orders memory accesses for device memory (only)
-  - isync           invalidates speculatively executed instructions
-                    From the POWER ISA 2.06 documentation:
-                     "[...] an isync instruction prevents the execution of
-                    instructions following the isync until instructions
-                    preceding the isync have completed, [...]"
-                    From IBM's AIX assembler reference:
-                     "The isync [...] instructions causes the processor to
-                    refetch any instructions that might have been fetched
-                    prior to the isync instruction. The instruction isync
-                    causes the processor to wait for all previous instructions
-                    to complete. Then any instructions already fetched are
-                    discarded and instruction processing continues in the
-                    environment established by the previous instructions."
-
-  semantic barrier instructions:
-  (as defined in orderAccess.hpp)
-
-  - release         orders Store|Store,       (maps to lwsync)
-                            Load|Store
-  - acquire         orders  Load|Store,       (maps to lwsync)
-                            Load|Load
-  - fence           orders Store|Store,       (maps to sync)
-                            Load|Store,
-                            Load|Load,
-                           Store|Load
-*/
+//
+// machine barrier instructions:
+//
+// - sync            two-way memory barrier, aka fence
+// - lwsync          orders  Store|Store,
+//                            Load|Store,
+//                            Load|Load,
+//                   but not Store|Load
+// - eieio           orders memory accesses for device memory (only)
+// - isync           invalidates speculatively executed instructions
+//                   From the POWER ISA 2.06 documentation:
+//                    "[...] an isync instruction prevents the execution of
+//                   instructions following the isync until instructions
+//                   preceding the isync have completed, [...]"
+//                   From IBM's AIX assembler reference:
+//                    "The isync [...] instructions causes the processor to
+//                   refetch any instructions that might have been fetched
+//                   prior to the isync instruction. The instruction isync
+//                   causes the processor to wait for all previous instructions
+//                   to complete. Then any instructions already fetched are
+//                   discarded and instruction processing continues in the
+//                   environment established by the previous instructions."
+//
+// semantic barrier instructions:
+// (as defined in orderAccess.hpp)
+//
+// - release         orders Store|Store,       (maps to lwsync)
+//                           Load|Store
+// - acquire         orders  Load|Store,       (maps to lwsync)
+//                           Load|Load
+// - fence           orders Store|Store,       (maps to sync)
+//                           Load|Store,
+//                           Load|Load,
+//                          Store|Load
+//
 
 #define strasm_sync                       "\n  sync    \n"
 #define strasm_lwsync                     "\n  lwsync  \n"
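The reworked comment above documents how the semantic barriers map onto PowerPC instructions. As a stand-alone illustration (editor's sketch with hypothetical helper names, not HotSpot code), such fragments are normally wrapped in GCC inline asm with a "memory" clobber so the compiler cannot reorder memory accesses across them:

    // acquire/release map to lwsync; a full fence also orders Store|Load and maps to sync.
    inline void ppc_release_barrier() { __asm__ __volatile__ ("lwsync" : : : "memory"); }
    inline void ppc_full_fence()      { __asm__ __volatile__ ("sync"   : : : "memory"); }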
--- a/hotspot/src/os_cpu/linux_sparc/vm/atomic_linux_sparc.inline.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/os_cpu/linux_sparc/vm/atomic_linux_sparc.inline.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -78,12 +78,12 @@
   __asm__ volatile(
     "1: \n\t"
     " ldx    [%2], %%o2\n\t"
-    " add    %0, %%o2, %%o3\n\t"
+    " add    %1, %%o2, %%o3\n\t"
     " casx   [%2], %%o2, %%o3\n\t"
     " cmp    %%o2, %%o3\n\t"
     " bne    %%xcc, 1b\n\t"
     "  nop\n\t"
-    " add    %0, %%o2, %0\n\t"
+    " add    %1, %%o2, %0\n\t"
     : "=r" (rv)
     : "r" (add_value), "r" (dest)
     : "memory", "o2", "o3");
--- a/hotspot/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -302,29 +302,30 @@
   if (context == NULL) return;
 
   ucontext_t *uc = (ucontext_t*)context;
+  sigcontext* sc = (sigcontext*)context;
   intptr_t *sp = (intptr_t *)os::Linux::ucontext_get_sp(uc);
 
   st->print_cr("Register to memory mapping:");
   st->cr();
 
   // this is only for the "general purpose" registers
-  st->print("G1="); print_location(st, SIG_REGS(sc).u_regs[CON__G1]);
-  st->print("G2="); print_location(st, SIG_REGS(sc).u_regs[CON__G2]);
-  st->print("G3="); print_location(st, SIG_REGS(sc).u_regs[CON__G3]);
-  st->print("G4="); print_location(st, SIG_REGS(sc).u_regs[CON__G4]);
-  st->print("G5="); print_location(st, SIG_REGS(sc).u_regs[CON__G5]);
-  st->print("G6="); print_location(st, SIG_REGS(sc).u_regs[CON__G6]);
-  st->print("G7="); print_location(st, SIG_REGS(sc).u_regs[CON__G7]);
+  st->print("G1="); print_location(st, SIG_REGS(sc).u_regs[CON_G1]);
+  st->print("G2="); print_location(st, SIG_REGS(sc).u_regs[CON_G2]);
+  st->print("G3="); print_location(st, SIG_REGS(sc).u_regs[CON_G3]);
+  st->print("G4="); print_location(st, SIG_REGS(sc).u_regs[CON_G4]);
+  st->print("G5="); print_location(st, SIG_REGS(sc).u_regs[CON_G5]);
+  st->print("G6="); print_location(st, SIG_REGS(sc).u_regs[CON_G6]);
+  st->print("G7="); print_location(st, SIG_REGS(sc).u_regs[CON_G7]);
   st->cr();
 
-  st->print("O0="); print_location(st, SIG_REGS(sc).u_regs[CON__O0]);
-  st->print("O1="); print_location(st, SIG_REGS(sc).u_regs[CON__O1]);
-  st->print("O2="); print_location(st, SIG_REGS(sc).u_regs[CON__O2]);
-  st->print("O3="); print_location(st, SIG_REGS(sc).u_regs[CON__O3]);
-  st->print("O4="); print_location(st, SIG_REGS(sc).u_regs[CON__O4]);
-  st->print("O5="); print_location(st, SIG_REGS(sc).u_regs[CON__O5]);
-  st->print("O6="); print_location(st, SIG_REGS(sc).u_regs[CON__O6]);
-  st->print("O7="); print_location(st, SIG_REGS(sc).u_regs[CON__O7]);
+  st->print("O0="); print_location(st, SIG_REGS(sc).u_regs[CON_O0]);
+  st->print("O1="); print_location(st, SIG_REGS(sc).u_regs[CON_O1]);
+  st->print("O2="); print_location(st, SIG_REGS(sc).u_regs[CON_O2]);
+  st->print("O3="); print_location(st, SIG_REGS(sc).u_regs[CON_O3]);
+  st->print("O4="); print_location(st, SIG_REGS(sc).u_regs[CON_O4]);
+  st->print("O5="); print_location(st, SIG_REGS(sc).u_regs[CON_O5]);
+  st->print("O6="); print_location(st, SIG_REGS(sc).u_regs[CON_O6]);
+  st->print("O7="); print_location(st, SIG_REGS(sc).u_regs[CON_O7]);
   st->cr();
 
   st->print("L0="); print_location(st, sp[L0->sp_offset_in_saved_window()]);
@@ -516,7 +517,7 @@
   if (nativeInstruction_at(*pc)->is_ic_miss_trap()) {
 #ifdef ASSERT
 #ifdef TIERED
-    CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
+    CodeBlob* cb = CodeCache::find_blob_unsafe(*pc);
     assert(cb->is_compiled_by_c2(), "Wrong compiler");
 #endif // TIERED
 #endif // ASSERT
--- a/hotspot/src/share/vm/adlc/filebuff.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/adlc/filebuff.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -89,200 +89,6 @@
   return retval;
 }
 
-//------------------------------FileBuffRegion---------------------------------
-// Create a new region in a FileBuff.
-FileBuffRegion::FileBuffRegion( FileBuff* bufr, int soln, int ln,
-                                int off, int len)
-: _bfr(bufr), _sol(soln), _line(ln), _offset(off), _length(len) {
-  _next = NULL;                 // No chained regions
-}
-
-//------------------------------~FileBuffRegion--------------------------------
-// Delete the entire linked list of buffer regions.
-FileBuffRegion::~FileBuffRegion() {
-  if( _next ) delete _next;
-}
-
-//------------------------------copy-------------------------------------------
-// Deep copy a FileBuffRegion
-FileBuffRegion *FileBuffRegion::copy() {
-  if( !this ) return NULL;      // The empty buffer region
-  FileBuffRegion *br = new FileBuffRegion(_bfr,_sol,_line,_offset,_length);
-  if( _next ) br->_next = _next->copy();
-  return br;
-}
-
-//------------------------------merge------------------------------------------
-// Merge another buffer region into this buffer region.  Make overlapping areas
-// become a single region.  Remove (delete) the input FileBuffRegion.
-// Since the buffer regions are sorted by file offset, this is a varient of a
-// "sorted-merge" running in linear time.
-FileBuffRegion *FileBuffRegion::merge( FileBuffRegion *br ) {
-  if( !br ) return this;        // Merging nothing
-  if( !this ) return br;        // Merging into nothing
-
-  assert( _bfr == br->_bfr, "" );     // Check for pointer-equivalent buffers
-
-  if( _offset < br->_offset ) { // "this" starts before "br"
-    if( _offset+_length < br->_offset ) { // "this" ends before "br"
-      if( _next ) _next->merge( br );    // Merge with remainder of list
-      else _next = br;                 // No more in this list; just append.
-    } else {                           // Regions overlap.
-      int l = br->_offset + br->_length - _offset;
-      if( l > _length ) _length = l;     // Pick larger region
-      FileBuffRegion *nr = br->_next;     // Get rest of region
-      br->_next = NULL;         // Remove indication of rest of region
-      delete br;                // Delete this region (it's been subsumed).
-      if( nr ) merge( nr );     // Merge with rest of region
-    }                           // End of if regions overlap or not.
-  } else {                      // "this" starts after "br"
-    if( br->_offset+br->_length < _offset ) {    // "br" ends before "this"
-      FileBuffRegion *nr = new FileBuffRegion(_bfr,_sol,_line,_offset,_length);
-      nr->_next = _next;                // Structure copy "this" guy to "nr"
-      *this = *br;              // Structure copy "br" over "this".
-      br->_next = NULL;         // Remove indication of rest of region
-      delete br;                // Delete this region (it's been copied)
-      merge( nr );              // Finish merging
-    } else {                    // Regions overlap.
-      int l = _offset + _length - br->_offset;
-      if( l > _length ) _length = l;    // Pick larger region
-      _offset = br->_offset;            // Start with earlier region
-      _sol = br->_sol;                  // Also use earlier line start
-      _line = br->_line;                        // Also use earlier line
-      FileBuffRegion *nr = br->_next;   // Get rest of region
-      br->_next = NULL;         // Remove indication of rest of region
-      delete br;                // Delete this region (it's been subsumed).
-      if( nr ) merge( nr );     // Merge with rest of region
-    }                           // End of if regions overlap or not.
-  }
-  return this;
-}
-
-//------------------------------expandtab--------------------------------------
-static int expandtab( ostream &os, int off, char c, char fill1, char fill2 ) {
-  if( c == '\t' ) {             // Tab?
-    do os << fill1;             // Expand the tab; Output space
-    while( (++off) & 7 );       // Expand to tab stop
-  } else {                      // Normal character
-    os << fill2;                // Display normal character
-    off++;                      // Increment "cursor" offset
-  }
-  return off;
-}
-
-//------------------------------printline--------------------------------------
-// Print and highlite a region of a line.  Return the amount of highliting left
-// to do (i.e. highlite length minus length of line).
-static int printline( ostream& os, const char *fname, int line,
-                        const char *_sol, int skip, int len ) {
-
-  // Display the entire tab-expanded line
-  os << fname << ":" << line << ": ";
-  const char *t = strchr(_sol,'\n')+1; // End of line
-  int off = 0;                  // Cursor offset for tab expansion
-  const char *s = _sol;         // Nice string pointer
-  while( t-s ) {                // Display whole line
-    char c = *s++;              // Get next character to display
-    off = expandtab(os,off,c,' ',c);
-  }
-
-  // Display the tab-expanded skippings before underlining.
-  os << fname << ":" << line << ": ";
-  off = 0;                      // Cursor offset for tab expansion
-  s = _sol;                     // Restart string pointer
-
-  // Start underlining.
-  if( skip != -1 ) {            // The no-start-indicating flag
-    const char *u = _sol+skip;  // Amount to skip
-    while( u-s )                // Display skipped part
-      off = expandtab(os,off,*s++,' ',' ');
-    os << '^';                  // Start region
-    off++;                      // Moved cursor
-    len--;                      // 1 less char to do
-    if( *s++ == '\t' )          // Starting character is a tab?
-      off = expandtab(os,off,'\t','-','^');
-  }
-
-  // Long region doesn't end on this line
-  int llen = (int)(t-s);        // Length of line, minus what's already done
-  if( len > llen ) {            // Doing entire rest of line?
-    while( t-s )                // Display rest of line
-      off = expandtab(os,off,*s++,'-','-');
-    os << '\n';                 // EOL
-    return len-llen;            // Return what's not yet done.
-  }
-
-  // Region does end on this line.  This code fails subtly if the region ends
-  // in a tab character.
-  int i;
-  for( i=1; i<len; i++ )        // Underline just what's needed
-    off = expandtab(os,off,*s++,'-','-');
-  if( i == len ) os << '^';     // Mark end of region
-  os << '\n';                   // End of marked line
-  return 0;                     // All done
-}
-
-//------------------------------print------------------------------------------
-//std::ostream& operator<< ( std::ostream& os, FileBuffRegion &br ) {
-ostream& operator<< ( ostream& os, FileBuffRegion &br ) {
-  if( &br == NULL ) return os;  // The empty buffer region
-  FileBuffRegion *brp = &br;    // Pointer to region
-  while( brp ) {                // While have chained regions
-    brp->print(os);             // Print region
-    brp = brp->_next;           // Chain to next
-  }
-  return os;                    // Return final stream
-}
-
-//------------------------------print------------------------------------------
-// Print the FileBuffRegion to a stream. FileBuffRegions are printed with the
-// filename and line number to the left, and complete text lines to the right.
-// Selected portions (portions of a line actually in the FileBuffRegion are
-// underlined.  Ellipses are used for long multi-line regions.
-//void FileBuffRegion::print( std::ostream& os ) {
-void FileBuffRegion::print( ostream& os ) {
-  if( !this ) return;           // Nothing to print
-  char *s = _bfr->get_line();
-  int skip = (int)(_offset - _sol);     // Amount to skip to start of data
-  int len = printline( os, _bfr->_fp->_name, _line, s, skip, _length );
-
-  if( !len ) return;                    // All done; exit
-
-  // Here we require at least 2 lines
-  int off1 = _length - len + skip;      // Length of line 1
-  int off2 = off1 + _sol;               // Offset to start of line 2
-  char *s2 = _bfr->get_line();           // Start of line 2
-  char *s3 = strchr( s2, '\n' )+1;      // Start of line 3 (unread)
-  if( len <= (s3-s2) ) {                // It all fits on the next line
-    printline( os, _bfr->_fp->_name, _line+1, s2, -1, len ); // Print&underline
-    return;
-  }
-
-  // Here we require at least 3 lines
-  int off3 = off2 + (int)(s3-s2);       // Offset to start of line 3
-  s3 = _bfr->get_line();                // Start of line 3 (read)
-  const char *s4 = strchr( s3, '\n' )+1;// Start of line 4 (unread)
-  if( len < (s4-s3) ) {                 // It all fits on the next 2 lines
-    s2 = _bfr->get_line();
-    len = printline( os, _bfr->_fp->_name, _line+1, s2, -1, len ); // Line 2
-    s3 = _bfr->get_line();
-    printline( os, _bfr->_fp->_name, _line+2, s3, -1, len );     // Line 3
-    return;
-  }
-
-  // Here we require at least 4 lines.
-  // Print only the 1st and last line, with ellipses in middle.
-  os << "...\n";                // The ellipses
-  int cline = _line+1;          // Skipped 2 lines
-  do {                          // Do until find last line
-    len -= (int)(s3-s2);        // Remove length of line
-    cline++;                    // Next line
-    s2 = _bfr->get_line();      // Get next line from end of this line
-    s3 = strchr( s2, '\n' ) + 1;// Get end of next line
-  } while( len > (s3-s2) );     // Repeat until last line
-  printline( os, _bfr->_fp->_name, cline, s2, -1, len ); // Print & underline
-}
-
 //------------------------------file_error-------------------------------------
 void FileBuff::file_error(int flag, int linenum, const char *fmt, ...)
 {
--- a/hotspot/src/share/vm/adlc/filebuff.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/adlc/filebuff.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -46,7 +46,6 @@
 // This class defines a nicely behaved buffer of text.  Entire file of text
 // is read into buffer at creation, with sentinels at start and end.
 class FileBuff {
-  friend class FileBuffRegion;
  private:
   long  _bufferSize;            // Size of text holding buffer.
   long  _offset;                // Expected filepointer offset.
@@ -82,29 +81,4 @@
   // when the pointer is valid (i.e. just obtained from getline()).
   long getoff(const char* s) { return _bufoff + (long)(s - _buf); }
 };
-
-//------------------------------FileBuffRegion---------------------------------
-// A buffer region is really a region of some file, specified as a linked list
-// of offsets and lengths.  These regions can be merged; overlapping regions
-// will coalesce.
-class FileBuffRegion {
- public:                        // Workaround dev-studio friend/private bug
-  FileBuffRegion *_next;        // Linked list of regions sorted by offset.
- private:
-  FileBuff       *_bfr;         // The Buffer of the file
-  int _offset, _length;         // The file area
-  int             _sol;         // Start of line where the file area starts
-  int             _line;        // First line of region
-
- public:
-  FileBuffRegion(FileBuff*, int sol, int line, int offset, int len);
-  ~FileBuffRegion();
-
-  FileBuffRegion *copy();                   // Deep copy
-  FileBuffRegion *merge(FileBuffRegion*); // Merge 2 regions; delete input
-
-  void print(ostream&);
-  friend ostream& operator<< (ostream&, FileBuffRegion&);
-};
-
 #endif // SHARE_VM_ADLC_FILEBUFF_HPP
--- a/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -1701,6 +1701,15 @@
   return NULL;
 }
 
+void GraphBuilder::check_args_for_profiling(Values* obj_args, int expected) {
+#ifdef ASSERT
+  bool ignored_will_link;
+  ciSignature* declared_signature = NULL;
+  ciMethod* real_target = method()->get_method_at_bci(bci(), ignored_will_link, &declared_signature);
+  assert(expected == obj_args->length() || real_target->is_method_handle_intrinsic(), "missed on arg?");
+#endif
+}
+
 // Collect arguments that we want to profile in a list
 Values* GraphBuilder::collect_args_for_profiling(Values* args, ciMethod* target, bool may_have_receiver) {
   int start = 0;
@@ -1709,13 +1718,14 @@
     return NULL;
   }
   int s = obj_args->size();
-  for (int i = start, j = 0; j < s; i++) {
+  // if called through method handle invoke, some arguments may have been popped
+  for (int i = start, j = 0; j < s && i < args->length(); i++) {
     if (args->at(i)->type()->is_object_kind()) {
       obj_args->push(args->at(i));
       j++;
     }
   }
-  assert(s == obj_args->length(), "missed on arg?");
+  check_args_for_profiling(obj_args, s);
   return obj_args;
 }
 
@@ -3847,14 +3857,7 @@
             j++;
           }
         }
-#ifdef ASSERT
-        {
-          bool ignored_will_link;
-          ciSignature* declared_signature = NULL;
-          ciMethod* real_target = method()->get_method_at_bci(bci(), ignored_will_link, &declared_signature);
-          assert(s == obj_args->length() || real_target->is_method_handle_intrinsic(), "missed on arg?");
-        }
-#endif
+        check_args_for_profiling(obj_args, s);
       }
       profile_call(callee, recv, holder_known ? callee->holder() : NULL, obj_args, true);
     }
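The hunks above move the assertion into the new check_args_for_profiling() helper and bound the collection loop by the actual argument count, since a call reached through a method handle may already have had some arguments popped. An editor's stand-alone sketch of that bounded-collection pattern (hypothetical types, not VM code):

    #include <vector>

    // Collect "object-like" values from args starting at 'start', but never read
    // past the end of args even if fewer than 'expected' of them are found.
    std::vector<int> collect_object_args(const std::vector<int>& args,
                                         int start, int expected) {
      std::vector<int> obj_args;
      for (size_t i = start; (int)obj_args.size() < expected && i < args.size(); ++i) {
        if (args[i] % 2 == 0) {   // stand-in for "is an object kind"
          obj_args.push_back(args[i]);
        }
      }
      return obj_args;
    }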
--- a/hotspot/src/share/vm/c1/c1_GraphBuilder.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/c1/c1_GraphBuilder.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -392,6 +392,7 @@
 
   Values* args_list_for_profiling(ciMethod* target, int& start, bool may_have_receiver);
   Values* collect_args_for_profiling(Values* args, ciMethod* target, bool may_have_receiver);
+  void check_args_for_profiling(Values* obj_args, int expected);
 
  public:
   NOT_PRODUCT(void print_stats();)
--- a/hotspot/src/share/vm/c1/c1_IR.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/c1/c1_IR.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -263,8 +263,7 @@
 // Implementation of IR
 
 IR::IR(Compilation* compilation, ciMethod* method, int osr_bci) :
-    _locals_size(in_WordSize(-1))
-  , _num_loops(0) {
+  _num_loops(0) {
   // setup IR fields
   _compilation = compilation;
   _top_scope   = new IRScope(compilation, NULL, -1, method, osr_bci, true);
--- a/hotspot/src/share/vm/c1/c1_IR.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/c1/c1_IR.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -293,7 +293,6 @@
  private:
   Compilation*     _compilation;                 // the current compilation
   IRScope*         _top_scope;                   // the root of the scope hierarchy
-  WordSize         _locals_size;                 // the space required for all locals
   int              _num_loops;                   // Total number of loops
   BlockList*       _code;                        // the blocks in code generation order w/ use counts
 
@@ -310,8 +309,6 @@
   BlockBegin*      start() const                 { return top_scope()->start(); }
   BlockBegin*      std_entry() const             { return start()->end()->as_Base()->std_entry(); }
   BlockBegin*      osr_entry() const             { return start()->end()->as_Base()->osr_entry(); }
-  WordSize         locals_size() const           { return _locals_size; }
-  int              locals_size_in_words() const  { return in_words(_locals_size); }
   BlockList*       code() const                  { return _code; }
   int              num_loops() const             { return _num_loops; }
   int              max_stack() const             { return top_scope()->max_stack(); } // expensive
--- a/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -2636,8 +2636,10 @@
       // LIR_Assembler::emit_profile_type() from emitting useless code
       profiled_k = ciTypeEntries::with_status(result, profiled_k);
     }
-    if (exact_signature_k != NULL && exact_klass != exact_signature_k) {
-      assert(exact_klass == NULL, "obj and signature disagree?");
+    // exact_klass and exact_signature_k can both be non-NULL yet
+    // differ when exact_klass is loaded after the ciObject for
+    // exact_signature_k is created.
+    if (exact_klass == NULL && exact_signature_k != NULL && exact_klass != exact_signature_k) {
       // sometimes the type of the signature is better than the best type
       // the compiler has
       exact_klass = exact_signature_k;
@@ -2648,8 +2650,7 @@
       if (improved_klass == NULL) {
         improved_klass = comp->cha_exact_type(callee_signature_k);
       }
-      if (improved_klass != NULL && exact_klass != improved_klass) {
-        assert(exact_klass == NULL, "obj and signature disagree?");
+      if (exact_klass == NULL && improved_klass != NULL && exact_klass != improved_klass) {
         exact_klass = exact_signature_k;
       }
     }
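With the asserts above removed, the signature-derived type is consulted only when no exact type was recovered from the object itself, which is the case the new comment describes. A hypothetical stand-alone rendering of that preference order (editor's sketch, not VM code):

    // Prefer the exact type observed on the object; fall back to the type implied
    // by the signature only when the object gave us nothing.
    const char* pick_exact_type(const char* from_object, const char* from_signature) {
      if (from_object == nullptr && from_signature != nullptr) {
        return from_signature;   // the signature can be better than the best type the compiler has
      }
      return from_object;        // may still be nullptr if neither side knows an exact type
    }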
--- a/hotspot/src/share/vm/ci/ciEnv.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/ci/ciEnv.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -51,6 +51,7 @@
 #include "runtime/init.hpp"
 #include "runtime/reflection.hpp"
 #include "runtime/sharedRuntime.hpp"
+#include "runtime/thread.inline.hpp"
 #include "utilities/dtrace.hpp"
 #include "utilities/macros.hpp"
 #ifdef COMPILER1
--- a/hotspot/src/share/vm/ci/ciObject.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/ci/ciObject.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -181,9 +181,8 @@
     if (klass() == env->String_klass() || klass() == env->Class_klass()) {
       return true;
     }
-  if (EnableInvokeDynamic &&
-      (klass()->is_subclass_of(env->MethodHandle_klass()) ||
-       klass()->is_subclass_of(env->CallSite_klass()))) {
+  if (klass()->is_subclass_of(env->MethodHandle_klass()) ||
+      klass()->is_subclass_of(env->CallSite_klass())) {
     assert(ScavengeRootsInCode >= 1, "must be");
     // We want to treat these aggressively.
     return true;
--- a/hotspot/src/share/vm/ci/ciReplay.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/ci/ciReplay.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -376,11 +376,15 @@
     int c = getc(_stream);
     while(c != EOF) {
       c = get_line(c);
-      process_command(CHECK);
+      process_command(THREAD);
       if (had_error()) {
         tty->print_cr("Error while parsing line %d: %s\n", line_no, _error_message);
-        tty->print_cr("%s", _buffer);
-        return;
+        if (ReplayIgnoreInitErrors) {
+          CLEAR_PENDING_EXCEPTION;
+          _error_message = NULL;
+        } else {
+          return;
+        }
       }
       line_no++;
     }
@@ -565,10 +569,14 @@
   void process_ciMethodData(TRAPS) {
     Method* method = parse_method(CHECK);
     if (had_error()) return;
-    /* jsut copied from Method, to build interpret data*/
+    /* just copied from Method, to build interpret data*/
     if (InstanceRefKlass::owns_pending_list_lock((JavaThread*)THREAD)) {
       return;
     }
+    // To be properly initialized, some profiling in the MDO needs the
+    // method to be rewritten (for instance, the number of arguments at
+    // a call).
+    method->method_holder()->link_class(CHECK);
     // methodOopDesc::build_interpreter_method_data(method, CHECK);
     {
       // Grab a lock here to prevent multiple
--- a/hotspot/src/share/vm/classfile/classFileParser.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/classfile/classFileParser.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -164,11 +164,6 @@
             "Class file version does not support constant tag %u in class file %s",
             tag, CHECK);
         }
-        if (!EnableInvokeDynamic) {
-          classfile_parse_error(
-            "This JVM does not support constant tag %u in class file %s",
-            tag, CHECK);
-        }
         if (tag == JVM_CONSTANT_MethodHandle) {
           cfs->guarantee_more(4, CHECK);  // ref_kind, method_index, tag/access_flags
           u1 ref_kind = cfs->get_u1_fast();
@@ -189,11 +184,6 @@
               "Class file version does not support constant tag %u in class file %s",
               tag, CHECK);
           }
-          if (!EnableInvokeDynamic) {
-            classfile_parse_error(
-              "This JVM does not support constant tag %u in class file %s",
-              tag, CHECK);
-          }
           cfs->guarantee_more(5, CHECK);  // bsm_index, nt, tag/access_flags
           u2 bootstrap_specifier_index = cfs->get_u2_fast();
           u2 name_and_type_index = cfs->get_u2_fast();
@@ -263,7 +253,7 @@
             verify_legal_utf8((unsigned char*)utf8_buffer, utf8_length, CHECK);
           }
 
-          if (EnableInvokeDynamic && has_cp_patch_at(index)) {
+          if (has_cp_patch_at(index)) {
             Handle patch = clear_cp_patch_at(index);
             guarantee_property(java_lang_String::is_instance(patch()),
                                "Illegal utf8 patch at %d in class file %s",
@@ -419,8 +409,7 @@
         {
           int ref_index = cp->method_handle_index_at(index);
           check_property(
-            valid_cp_range(ref_index, length) &&
-                EnableInvokeDynamic,
+            valid_cp_range(ref_index, length),
               "Invalid constant pool index %u in class file %s",
               ref_index, CHECK_(nullHandle));
           constantTag tag = cp->tag_at(ref_index);
@@ -466,7 +455,7 @@
       case JVM_CONSTANT_MethodType :
         {
           int ref_index = cp->method_type_index_at(index);
-          check_property(valid_symbol_at(ref_index) && EnableInvokeDynamic,
+          check_property(valid_symbol_at(ref_index),
                  "Invalid constant pool index %u in class file %s",
                  ref_index, CHECK_(nullHandle));
         }
@@ -492,7 +481,6 @@
 
   if (_cp_patches != NULL) {
     // need to treat this_class specially...
-    assert(EnableInvokeDynamic, "");
     int this_class_index;
     {
       cfs->guarantee_more(8, CHECK_(nullHandle));  // flags, this_class, super_class, infs_len
@@ -640,7 +628,6 @@
 
 
 void ClassFileParser::patch_constant_pool(constantPoolHandle cp, int index, Handle patch, TRAPS) {
-  assert(EnableInvokeDynamic, "");
   BasicType patch_type = T_VOID;
 
   switch (cp->tag_at(index).value()) {
--- a/hotspot/src/share/vm/classfile/classFileParser.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/classfile/classFileParser.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -377,11 +377,9 @@
   char* skip_over_field_signature(char* signature, bool void_ok, unsigned int length, TRAPS);
 
   bool is_anonymous() {
-    assert(EnableInvokeDynamic || _host_klass.is_null(), "");
     return _host_klass.not_null();
   }
   bool has_cp_patch_at(int index) {
-    assert(EnableInvokeDynamic, "");
     assert(index >= 0, "oob");
     return (_cp_patches != NULL
             && index < _cp_patches->length()
@@ -404,10 +402,7 @@
   // constant pool construction, but in later versions they can.
   // %%% Let's phase out the old is_klass_reference.
   bool valid_klass_reference_at(int index) {
-    return _cp->is_within_bounds(index) &&
-         (EnableInvokeDynamic
-            ? _cp->tag_at(index).is_klass_or_reference()
-            : _cp->tag_at(index).is_klass_reference());
+    return _cp->is_within_bounds(index) && _cp->tag_at(index).is_klass_or_reference();
   }
 
   // Checks that the cpool index is in range and is a utf8
--- a/hotspot/src/share/vm/classfile/dictionary.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/classfile/dictionary.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -28,6 +28,7 @@
 #include "memory/iterator.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/jvmtiRedefineClassesTrace.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "utilities/hashtable.inline.hpp"
 
 
--- a/hotspot/src/share/vm/classfile/javaClasses.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/classfile/javaClasses.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -2646,7 +2646,7 @@
 
 void java_lang_invoke_DirectMethodHandle::compute_offsets() {
   Klass* klass_oop = SystemDictionary::DirectMethodHandle_klass();
-  if (klass_oop != NULL && EnableInvokeDynamic) {
+  if (klass_oop != NULL) {
     compute_offset(_member_offset, klass_oop, vmSymbols::member_name(), vmSymbols::java_lang_invoke_MemberName_signature());
   }
 }
@@ -2668,18 +2668,15 @@
 
 void java_lang_invoke_MethodHandle::compute_offsets() {
   Klass* klass_oop = SystemDictionary::MethodHandle_klass();
-  if (klass_oop != NULL && EnableInvokeDynamic) {
+  if (klass_oop != NULL) {
     compute_offset(_type_offset, klass_oop, vmSymbols::type_name(), vmSymbols::java_lang_invoke_MethodType_signature());
-    compute_optional_offset(_form_offset, klass_oop, vmSymbols::form_name(), vmSymbols::java_lang_invoke_LambdaForm_signature());
-    if (_form_offset == 0) {
-      EnableInvokeDynamic = false;
-    }
+    compute_offset(_form_offset, klass_oop, vmSymbols::form_name(), vmSymbols::java_lang_invoke_LambdaForm_signature());
   }
 }
 
 void java_lang_invoke_MemberName::compute_offsets() {
   Klass* klass_oop = SystemDictionary::MemberName_klass();
-  if (klass_oop != NULL && EnableInvokeDynamic) {
+  if (klass_oop != NULL) {
     compute_offset(_clazz_offset,     klass_oop, vmSymbols::clazz_name(),     vmSymbols::class_signature());
     compute_offset(_name_offset,      klass_oop, vmSymbols::name_name(),      vmSymbols::string_signature());
     compute_offset(_type_offset,      klass_oop, vmSymbols::type_name(),      vmSymbols::object_signature());
@@ -2690,7 +2687,7 @@
 
 void java_lang_invoke_LambdaForm::compute_offsets() {
   Klass* klass_oop = SystemDictionary::LambdaForm_klass();
-  if (klass_oop != NULL && EnableInvokeDynamic) {
+  if (klass_oop != NULL) {
     compute_offset(_vmentry_offset, klass_oop, vmSymbols::vmentry_name(), vmSymbols::java_lang_invoke_MemberName_signature());
   }
 }
@@ -2905,7 +2902,6 @@
 int java_lang_invoke_CallSite::_target_offset;
 
 void java_lang_invoke_CallSite::compute_offsets() {
-  if (!EnableInvokeDynamic)  return;
   Klass* k = SystemDictionary::CallSite_klass();
   if (k != NULL) {
     compute_offset(_target_offset, k, vmSymbols::target_name(), vmSymbols::java_lang_invoke_MethodHandle_signature());
@@ -3296,14 +3292,12 @@
   java_lang_ClassLoader::compute_offsets();
   java_lang_Thread::compute_offsets();
   java_lang_ThreadGroup::compute_offsets();
-  if (EnableInvokeDynamic) {
-    java_lang_invoke_MethodHandle::compute_offsets();
-    java_lang_invoke_DirectMethodHandle::compute_offsets();
-    java_lang_invoke_MemberName::compute_offsets();
-    java_lang_invoke_LambdaForm::compute_offsets();
-    java_lang_invoke_MethodType::compute_offsets();
-    java_lang_invoke_CallSite::compute_offsets();
-  }
+  java_lang_invoke_MethodHandle::compute_offsets();
+  java_lang_invoke_DirectMethodHandle::compute_offsets();
+  java_lang_invoke_MemberName::compute_offsets();
+  java_lang_invoke_LambdaForm::compute_offsets();
+  java_lang_invoke_MethodType::compute_offsets();
+  java_lang_invoke_CallSite::compute_offsets();
   java_security_AccessControlContext::compute_offsets();
   // Initialize reflection classes. The layouts of these classes
   // changed with the new reflection implementation in JDK 1.4, and
--- a/hotspot/src/share/vm/classfile/resolutionErrors.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/classfile/resolutionErrors.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,12 +32,13 @@
 
 // add new entry to the table
 void ResolutionErrorTable::add_entry(int index, unsigned int hash,
-                                     constantPoolHandle pool, int cp_index, Symbol* error)
+                                     constantPoolHandle pool, int cp_index,
+                                     Symbol* error, Symbol* message)
 {
   assert_locked_or_safepoint(SystemDictionary_lock);
   assert(!pool.is_null() && error != NULL, "adding NULL obj");
 
-  ResolutionErrorEntry* entry = new_entry(hash, pool(), cp_index, error);
+  ResolutionErrorEntry* entry = new_entry(hash, pool(), cp_index, error, message);
   add_entry(index, entry);
 }
 
@@ -58,19 +59,26 @@
 }
 
 void ResolutionErrorEntry::set_error(Symbol* e) {
-  assert(e == NULL || _error == NULL, "cannot reset error");
+  assert(e != NULL, "must set a value");
   _error = e;
-  if (_error != NULL) _error->increment_refcount();
+  _error->increment_refcount();
+}
+
+void ResolutionErrorEntry::set_message(Symbol* c) {
+  assert(c != NULL, "must set a value");
+  _message = c;
+  _message->increment_refcount();
 }
 
 // create new error entry
 ResolutionErrorEntry* ResolutionErrorTable::new_entry(int hash, ConstantPool* pool,
-                                                      int cp_index, Symbol* error)
+                                                      int cp_index, Symbol* error,
+                                                      Symbol* message)
 {
   ResolutionErrorEntry* entry = (ResolutionErrorEntry*)Hashtable<ConstantPool*, mtClass>::new_entry(hash, pool);
   entry->set_cp_index(cp_index);
-  NOT_PRODUCT(entry->set_error(NULL);)
   entry->set_error(error);
+  entry->set_message(message);
 
   return entry;
 }
@@ -79,6 +87,7 @@
   // decrement error refcount
   assert(entry->error() != NULL, "error should be set");
   entry->error()->decrement_refcount();
+  entry->message()->decrement_refcount();
   Hashtable<ConstantPool*, mtClass>::free_entry(entry);
 }
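The changes above make every stored Symbol, the error and now also the message, follow the same ownership rule: the setter takes a reference and free_entry() releases it. An editor's simplified sketch of that pairing (stand-in types, not HotSpot's Symbol):

    struct RefCountedSym {
      int refcount = 0;
      void increment_refcount() { ++refcount; }
      void decrement_refcount() { --refcount; }
    };

    struct ErrorEntry {
      RefCountedSym* error   = nullptr;
      RefCountedSym* message = nullptr;

      // Storing the pointers takes one reference on each...
      void set(RefCountedSym* e, RefCountedSym* m) {
        error = e;     error->increment_refcount();
        message = m;   message->increment_refcount();
      }
      // ...and destroying the entry releases both references again.
      ~ErrorEntry() {
        if (error   != nullptr) error->decrement_refcount();
        if (message != nullptr) message->decrement_refcount();
      }
    };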
 
--- a/hotspot/src/share/vm/classfile/resolutionErrors.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/classfile/resolutionErrors.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -38,7 +38,8 @@
 public:
   ResolutionErrorTable(int table_size);
 
-  ResolutionErrorEntry* new_entry(int hash, ConstantPool* pool, int cp_index, Symbol* error);
+  ResolutionErrorEntry* new_entry(int hash, ConstantPool* pool, int cp_index,
+                                  Symbol* error, Symbol* message);
   void free_entry(ResolutionErrorEntry *entry);
 
   ResolutionErrorEntry* bucket(int i) {
@@ -55,7 +56,7 @@
   }
 
   void add_entry(int index, unsigned int hash,
-                 constantPoolHandle pool, int which, Symbol* error);
+                 constantPoolHandle pool, int which, Symbol* error, Symbol* message);
 
 
   // find error given the constant pool and constant pool index
@@ -79,10 +80,10 @@
  private:
   int               _cp_index;
   Symbol*           _error;
+  Symbol*           _message;
 
  public:
-  ConstantPool*      pool() const               { return (ConstantPool*)literal(); }
-  ConstantPool**   pool_addr()                { return (ConstantPool**)literal_addr(); }
+  ConstantPool*      pool() const               { return literal(); }
 
   int                cp_index() const           { return _cp_index; }
   void               set_cp_index(int cp_index) { _cp_index = cp_index; }
@@ -90,6 +91,9 @@
   Symbol*            error() const              { return _error; }
   void               set_error(Symbol* e);
 
+  Symbol*            message() const            { return _message; }
+  void               set_message(Symbol* c);
+
   ResolutionErrorEntry* next() const {
     return (ResolutionErrorEntry*)HashtableEntry<ConstantPool*, mtClass>::next();
   }
--- a/hotspot/src/share/vm/classfile/systemDictionary.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/classfile/systemDictionary.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -52,6 +52,7 @@
 #include "runtime/java.hpp"
 #include "runtime/javaCalls.hpp"
 #include "runtime/mutexLocker.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/signature.hpp"
 #include "services/classLoadingService.hpp"
 #include "services/threadService.hpp"
@@ -172,12 +173,14 @@
   if (HAS_PENDING_EXCEPTION || klass == NULL) {
     KlassHandle k_h(THREAD, klass);
     // can return a null klass
-    klass = handle_resolution_exception(class_name, class_loader, protection_domain, throw_error, k_h, THREAD);
+    klass = handle_resolution_exception(class_name, throw_error, k_h, THREAD);
   }
   return klass;
 }
 
-Klass* SystemDictionary::handle_resolution_exception(Symbol* class_name, Handle class_loader, Handle protection_domain, bool throw_error, KlassHandle klass_h, TRAPS) {
+Klass* SystemDictionary::handle_resolution_exception(Symbol* class_name,
+                                                     bool throw_error,
+                                                     KlassHandle klass_h, TRAPS) {
   if (HAS_PENDING_EXCEPTION) {
     // If we have a pending exception we forward it to the caller, unless throw_error is true,
     // in which case we have to check whether the pending exception is a ClassNotFoundException,
@@ -385,7 +388,7 @@
   }
   if (HAS_PENDING_EXCEPTION || superk_h() == NULL) {
     // can null superk
-    superk_h = KlassHandle(THREAD, handle_resolution_exception(class_name, class_loader, protection_domain, true, superk_h, THREAD));
+    superk_h = KlassHandle(THREAD, handle_resolution_exception(class_name, true, superk_h, THREAD));
   }
 
   return superk_h();
@@ -971,7 +974,6 @@
   if (host_klass.not_null()) {
     // Create a new CLD for anonymous class, that uses the same class loader
     // as the host_klass
-    assert(EnableInvokeDynamic, "");
     guarantee(host_klass->class_loader() == class_loader(), "should be the same");
     loader_data = ClassLoaderData::anonymous_class_loader_data(class_loader(), CHECK_NULL);
     loader_data->record_dependency(host_klass(), CHECK_NULL);
@@ -996,7 +998,6 @@
 
 
   if (host_klass.not_null() && k.not_null()) {
-    assert(EnableInvokeDynamic, "");
     k->set_host_klass(host_klass());
     // If it's anonymous, initialize it now, since nobody else will.
 
@@ -1877,13 +1878,7 @@
   WKID jsr292_group_start = WK_KLASS_ENUM_NAME(MethodHandle_klass);
   WKID jsr292_group_end   = WK_KLASS_ENUM_NAME(VolatileCallSite_klass);
   initialize_wk_klasses_until(jsr292_group_start, scan, CHECK);
-  if (EnableInvokeDynamic) {
-    initialize_wk_klasses_through(jsr292_group_end, scan, CHECK);
-  } else {
-    // Skip the JSR 292 classes, if not enabled.
-    scan = WKID(jsr292_group_end + 1);
-  }
-
+  initialize_wk_klasses_through(jsr292_group_end, scan, CHECK);
   initialize_wk_klasses_until(WKID_LIMIT, scan, CHECK);
 
   _box_klasses[T_BOOLEAN] = WK_KLASS(Boolean_klass);
@@ -2119,12 +2114,13 @@
 
 // Add entry to resolution error table to record the error when the first
 // attempt to resolve a reference to a class has failed.
-void SystemDictionary::add_resolution_error(constantPoolHandle pool, int which, Symbol* error) {
+void SystemDictionary::add_resolution_error(constantPoolHandle pool, int which,
+                                            Symbol* error, Symbol* message) {
   unsigned int hash = resolution_errors()->compute_hash(pool, which);
   int index = resolution_errors()->hash_to_index(hash);
   {
     MutexLocker ml(SystemDictionary_lock, Thread::current());
-    resolution_errors()->add_entry(index, hash, pool, which, error);
+    resolution_errors()->add_entry(index, hash, pool, which, error, message);
   }
 }
 
@@ -2134,13 +2130,19 @@
 }
 
 // Lookup resolution error table. Returns error if found, otherwise NULL.
-Symbol* SystemDictionary::find_resolution_error(constantPoolHandle pool, int which) {
+Symbol* SystemDictionary::find_resolution_error(constantPoolHandle pool, int which,
+                                                Symbol** message) {
   unsigned int hash = resolution_errors()->compute_hash(pool, which);
   int index = resolution_errors()->hash_to_index(hash);
   {
     MutexLocker ml(SystemDictionary_lock, Thread::current());
     ResolutionErrorEntry* entry = resolution_errors()->find_entry(index, hash, pool, which);
-    return (entry != NULL) ? entry->error() : (Symbol*)NULL;
+    if (entry != NULL) {
+      *message = entry->message();
+      return entry->error();
+    } else {
+      return NULL;
+    }
   }
 }
 
@@ -2221,7 +2223,6 @@
                                                             Symbol* signature,
                                                             TRAPS) {
   methodHandle empty;
-  assert(EnableInvokeDynamic, "");
   assert(MethodHandles::is_signature_polymorphic(iid) &&
          MethodHandles::is_signature_polymorphic_intrinsic(iid) &&
          iid != vmIntrinsics::_invokeGeneric,
@@ -2295,7 +2296,6 @@
                                                           Handle *method_type_result,
                                                           TRAPS) {
   methodHandle empty;
-  assert(EnableInvokeDynamic, "");
   assert(!THREAD->is_Compiler_thread(), "");
   Handle method_type =
     SystemDictionary::find_method_handle_type(signature, accessing_klass, CHECK_(empty));
--- a/hotspot/src/share/vm/classfile/systemDictionary.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/classfile/systemDictionary.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -151,16 +151,16 @@
                                                                                                                          \
   /* support for dynamic typing; it's OK if these are NULL in earlier JDKs */                                            \
   do_klass(DirectMethodHandle_klass,                    java_lang_invoke_DirectMethodHandle,       Opt                 ) \
-  do_klass(MethodHandle_klass,                          java_lang_invoke_MethodHandle,             Pre_JSR292          ) \
-  do_klass(MemberName_klass,                            java_lang_invoke_MemberName,               Pre_JSR292          ) \
-  do_klass(MethodHandleNatives_klass,                   java_lang_invoke_MethodHandleNatives,      Pre_JSR292          ) \
+  do_klass(MethodHandle_klass,                          java_lang_invoke_MethodHandle,             Pre                 ) \
+  do_klass(MemberName_klass,                            java_lang_invoke_MemberName,               Pre                 ) \
+  do_klass(MethodHandleNatives_klass,                   java_lang_invoke_MethodHandleNatives,      Pre                 ) \
   do_klass(LambdaForm_klass,                            java_lang_invoke_LambdaForm,               Opt                 ) \
-  do_klass(MethodType_klass,                            java_lang_invoke_MethodType,               Pre_JSR292          ) \
-  do_klass(BootstrapMethodError_klass,                  java_lang_BootstrapMethodError,            Pre_JSR292          ) \
-  do_klass(CallSite_klass,                              java_lang_invoke_CallSite,                 Pre_JSR292          ) \
-  do_klass(ConstantCallSite_klass,                      java_lang_invoke_ConstantCallSite,         Pre_JSR292          ) \
-  do_klass(MutableCallSite_klass,                       java_lang_invoke_MutableCallSite,          Pre_JSR292          ) \
-  do_klass(VolatileCallSite_klass,                      java_lang_invoke_VolatileCallSite,         Pre_JSR292          ) \
+  do_klass(MethodType_klass,                            java_lang_invoke_MethodType,               Pre                 ) \
+  do_klass(BootstrapMethodError_klass,                  java_lang_BootstrapMethodError,            Pre                 ) \
+  do_klass(CallSite_klass,                              java_lang_invoke_CallSite,                 Pre                 ) \
+  do_klass(ConstantCallSite_klass,                      java_lang_invoke_ConstantCallSite,         Pre                 ) \
+  do_klass(MutableCallSite_klass,                       java_lang_invoke_MutableCallSite,          Pre                 ) \
+  do_klass(VolatileCallSite_klass,                      java_lang_invoke_VolatileCallSite,         Pre                 ) \
   /* Note: MethodHandle must be first, and VolatileCallSite last in group */                                             \
                                                                                                                          \
   do_klass(StringBuffer_klass,                          java_lang_StringBuffer,                    Pre                 ) \
@@ -204,7 +204,6 @@
 
   enum InitOption {
     Pre,                        // preloaded; error if not present
-    Pre_JSR292,                 // preloaded if EnableInvokeDynamic
 
     // Order is significant.  Options before this point require resolve_or_fail.
     // Options after this point will use resolve_or_null instead.
@@ -228,7 +227,7 @@
   static Klass* resolve_or_fail(Symbol* class_name, bool throw_error, TRAPS);
 private:
   // handle error translation for resolve_or_null results
-  static Klass* handle_resolution_exception(Symbol* class_name, Handle class_loader, Handle protection_domain, bool throw_error, KlassHandle klass_h, TRAPS);
+  static Klass* handle_resolution_exception(Symbol* class_name, bool throw_error, KlassHandle klass_h, TRAPS);
 
 public:
 
@@ -385,7 +384,6 @@
   }
 
   static Klass* check_klass_Pre(       Klass* k) { return check_klass(k); }
-  static Klass* check_klass_Pre_JSR292(Klass* k) { return EnableInvokeDynamic ? check_klass(k) : k; }
   static Klass* check_klass_Opt(       Klass* k) { return k; }
   static Klass* check_klass_Opt_Only_JDK15(Klass* k) {
     assert(JDK_Version::is_gte_jdk15x_version(), "JDK 1.5 only");
@@ -531,9 +529,11 @@
 
   // Record the error when the first attempt to resolve a reference from a constant
   // pool entry to a class fails.
-  static void add_resolution_error(constantPoolHandle pool, int which, Symbol* error);
+  static void add_resolution_error(constantPoolHandle pool, int which, Symbol* error,
+                                   Symbol* message);
   static void delete_resolution_error(ConstantPool* pool);
-  static Symbol* find_resolution_error(constantPoolHandle pool, int which);
+  static Symbol* find_resolution_error(constantPoolHandle pool, int which,
+                                       Symbol** message);
 
  private:
 
--- a/hotspot/src/share/vm/classfile/verifier.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/classfile/verifier.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -43,7 +43,7 @@
 #include "runtime/handles.inline.hpp"
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/javaCalls.hpp"
-#include "runtime/orderAccess.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/os.hpp"
 #ifdef TARGET_ARCH_x86
 # include "bytes_x86.hpp"
@@ -2361,12 +2361,9 @@
   // Get referenced class type
   VerificationType ref_class_type;
   if (opcode == Bytecodes::_invokedynamic) {
-    if (!EnableInvokeDynamic ||
-        _klass->major_version() < Verifier::INVOKEDYNAMIC_MAJOR_VERSION) {
+    if (_klass->major_version() < Verifier::INVOKEDYNAMIC_MAJOR_VERSION) {
       class_format_error(
-        (!EnableInvokeDynamic ?
-         "invokedynamic instructions not enabled in this JVM" :
-         "invokedynamic instructions not supported by this class file version"),
+        "invokedynamic instructions not supported by this class file version",
         _klass->external_name());
       return;
     }
--- a/hotspot/src/share/vm/classfile/vmSymbols.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/classfile/vmSymbols.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -775,7 +775,7 @@
   /* java/lang/ref/Reference */                                                                                         \
   do_intrinsic(_Reference_get,            java_lang_ref_Reference, get_name,    void_object_signature, F_R)             \
                                                                                                                         \
-  /* support for com.sum.crypto.provider.AESCrypt and some of its callers */                                            \
+  /* support for com.sun.crypto.provider.AESCrypt and some of its callers */                                            \
   do_class(com_sun_crypto_provider_aescrypt,      "com/sun/crypto/provider/AESCrypt")                                   \
   do_intrinsic(_aescrypt_encryptBlock, com_sun_crypto_provider_aescrypt, encryptBlock_name, byteArray_int_byteArray_int_signature, F_R)   \
   do_intrinsic(_aescrypt_decryptBlock, com_sun_crypto_provider_aescrypt, decryptBlock_name, byteArray_int_byteArray_int_signature, F_R)   \
--- a/hotspot/src/share/vm/code/dependencies.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/code/dependencies.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -32,6 +32,7 @@
 #include "oops/oop.inline.hpp"
 #include "runtime/handles.hpp"
 #include "runtime/handles.inline.hpp"
+#include "runtime/thread.inline.hpp"
 #include "utilities/copy.hpp"
 
 
--- a/hotspot/src/share/vm/code/nmethod.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/code/nmethod.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -37,6 +37,7 @@
 #include "oops/methodData.hpp"
 #include "prims/jvmtiRedefineClassesTrace.hpp"
 #include "prims/jvmtiImpl.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/sweeper.hpp"
 #include "utilities/resourceHash.hpp"
@@ -750,7 +751,11 @@
     _hotness_counter         = NMethodSweeper::hotness_counter_reset_val();
 
     code_buffer->copy_values_to(this);
-    debug_only(verify_scavenge_root_oops());
+    if (ScavengeRootsInCode && detect_scavenge_root_oops()) {
+      CodeCache::add_scavenge_root_nmethod(this);
+      Universe::heap()->register_nmethod(this);
+    }
+    DEBUG_ONLY(verify_scavenge_root_oops();)
     CodeCache::commit(this);
   }
 
@@ -2284,13 +2289,13 @@
 void nmethodLocker::lock_nmethod(nmethod* nm, bool zombie_ok) {
   if (nm == NULL)  return;
   Atomic::inc(&nm->_lock_count);
-  guarantee(zombie_ok || !nm->is_zombie(), "cannot lock a zombie method");
+  assert(zombie_ok || !nm->is_zombie(), "cannot lock a zombie method");
 }
 
 void nmethodLocker::unlock_nmethod(nmethod* nm) {
   if (nm == NULL)  return;
   Atomic::dec(&nm->_lock_count);
-  guarantee(nm->_lock_count >= 0, "unmatched nmethod lock/unlock");
+  assert(nm->_lock_count >= 0, "unmatched nmethod lock/unlock");
 }
 
 
--- a/hotspot/src/share/vm/code/nmethod.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/code/nmethod.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -203,7 +203,7 @@
   // and is not made into a zombie. However, once the nmethod is made into
   // a zombie, it will be locked one final time if CompiledMethodUnload
   // event processing needs to be done.
-  jint  _lock_count;
+  volatile jint _lock_count;
 
   // not_entrant method removal. Each mark_sweep pass will update
   // this mark to current sweep invocation count if it is seen on the
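Editor's note: the lock counter becomes volatile and the checks drop from guarantee to assert, so they vanish in product builds while the Atomic::inc/dec pairing stays. A rough standalone analog of the lock/unlock idiom using std::atomic in place of HotSpot's Atomic class (illustrative only):

    #include <atomic>
    #include <cassert>

    struct nmethod_like {
      std::atomic<int> lock_count{0};   // stands in for the volatile jint _lock_count
      bool is_zombie = false;
    };

    void lock_nmethod(nmethod_like* nm, bool zombie_ok = false) {
      if (nm == nullptr) return;
      nm->lock_count.fetch_add(1, std::memory_order_relaxed);
      assert((zombie_ok || !nm->is_zombie) && "cannot lock a zombie method");
    }

    void unlock_nmethod(nmethod_like* nm) {
      if (nm == nullptr) return;
      int old_count = nm->lock_count.fetch_sub(1, std::memory_order_relaxed);
      assert(old_count >= 1 && "unmatched nmethod lock/unlock");
    }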
--- a/hotspot/src/share/vm/compiler/compileBroker.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/compiler/compileBroker.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -150,9 +150,8 @@
 
 long CompileBroker::_peak_compilation_time       = 0;
 
-CompileQueue* CompileBroker::_c2_method_queue    = NULL;
-CompileQueue* CompileBroker::_c1_method_queue    = NULL;
-CompileTask*  CompileBroker::_task_free_list     = NULL;
+CompileQueue* CompileBroker::_c2_compile_queue   = NULL;
+CompileQueue* CompileBroker::_c1_compile_queue   = NULL;
 
 GrowableArray<CompilerThread*>* CompileBroker::_compiler_threads = NULL;
 
@@ -220,13 +219,56 @@
 
     // By convention, the compiling thread is responsible for
     // recycling a non-blocking CompileTask.
-    CompileBroker::free_task(task);
+    CompileTask::free(task);
   }
 }
 
 
-// ------------------------------------------------------------------
-// CompileTask::initialize
+CompileTask*  CompileTask::_task_free_list = NULL;
+#ifdef ASSERT
+int CompileTask::_num_allocated_tasks = 0;
+#endif
+/**
+ * Allocate a CompileTask, from the free list if possible.
+ */
+CompileTask* CompileTask::allocate() {
+  MutexLocker locker(CompileTaskAlloc_lock);
+  CompileTask* task = NULL;
+
+  if (_task_free_list != NULL) {
+    task = _task_free_list;
+    _task_free_list = task->next();
+    task->set_next(NULL);
+  } else {
+    task = new CompileTask();
+    DEBUG_ONLY(_num_allocated_tasks++;)
+    assert (_num_allocated_tasks < 10000, "Leaking compilation tasks?");
+    task->set_next(NULL);
+    task->set_is_free(true);
+  }
+  assert(task->is_free(), "Task must be free.");
+  task->set_is_free(false);
+  return task;
+}
+
+
+/**
+ * Add a task to the free list.
+ */
+void CompileTask::free(CompileTask* task) {
+  MutexLocker locker(CompileTaskAlloc_lock);
+  if (!task->is_free()) {
+    task->set_code(NULL);
+    assert(!task->lock()->is_locked(), "Should not be locked when freed");
+    JNIHandles::destroy_global(task->_method_holder);
+    JNIHandles::destroy_global(task->_hot_method_holder);
+
+    task->set_is_free(true);
+    task->set_next(_task_free_list);
+    _task_free_list = task;
+  }
+}
+
 void CompileTask::initialize(int compile_id,
                              methodHandle method,
                              int osr_bci,
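Editor's note: CompileTask::allocate() reuses recycled tasks from a singly linked free list under CompileTaskAlloc_lock, and the debug-only counter guards against a task leak. A compact standalone sketch of the same free-list idiom with standard C++ primitives (illustrative, not the HotSpot types):

    #include <cassert>
    #include <mutex>

    struct Task {
      Task* next = nullptr;
      bool  is_free = true;
      // ... payload fields would go here ...
    };

    static std::mutex task_alloc_lock;
    static Task*      task_free_list = nullptr;
    static int        num_allocated_tasks = 0;   // debug-only in the patch

    Task* allocate_task() {
      std::lock_guard<std::mutex> locker(task_alloc_lock);
      Task* task;
      if (task_free_list != nullptr) {
        task = task_free_list;                // pop a recycled task from the free list
        task_free_list = task->next;
        task->next = nullptr;
      } else {
        task = new Task();                    // grow the pool
        ++num_allocated_tasks;
        assert(num_allocated_tasks < 10000 && "Leaking compilation tasks?");
      }
      assert(task->is_free && "Task must be free.");
      task->is_free = false;
      return task;
    }

    void free_task(Task* task) {
      std::lock_guard<std::mutex> locker(task_alloc_lock);
      if (!task->is_free) {
        task->is_free = true;
        task->next = task_free_list;          // push the task back onto the free list
        task_free_list = task;
      }
    }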
@@ -284,15 +326,6 @@
   if (nm == NULL)  _code_handle = NULL;  // drop the handle also
 }
 
-// ------------------------------------------------------------------
-// CompileTask::free
-void CompileTask::free() {
-  set_code(NULL);
-  assert(!_lock->is_locked(), "Should not be locked when freed");
-  JNIHandles::destroy_global(_method_holder);
-  JNIHandles::destroy_global(_hot_method_holder);
-}
-
 
 void CompileTask::mark_on_stack() {
   // Mark these methods as something redefine classes cannot remove.
@@ -555,9 +588,12 @@
 
 
 
-// Add a CompileTask to a CompileQueue
+/**
+ * Add a CompileTask to a CompileQueue
+ */
 void CompileQueue::add(CompileTask* task) {
   assert(lock()->owned_by_self(), "must own lock");
+  assert(!CompileBroker::is_compilation_disabled_forever(), "Do not add task if compilation is turned off forever");
 
   task->set_next(NULL);
   task->set_prev(NULL);
@@ -579,9 +615,7 @@
   // Mark the method as being in the compile queue.
   task->method()->set_queued_for_compilation();
 
-  if (CIPrintCompileQueue) {
-    print();
-  }
+  NOT_PRODUCT(print();)
 
   if (LogCompilation && xtty != NULL) {
     task->log_task_queued();
@@ -591,14 +625,29 @@
   lock()->notify_all();
 }
 
-void CompileQueue::delete_all() {
-  assert(lock()->owned_by_self(), "must own lock");
-  if (_first != NULL) {
-    for (CompileTask* task = _first; task != NULL; task = task->next()) {
-      delete task;
-    }
-    _first = NULL;
+/**
+ * Empties compilation queue by putting all compilation tasks onto
+ * a freelist. Furthermore, the method wakes up all threads that are
+ * waiting on a compilation task to finish. This can happen if background
+ * compilation is disabled.
+ */
+void CompileQueue::free_all() {
+  MutexLocker mu(lock());
+  CompileTask* next = _first;
+
+  // Iterate over all tasks in the compile queue
+  while (next != NULL) {
+    CompileTask* current = next;
+    next = current->next();
+    // Wake up thread that blocks on the compile task.
+    current->lock()->notify();
+    // Put the task back on the freelist.
+    CompileTask::free(current);
   }
+  _first = NULL;
+
+  // Wake up all threads that block on the queue.
+  lock()->notify_all();
 }
 
 // ------------------------------------------------------------------
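Editor's note: free_all() replaces delete_all(): rather than deleting tasks it recycles them onto the free list, notifies any thread blocked on an individual task, and finally wakes everything waiting on the queue itself. A hedged standalone sketch of that drain-and-wake pattern, with condition variables standing in for HotSpot monitors:

    #include <condition_variable>
    #include <mutex>

    struct QueuedTask {
      QueuedTask*             next = nullptr;
      std::mutex              m;       // a blocked submitter waits on this task's monitor
      std::condition_variable cv;
      void notify_waiter() { cv.notify_one(); }
    };

    struct Queue {
      std::mutex              lock;
      std::condition_variable not_empty;
      QueuedTask*             first = nullptr;

      // Analogue of CompileQueue::free_all(): empty the queue and wake everyone.
      void free_all(void (*recycle)(QueuedTask*)) {
        std::lock_guard<std::mutex> mu(lock);
        QueuedTask* next = first;
        while (next != nullptr) {
          QueuedTask* current = next;
          next = current->next;
          current->notify_waiter();   // wake the thread blocked on this compile task
          recycle(current);           // put the task back on the free list
        }
        first = nullptr;
        not_empty.notify_all();       // wake all threads blocked on the queue itself
      }
    };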
@@ -691,18 +740,24 @@
   }
 }
 
-// ------------------------------------------------------------------
-// CompileQueue::print
+#ifndef PRODUCT
+/**
+ * Print entire compilation queue.
+ */
 void CompileQueue::print() {
-  tty->print_cr("Contents of %s", name());
-  tty->print_cr("----------------------");
-  CompileTask* task = _first;
-  while (task != NULL) {
-    task->print_line();
-    task = task->next();
+  if (CIPrintCompileQueue) {
+    ttyLocker ttyl;
+    tty->print_cr("Contents of %s", name());
+    tty->print_cr("----------------------");
+    CompileTask* task = _first;
+    while (task != NULL) {
+      task->print_line();
+      task = task->next();
+    }
+    tty->print_cr("----------------------");
   }
-  tty->print_cr("----------------------");
 }
+#endif // PRODUCT
 
 CompilerCounters::CompilerCounters(const char* thread_name, int instance, TRAPS) {
 
@@ -775,9 +830,6 @@
   _compilers[1] = new SharkCompiler();
 #endif // SHARK
 
-  // Initialize the CompileTask free list
-  _task_free_list = NULL;
-
   // Start the CompilerThreads
   init_compiler_threads(c1_count, c2_count);
   // totalTime performance counter is always created as it is required
@@ -970,11 +1022,11 @@
 #endif // !ZERO && !SHARK
   // Initialize the compilation queue
   if (c2_compiler_count > 0) {
-    _c2_method_queue  = new CompileQueue("C2MethodQueue",  MethodCompileQueue_lock);
+    _c2_compile_queue  = new CompileQueue("C2 CompileQueue",  MethodCompileQueue_lock);
     _compilers[1]->set_num_compiler_threads(c2_compiler_count);
   }
   if (c1_compiler_count > 0) {
-    _c1_method_queue  = new CompileQueue("C1MethodQueue",  MethodCompileQueue_lock);
+    _c1_compile_queue  = new CompileQueue("C1 CompileQueue",  MethodCompileQueue_lock);
     _compilers[0]->set_num_compiler_threads(c1_compiler_count);
   }
 
@@ -989,7 +1041,7 @@
     sprintf(name_buffer, "C2 CompilerThread%d", i);
     CompilerCounters* counters = new CompilerCounters("compilerThread", i, CHECK);
     // Shark and C2
-    CompilerThread* new_thread = make_compiler_thread(name_buffer, _c2_method_queue, counters, _compilers[1], CHECK);
+    CompilerThread* new_thread = make_compiler_thread(name_buffer, _c2_compile_queue, counters, _compilers[1], CHECK);
     _compiler_threads->append(new_thread);
   }
 
@@ -998,7 +1050,7 @@
     sprintf(name_buffer, "C1 CompilerThread%d", i);
     CompilerCounters* counters = new CompilerCounters("compilerThread", i, CHECK);
     // C1
-    CompilerThread* new_thread = make_compiler_thread(name_buffer, _c1_method_queue, counters, _compilers[0], CHECK);
+    CompilerThread* new_thread = make_compiler_thread(name_buffer, _c1_compile_queue, counters, _compilers[0], CHECK);
     _compiler_threads->append(new_thread);
   }
 
@@ -1008,14 +1060,19 @@
 }
 
 
-// Set the methods on the stack as on_stack so that redefine classes doesn't
-// reclaim them
+/**
+ * Set the methods on the stack as on_stack so that redefine classes doesn't
+ * reclaim them. This method is executed at a safepoint.
+ */
 void CompileBroker::mark_on_stack() {
-  if (_c2_method_queue != NULL) {
-    _c2_method_queue->mark_on_stack();
+  assert(SafepointSynchronize::is_at_safepoint(), "sanity check");
+  // Since we are at a safepoint, we do not need a lock to access
+  // the compile queues.
+  if (_c2_compile_queue != NULL) {
+    _c2_compile_queue->mark_on_stack();
   }
-  if (_c1_method_queue != NULL) {
-    _c1_method_queue->mark_on_stack();
+  if (_c1_compile_queue != NULL) {
+    _c1_compile_queue->mark_on_stack();
   }
 }
 
@@ -1031,7 +1088,7 @@
                                         const char* comment,
                                         Thread* thread) {
   // do nothing if compiler thread(s) is not available
-  if (!_initialized ) {
+  if (!_initialized) {
     return;
   }
 
@@ -1078,7 +1135,7 @@
 
   // If this method is already in the compile queue, then
   // we do not block the current thread.
-  if (compilation_is_in_queue(method, osr_bci)) {
+  if (compilation_is_in_queue(method)) {
     // We may want to decay our counter a bit here to prevent
     // multiple denied requests for compilation.  This is an
     // open compilation policy issue. Note: The other possibility,
@@ -1111,7 +1168,7 @@
     // Make sure the method has not slipped into the queues since
     // last we checked; note that those checks were "fast bail-outs".
     // Here we need to be more careful, see 14012000 below.
-    if (compilation_is_in_queue(method, osr_bci)) {
+    if (compilation_is_in_queue(method)) {
       return;
     }
 
@@ -1132,7 +1189,7 @@
     }
 
     // Should this thread wait for completion of the compile?
-    blocking = is_compile_blocking(method, osr_bci);
+    blocking = is_compile_blocking();
 
     // We will enter the compilation in the queue.
     // 14012000: Note that this sets the queued_for_compile bits in
@@ -1324,19 +1381,17 @@
 }
 
 
-// ------------------------------------------------------------------
-// CompileBroker::compilation_is_in_queue
-//
-// See if this compilation is already requested.
-//
-// Implementation note: there is only a single "is in queue" bit
-// for each method.  This means that the check below is overly
-// conservative in the sense that an osr compilation in the queue
-// will block a normal compilation from entering the queue (and vice
-// versa).  This can be remedied by a full queue search to disambiguate
-// cases.  If it is deemed profitible, this may be done.
-bool CompileBroker::compilation_is_in_queue(methodHandle method,
-                                            int          osr_bci) {
+/**
+ * See if this compilation is already requested.
+ *
+ * Implementation note: there is only a single "is in queue" bit
+ * for each method.  This means that the check below is overly
+ * conservative in the sense that an osr compilation in the queue
+ * will block a normal compilation from entering the queue (and vice
+ * versa).  This can be remedied by a full queue search to disambiguate
+ * cases.  If it is deemed profitable, this may be done.
+ */
+bool CompileBroker::compilation_is_in_queue(methodHandle method) {
   return method->queued_for_compilation();
 }
 
@@ -1416,13 +1471,11 @@
 #endif
 }
 
-
-// ------------------------------------------------------------------
-// CompileBroker::is_compile_blocking
-//
-// Should the current thread be blocked until this compilation request
-// has been fulfilled?
-bool CompileBroker::is_compile_blocking(methodHandle method, int osr_bci) {
+/**
+ * Should the current thread block until this compilation request
+ * has been fulfilled?
+ */
+bool CompileBroker::is_compile_blocking() {
   assert(!InstanceRefKlass::owns_pending_list_lock(JavaThread::current()), "possible deadlock");
   return !BackgroundCompilation;
 }
@@ -1450,7 +1503,7 @@
                                               int           hot_count,
                                               const char*   comment,
                                               bool          blocking) {
-  CompileTask* new_task = allocate_task();
+  CompileTask* new_task = CompileTask::allocate();
   new_task->initialize(compile_id, method, osr_bci, comp_level,
                        hot_method, hot_count, comment,
                        blocking);
@@ -1459,75 +1512,52 @@
 }
 
 
-// ------------------------------------------------------------------
-// CompileBroker::allocate_task
-//
-// Allocate a CompileTask, from the free list if possible.
-CompileTask* CompileBroker::allocate_task() {
-  MutexLocker locker(CompileTaskAlloc_lock);
-  CompileTask* task = NULL;
-  if (_task_free_list != NULL) {
-    task = _task_free_list;
-    _task_free_list = task->next();
-    task->set_next(NULL);
-  } else {
-    task = new CompileTask();
-    task->set_next(NULL);
-  }
-  return task;
-}
-
-
-// ------------------------------------------------------------------
-// CompileBroker::free_task
-//
-// Add a task to the free list.
-void CompileBroker::free_task(CompileTask* task) {
-  MutexLocker locker(CompileTaskAlloc_lock);
-  task->free();
-  task->set_next(_task_free_list);
-  _task_free_list = task;
-}
-
-
-// ------------------------------------------------------------------
-// CompileBroker::wait_for_completion
-//
-// Wait for the given method CompileTask to complete.
+/**
+ *  Wait for the compilation task to complete.
+ */
 void CompileBroker::wait_for_completion(CompileTask* task) {
   if (CIPrintCompileQueue) {
+    ttyLocker ttyl;
     tty->print_cr("BLOCKING FOR COMPILE");
   }
 
   assert(task->is_blocking(), "can only wait on blocking task");
 
-  JavaThread *thread = JavaThread::current();
+  JavaThread* thread = JavaThread::current();
   thread->set_blocked_on_compilation(true);
 
   methodHandle method(thread, task->method());
   {
     MutexLocker waiter(task->lock(), thread);
 
-    while (!task->is_complete())
+    while (!task->is_complete() && !is_compilation_disabled_forever()) {
       task->lock()->wait();
+    }
   }
+
+  thread->set_blocked_on_compilation(false);
+  if (is_compilation_disabled_forever()) {
+    CompileTask::free(task);
+    return;
+  }
+
   // It is harmless to check this status without the lock, because
   // completion is a stable property (until the task object is recycled).
   assert(task->is_complete(), "Compilation should have completed");
   assert(task->code_handle() == NULL, "must be reset");
 
-  thread->set_blocked_on_compilation(false);
-
   // By convention, the waiter is responsible for recycling a
   // blocking CompileTask. Since there is only one waiter ever
   // waiting on a CompileTask, we know that no one else will
   // be using this CompileTask; we can free it.
-  free_task(task);
+  CompileTask::free(task);
 }
 
-// Initialize compiler thread(s) + compiler object(s). The postcondition
-// of this function is that the compiler runtimes are initialized and that
-//compiler threads can start compiling.
+/**
+ * Initialize compiler thread(s) + compiler object(s). The postcondition
+ * of this function is that the compiler runtimes are initialized and that
+ * compiler threads can start compiling.
+ */
 bool CompileBroker::init_compiler_runtime() {
   CompilerThread* thread = CompilerThread::current();
   AbstractCompiler* comp = thread->compiler();
@@ -1564,7 +1594,6 @@
     disable_compilation_forever();
     // If compiler initialization failed, no compiler thread that is specific to a
     // particular compiler runtime will ever start to compile methods.
-
     shutdown_compiler_runtime(comp, thread);
     return false;
   }
@@ -1578,9 +1607,11 @@
   return true;
 }
 
-// If C1 and/or C2 initialization failed, we shut down all compilation.
-// We do this to keep things simple. This can be changed if it ever turns out to be
-// a problem.
+/**
+ * If C1 and/or C2 initialization failed, we shut down all compilation.
+ * We do this to keep things simple. This can be changed if it ever turns
+ * out to be a problem.
+ */
 void CompileBroker::shutdown_compiler_runtime(AbstractCompiler* comp, CompilerThread* thread) {
   // Free buffer blob, if allocated
   if (thread->get_buffer_blob() != NULL) {
@@ -1592,28 +1623,25 @@
     // There are two reasons for shutting down the compiler
     // 1) compiler runtime initialization failed
     // 2) The code cache is full and the following flag is set: -XX:-UseCodeCacheFlushing
-    warning("Shutting down compiler %s (no space to run compilers)", comp->name());
+    warning("%s initialization failed. Shutting down all compilers", comp->name());
 
     // Only one thread per compiler runtime object enters here
     // Set state to shut down
     comp->set_shut_down();
 
-    MutexLocker mu(MethodCompileQueue_lock, thread);
-    CompileQueue* queue;
-    if (_c1_method_queue != NULL) {
-      _c1_method_queue->delete_all();
-      queue = _c1_method_queue;
-      _c1_method_queue = NULL;
-      delete _c1_method_queue;
+    // Delete all queued compilation tasks to make compiler threads exit faster.
+    if (_c1_compile_queue != NULL) {
+      _c1_compile_queue->free_all();
     }
 
-    if (_c2_method_queue != NULL) {
-      _c2_method_queue->delete_all();
-      queue = _c2_method_queue;
-      _c2_method_queue = NULL;
-      delete _c2_method_queue;
+    if (_c2_compile_queue != NULL) {
+      _c2_compile_queue->free_all();
     }
 
+    // Set flags so that we continue execution with using interpreter only.
+    // Set flags so that we continue execution using the interpreter only.
+    UseCompiler    = false;
+    UseInterpreter = true;
+
     // We could delete compiler runtimes also. However, there are references to
     // the compiler runtime(s) (e.g.,  nmethod::is_compiled_by_c1()) which then
     // fail. This can be done later if necessary.
--- a/hotspot/src/share/vm/compiler/compileBroker.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/compiler/compileBroker.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -40,6 +40,11 @@
   friend class VMStructs;
 
  private:
+  static CompileTask* _task_free_list;
+#ifdef ASSERT
+  static int          _num_allocated_tasks;
+#endif
+
   Monitor*     _lock;
   uint         _compile_id;
   Method*      _method;
@@ -52,7 +57,7 @@
   int          _num_inlined_bytecodes;
   nmethodLocker* _code_handle;  // holder of eventual result
   CompileTask* _next, *_prev;
-
+  bool         _is_free;
   // Fields used for logging why the compilation was initiated:
   jlong        _time_queued;  // in units of os::elapsed_counter()
   Method*      _hot_method;   // which method actually triggered this task
@@ -69,7 +74,8 @@
                   methodHandle hot_method, int hot_count, const char* comment,
                   bool is_blocking);
 
-  void free();
+  static CompileTask* allocate();
+  static void         free(CompileTask* task);
 
   int          compile_id() const                { return _compile_id; }
   Method*      method() const                    { return _method; }
@@ -98,6 +104,8 @@
   void         set_next(CompileTask* next)       { _next = next; }
   CompileTask* prev() const                      { return _prev; }
   void         set_prev(CompileTask* prev)       { _prev = prev; }
+  bool         is_free() const                   { return _is_free; }
+  void         set_is_free(bool val)             { _is_free = val; }
 
 private:
   static void  print_compilation_impl(outputStream* st, Method* method, int compile_id, int comp_level,
@@ -213,8 +221,8 @@
 
   // Redefine Classes support
   void mark_on_stack();
-  void delete_all();
-  void         print();
+  void free_all();
+  NOT_PRODUCT (void print();)
 
   ~CompileQueue() {
     assert (is_empty(), " Compile Queue must be empty");
@@ -267,9 +275,8 @@
   static int  _last_compile_level;
   static char _last_method_compiled[name_buffer_length];
 
-  static CompileQueue* _c2_method_queue;
-  static CompileQueue* _c1_method_queue;
-  static CompileTask* _task_free_list;
+  static CompileQueue* _c2_compile_queue;
+  static CompileQueue* _c1_compile_queue;
 
   static GrowableArray<CompilerThread*>* _compiler_threads;
 
@@ -322,7 +329,7 @@
   static void init_compiler_threads(int c1_compiler_count, int c2_compiler_count);
   static bool compilation_is_complete  (methodHandle method, int osr_bci, int comp_level);
   static bool compilation_is_prohibited(methodHandle method, int osr_bci, int comp_level);
-  static bool is_compile_blocking      (methodHandle method, int osr_bci);
+  static bool is_compile_blocking      ();
   static void preload_classes          (methodHandle method, TRAPS);
 
   static CompileTask* create_compile_task(CompileQueue* queue,
@@ -334,8 +341,6 @@
                                           int           hot_count,
                                           const char*   comment,
                                           bool          blocking);
-  static CompileTask* allocate_task();
-  static void free_task(CompileTask* task);
   static void wait_for_completion(CompileTask* task);
 
   static void invoke_compiler_on_method(CompileTask* task);
@@ -353,8 +358,8 @@
                                   const char* comment,
                                   Thread* thread);
   static CompileQueue* compile_queue(int comp_level) {
-    if (is_c2_compile(comp_level)) return _c2_method_queue;
-    if (is_c1_compile(comp_level)) return _c1_method_queue;
+    if (is_c2_compile(comp_level)) return _c2_compile_queue;
+    if (is_c1_compile(comp_level)) return _c1_compile_queue;
     return NULL;
   }
   static bool init_compiler_runtime();
@@ -372,7 +377,7 @@
     return NULL;
   }
 
-  static bool compilation_is_in_queue(methodHandle method, int osr_bci);
+  static bool compilation_is_in_queue(methodHandle method);
   static int queue_size(int comp_level) {
     CompileQueue *q = compile_queue(comp_level);
     return q != NULL ? q->size() : 0;
--- a/hotspot/src/share/vm/compiler/oopMap.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/compiler/oopMap.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -467,7 +467,6 @@
   assert(cb != NULL, "no codeblob");
 
   // Any reg might be saved by a safepoint handler (see generate_handler_blob).
-  const int max_saved_on_entry_reg_count = ConcreteRegisterImpl::number_of_registers;
   assert( reg_map->_update_for_id == NULL || fr->is_older(reg_map->_update_for_id),
          "already updated this map; do not 'update' it twice!" );
   debug_only(reg_map->_update_for_id = fr->id());
@@ -477,27 +476,20 @@
           !cb->caller_must_gc_arguments(reg_map->thread())),
          "include_argument_oops should already be set");
 
-  int nof_callee = 0;
-  oop*        locs[2*max_saved_on_entry_reg_count+1];
-  VMReg regs[2*max_saved_on_entry_reg_count+1];
-  // ("+1" because max_saved_on_entry_reg_count might be zero)
-
   // Scan through oopmap and find location of all callee-saved registers
   // (we do not do update in place, since info could be overwritten)
 
   address pc = fr->pc();
-
   OopMap* map  = cb->oop_map_for_return_address(pc);
-
-  assert(map != NULL, " no ptr map found");
+  assert(map != NULL, "no ptr map found");
+  DEBUG_ONLY(int nof_callee = 0;)
 
-  OopMapValue omv;
-  for(OopMapStream oms(map,OopMapValue::callee_saved_value); !oms.is_done(); oms.next()) {
-    omv = oms.current();
-    assert(nof_callee < 2*max_saved_on_entry_reg_count, "overflow");
-    regs[nof_callee] = omv.content_reg();
-    locs[nof_callee] = fr->oopmapreg_to_location(omv.reg(),reg_map);
-    nof_callee++;
+  for (OopMapStream oms(map, OopMapValue::callee_saved_value); !oms.is_done(); oms.next()) {
+    OopMapValue omv = oms.current();
+    VMReg reg = omv.content_reg();
+    oop* loc = fr->oopmapreg_to_location(omv.reg(), reg_map);
+    reg_map->set_location(reg, (address) loc);
+    DEBUG_ONLY(nof_callee++;)
   }
 
   // Check that runtime stubs save all callee-saved registers
@@ -506,11 +498,6 @@
          (nof_callee >= SAVED_ON_ENTRY_REG_COUNT || nof_callee >= C_SAVED_ON_ENTRY_REG_COUNT),
          "must save all");
 #endif // COMPILER2
-
-  // Copy found callee-saved register to reg_map
-  for(int i = 0; i < nof_callee; i++) {
-    reg_map->set_location(regs[i], (address)locs[i]);
-  }
 }
 
 //=============================================================================
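Editor's note: the rewritten loop writes each callee-saved location straight into the register map as it is discovered, dropping the fixed-size scratch arrays and the second copy loop; only the debug-only count survives for the COMPILER2 sanity check. A simplified standalone sketch of the single-pass update (generic containers replace the OopMapStream):

    #include <map>
    #include <vector>

    struct CalleeSavedEntry {
      int   content_reg;   // which register the saved value belongs to
      void* location;      // where the value was spilled in the frame
    };

    // Analogue of the patched loop: one pass, writing directly into the register map.
    void update_register_map(const std::vector<CalleeSavedEntry>& oop_map,
                             std::map<int, void*>& reg_map) {
      int nof_callee = 0;   // DEBUG_ONLY in the patch
      for (const CalleeSavedEntry& omv : oop_map) {
        reg_map[omv.content_reg] = omv.location;   // no temporary regs[]/locs[] arrays
        ++nof_callee;
      }
      // The real code additionally asserts, under COMPILER2, that runtime stubs saved
      // at least SAVED_ON_ENTRY_REG_COUNT callee-saved registers.
      (void)nof_callee;
    }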
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/adaptiveFreeList.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/adaptiveFreeList.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -158,7 +158,7 @@
                  " coal_deaths(" SIZE_FORMAT ")"
                  " + count(" SSIZE_FORMAT ")",
                  this, size(), _allocation_stats.prev_sweep(), _allocation_stats.split_births(),
-                 _allocation_stats.split_births(), _allocation_stats.split_deaths(),
+                 _allocation_stats.coal_births(), _allocation_stats.split_deaths(),
                  _allocation_stats.coal_deaths(), count()));
 }
 #endif
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -27,7 +27,7 @@
 
 #include "memory/collectorPolicy.hpp"
 
-class ConcurrentMarkSweepPolicy : public TwoGenerationCollectorPolicy {
+class ConcurrentMarkSweepPolicy : public GenCollectorPolicy {
  protected:
   void initialize_alignments();
   void initialize_generations();
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -39,6 +39,7 @@
 #include "runtime/handles.inline.hpp"
 #include "runtime/init.hpp"
 #include "runtime/java.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/vmThread.hpp"
 #include "utilities/copy.hpp"
 
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -59,6 +59,7 @@
 #include "runtime/globals_extension.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/java.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/vmThread.hpp"
 #include "services/memoryService.hpp"
 #include "services/runtimeService.hpp"
@@ -1560,7 +1561,7 @@
   // this is not likely to be productive in practice because it's probably too
   // late anyway.
   GenCollectedHeap* gch = GenCollectedHeap::heap();
-  assert(gch->collector_policy()->is_two_generation_policy(),
+  assert(gch->collector_policy()->is_generation_policy(),
          "You may want to check the correctness of the following");
   if (gch->incremental_collection_will_fail(true /* consult_young */)) {
     if (Verbose && PrintGCDetails) {
@@ -1964,7 +1965,7 @@
   // has exceeded the threshold set by CMSFullGCsBeforeCompaction,
   // or if an incremental collection has failed
   GenCollectedHeap* gch = GenCollectedHeap::heap();
-  assert(gch->collector_policy()->is_two_generation_policy(),
+  assert(gch->collector_policy()->is_generation_policy(),
          "You may want to check the correctness of the following");
   // Inform cms gen if this was due to partial collection failing.
   // The CMS gen may use this fact to determine its expansion policy.
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -223,12 +223,6 @@
   }
 }
 
-void ConcurrentMarkSweepThread::print_on(outputStream* st) const {
-  st->print("\"%s\" ", name());
-  Thread::print_on(st);
-  st->cr();
-}
-
 void ConcurrentMarkSweepThread::print_all_on(outputStream* st) {
   if (_cmst != NULL) {
     _cmst->print_on(st);
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -94,8 +94,6 @@
   static void threads_do(ThreadClosure* tc);
 
   // Printing
-  void print_on(outputStream* st) const;
-  void print() const                                  { print_on(tty); }
   static void print_all_on(outputStream* st);
   static void print_all()                             { print_all_on(tty); }
 
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -58,6 +58,9 @@
   }
   initialize();
   create_and_start();
+
+  // set name
+  set_name("G1 Concurrent Refinement Thread#%d", worker_id);
 }
 
 void ConcurrentG1RefineThread::initialize() {
@@ -247,12 +250,3 @@
   }
 }
 
-void ConcurrentG1RefineThread::print() const {
-  print_on(tty);
-}
-
-void ConcurrentG1RefineThread::print_on(outputStream* st) const {
-  st->print("\"G1 Concurrent Refinement Thread#%d\" ", _worker_id);
-  Thread::print_on(st);
-  st->cr();
-}
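Editor's note: rather than each concurrent GC thread overriding print_on() just to prepend its name, the patch sets the name once in the constructor and lets the shared named-thread printing pick it up. A minimal sketch of that pattern (illustrative base class, not the actual HotSpot hierarchy):

    #include <cstdio>
    #include <string>

    class NamedWorker {
      std::string _name;
     public:
      void set_name(const char* fmt, int id) {
        char buf[64];
        std::snprintf(buf, sizeof(buf), fmt, id);
        _name = buf;
      }
      // One shared printer; subclasses no longer need their own print_on().
      void print_on(std::FILE* st) const { std::fprintf(st, "\"%s\" ...\n", _name.c_str()); }
    };

    class RefinementWorker : public NamedWorker {
     public:
      explicit RefinementWorker(int worker_id) {
        set_name("G1 Concurrent Refinement Thread#%d", worker_id);   // name set once, here
      }
    };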
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -77,10 +77,6 @@
 
   void initialize();
 
-  // Printing
-  void print() const;
-  void print_on(outputStream* st) const;
-
   // Total virtual time so far.
   double vtime_accum() { return _vtime_accum; }
 
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -46,6 +46,8 @@
   _in_progress(false),
   _vtime_accum(0.0),
   _vtime_mark_accum(0.0) {
+
+  set_name("G1 Main Concurrent Mark GC Thread");
   create_and_start();
 }
 
@@ -322,16 +324,6 @@
   }
 }
 
-void ConcurrentMarkThread::print() const {
-  print_on(tty);
-}
-
-void ConcurrentMarkThread::print_on(outputStream* st) const {
-  st->print("\"G1 Main Concurrent Mark GC Thread\" ");
-  Thread::print_on(st);
-  st->cr();
-}
-
 void ConcurrentMarkThread::sleepBeforeNextCycle() {
   // We join here because we don't want to do the "shouldConcurrentMark()"
   // below while the world is otherwise stopped.
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMarkThread.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMarkThread.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -60,10 +60,6 @@
   static void makeSurrogateLockerThread(TRAPS);
   static SurrogateLockerThread* slt() { return _slt; }
 
-  // Printing
-  void print_on(outputStream* st) const;
-  void print() const;
-
   // Total virtual time so far.
   double vtime_accum();
   // Marking virtual time so far
--- a/hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "gc_implementation/g1/g1AllocRegion.inline.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
+#include "runtime/orderAccess.inline.hpp"
 
 G1CollectedHeap* G1AllocRegion::_g1h = NULL;
 HeapRegion* G1AllocRegion::_dummy_region = NULL;
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -56,6 +56,7 @@
 #include "memory/referenceProcessor.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/oop.pcgc.inline.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/vmThread.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/ticks.hpp"
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -1107,20 +1107,11 @@
     return _gc_time_stamp;
   }
 
-  void reset_gc_time_stamp() {
-    _gc_time_stamp = 0;
-    OrderAccess::fence();
-    // Clear the cached CSet starting regions and time stamps.
-    // Their validity is dependent on the GC timestamp.
-    clear_cset_start_regions();
-  }
+  inline void reset_gc_time_stamp();
 
   void check_gc_time_stamps() PRODUCT_RETURN;
 
-  void increment_gc_time_stamp() {
-    ++_gc_time_stamp;
-    OrderAccess::fence();
-  }
+  inline void increment_gc_time_stamp();
 
   // Reset the given region's GC timestamp. If it's starts humongous,
   // also reset the GC timestamp of its corresponding
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -33,6 +33,7 @@
 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc_implementation/g1/heapRegionSet.inline.hpp"
 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "utilities/taskqueue.hpp"
 
 // Inline functions for G1CollectedHeap
@@ -60,6 +61,19 @@
   return hr;
 }
 
+inline void G1CollectedHeap::reset_gc_time_stamp() {
+  _gc_time_stamp = 0;
+  OrderAccess::fence();
+  // Clear the cached CSet starting regions and time stamps.
+  // Their validity is dependent on the GC timestamp.
+  clear_cset_start_regions();
+}
+
+inline void G1CollectedHeap::increment_gc_time_stamp() {
+  ++_gc_time_stamp;
+  OrderAccess::fence();
+}
+
 inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) {
   _old_set.remove(hr);
 }
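Editor's note: moving reset_gc_time_stamp() and increment_gc_time_stamp() into the .inline.hpp file keeps the OrderAccess::fence() call visible only to translation units that include the new runtime/orderAccess.inline.hpp. A rough standalone analog of the update-then-fence pattern, with std::atomic_thread_fence standing in for OrderAccess::fence():

    #include <atomic>

    struct HeapLike {
      std::atomic<unsigned> gc_time_stamp{0};

      inline void reset_gc_time_stamp() {
        gc_time_stamp.store(0, std::memory_order_relaxed);
        std::atomic_thread_fence(std::memory_order_seq_cst);   // publish before dependent state is cleared
        // clear_cset_start_regions() would follow here in the real code
      }

      inline void increment_gc_time_stamp() {
        gc_time_stamp.fetch_add(1, std::memory_order_relaxed);
        std::atomic_thread_fence(std::memory_order_seq_cst);   // make the new stamp visible before proceeding
      }
    };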
--- a/hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -27,6 +27,7 @@
 #include "gc_implementation/g1/heapRegion.hpp"
 #include "gc_implementation/g1/satbQueue.hpp"
 #include "runtime/mutexLocker.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/thread.inline.hpp"
 
 G1SATBCardTableModRefBS::G1SATBCardTableModRefBS(MemRegion whole_heap,
--- a/hotspot/src/share/vm/gc_implementation/g1/g1StringDedupThread.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1StringDedupThread.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -53,12 +53,6 @@
   return _thread;
 }
 
-void G1StringDedupThread::print_on(outputStream* st) const {
-  st->print("\"%s\" ", name());
-  Thread::print_on(st);
-  st->cr();
-}
-
 void G1StringDedupThread::run() {
   G1StringDedupStat total_stat;
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1StringDedupThread.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1StringDedupThread.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -52,7 +52,6 @@
   static G1StringDedupThread* thread();
 
   virtual void run();
-  virtual void print_on(outputStream* st) const;
 };
 
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1STRINGDEDUPTHREAD_HPP
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -33,6 +33,7 @@
 #include "memory/genOopClosures.inline.hpp"
 #include "memory/iterator.hpp"
 #include "oops/oop.inline.hpp"
+#include "runtime/orderAccess.inline.hpp"
 
 int    HeapRegion::LogOfHRGrainBytes = 0;
 int    HeapRegion::LogOfHRGrainWords = 0;
--- a/hotspot/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -32,6 +32,7 @@
 #include "oops/oop.inline.hpp"
 #include "runtime/java.hpp"
 #include "runtime/mutexLocker.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/virtualspace.hpp"
 #include "runtime/vmThread.hpp"
 
--- a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -50,7 +50,7 @@
 #include "runtime/handles.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/java.hpp"
-#include "runtime/thread.hpp"
+#include "runtime/thread.inline.hpp"
 #include "utilities/copy.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/workgroup.hpp"
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -30,6 +30,7 @@
 #include "memory/allocation.inline.hpp"
 #include "runtime/mutex.hpp"
 #include "runtime/mutexLocker.hpp"
+#include "runtime/orderAccess.inline.hpp"
 
 //
 // GCTask
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/gcTaskThread.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/gcTaskThread.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -87,12 +87,6 @@
   _time_stamp_index = 0;
 }
 
-void GCTaskThread::print_on(outputStream* st) const {
-  st->print("\"%s\" ", name());
-  Thread::print_on(st);
-  st->cr();
-}
-
 // GC workers get tasks from the GCTaskManager and execute
 // them in this method.  If there are no tasks to execute,
 // the GC workers wait in the GCTaskManager's get_task()
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/gcTaskThread.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/gcTaskThread.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -69,8 +69,6 @@
   void start();
 
   void print_task_time_stamps();
-  void print_on(outputStream* st) const;
-  void print() const                                { print_on(tty); }
 
 protected:
   // Constructor.  Clients use factory, but there could be subclasses.
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/generationSizer.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/generationSizer.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -45,7 +45,7 @@
 
 void GenerationSizer::initialize_flags() {
   // Do basic sizing work
-  TwoGenerationCollectorPolicy::initialize_flags();
+  GenCollectorPolicy::initialize_flags();
 
   assert(UseSerialGC ||
           !FLAG_IS_DEFAULT(ParallelGCThreads) ||
@@ -79,7 +79,7 @@
     // Redo everything from the start
     initialize_flags();
   }
-  TwoGenerationCollectorPolicy::initialize_size_info();
+  GenCollectorPolicy::initialize_size_info();
 
   trace_gen_sizes("ps heap rnd");
 }
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/generationSizer.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/generationSizer.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -28,9 +28,9 @@
 #include "memory/collectorPolicy.hpp"
 
 // There is a nice batch of tested generation sizing code in
-// TwoGenerationCollectorPolicy. Lets reuse it!
+// GenCollectorPolicy. Lets reuse it!
+// GenCollectorPolicy. Let's reuse it!
 
-class GenerationSizer : public TwoGenerationCollectorPolicy {
+class GenerationSizer : public GenCollectorPolicy {
  private:
 
   void trace_gen_sizes(const char* const str);
--- a/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -41,43 +41,10 @@
 #include "runtime/frame.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/interfaceSupport.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/threadCritical.hpp"
 #include "utilities/exceptions.hpp"
-#ifdef TARGET_OS_ARCH_linux_x86
-# include "orderAccess_linux_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_sparc
-# include "orderAccess_linux_sparc.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_zero
-# include "orderAccess_linux_zero.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_solaris_x86
-# include "orderAccess_solaris_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_solaris_sparc
-# include "orderAccess_solaris_sparc.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_windows_x86
-# include "orderAccess_windows_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_arm
-# include "orderAccess_linux_arm.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_ppc
-# include "orderAccess_linux_ppc.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_aix_ppc
-# include "orderAccess_aix_ppc.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_bsd_x86
-# include "orderAccess_bsd_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_bsd_zero
-# include "orderAccess_bsd_zero.inline.hpp"
-#endif
-
 
 // no precompiled headers
 #ifdef CC_INTERP
@@ -507,25 +474,7 @@
 
 #ifdef ASSERT
   if (istate->_msg != initialize) {
-    // We have a problem here if we are running with a pre-hsx24 JDK (for example during bootstrap)
-    // because in that case, EnableInvokeDynamic is true by default but will be later switched off
-    // if java_lang_invoke_MethodHandle::compute_offsets() detects that the JDK only has the classes
-    // for the old JSR292 implementation.
-    // This leads to a situation where 'istate->_stack_limit' always accounts for
-    // methodOopDesc::extra_stack_entries() because it is computed in
-    // CppInterpreterGenerator::generate_compute_interpreter_state() which was generated while
-    // EnableInvokeDynamic was still true. On the other hand, istate->_method->max_stack() doesn't
-    // account for extra_stack_entries() anymore because at the time when it is called
-    // EnableInvokeDynamic was already set to false.
-    // So we have a second version of the assertion which handles the case where EnableInvokeDynamic was
-    // switched off because of the wrong classes.
-    if (EnableInvokeDynamic || FLAG_IS_CMDLINE(EnableInvokeDynamic)) {
-      assert(labs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + 1), "bad stack limit");
-    } else {
-      const int extra_stack_entries = Method::extra_stack_entries_for_jsr292;
-      assert(labs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + extra_stack_entries
-                                                                                               + 1), "bad stack limit");
-    }
+    assert(labs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + 1), "bad stack limit");
 #ifndef SHARK
     IA32_ONLY(assert(istate->_stack_limit == istate->_thread->last_Java_sp() + 1, "wrong"));
 #endif // !SHARK
@@ -2458,15 +2407,6 @@
 
       CASE(_invokedynamic): {
 
-        if (!EnableInvokeDynamic) {
-          // We should not encounter this bytecode if !EnableInvokeDynamic.
-          // The verifier will stop it.  However, if we get past the verifier,
-          // this will stop the thread in a reasonable way, without crashing the JVM.
-          CALL_VM(InterpreterRuntime::throw_IncompatibleClassChangeError(THREAD),
-                  handle_exception);
-          ShouldNotReachHere();
-        }
-
         u4 index = Bytes::get_native_u4(pc+1);
         ConstantPoolCacheEntry* cache = cp->constant_pool()->invokedynamic_cp_cache_entry_at(index);
 
@@ -2501,10 +2441,6 @@
 
       CASE(_invokehandle): {
 
-        if (!EnableInvokeDynamic) {
-          ShouldNotReachHere();
-        }
-
         u2 index = Bytes::get_native_u2(pc+1);
         ConstantPoolCacheEntry* cache = cp->entry_at(index);
 
--- a/hotspot/src/share/vm/interpreter/bytecodes.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/interpreter/bytecodes.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -224,7 +224,7 @@
     _invokespecial        = 183, // 0xb7
     _invokestatic         = 184, // 0xb8
     _invokeinterface      = 185, // 0xb9
-    _invokedynamic        = 186, // 0xba     // if EnableInvokeDynamic
+    _invokedynamic        = 186, // 0xba
     _new                  = 187, // 0xbb
     _newarray             = 188, // 0xbc
     _anewarray            = 189, // 0xbd
--- a/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -769,7 +769,6 @@
 
 // First time execution:  Resolve symbols, create a permanent MethodType object.
 IRT_ENTRY(void, InterpreterRuntime::resolve_invokehandle(JavaThread* thread)) {
-  assert(EnableInvokeDynamic, "");
   const Bytecodes::Code bytecode = Bytecodes::_invokehandle;
 
   // resolve method
@@ -789,7 +788,6 @@
 
 // First time execution:  Resolve symbols, create a permanent CallSite object.
 IRT_ENTRY(void, InterpreterRuntime::resolve_invokedynamic(JavaThread* thread)) {
-  assert(EnableInvokeDynamic, "");
   const Bytecodes::Code bytecode = Bytecodes::_invokedynamic;
 
   //TO DO: consider passing BCI to Java.
--- a/hotspot/src/share/vm/interpreter/linkResolver.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/interpreter/linkResolver.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -270,7 +270,7 @@
     }
   }
 
-  if (checkpolymorphism && EnableInvokeDynamic && result_oop != NULL) {
+  if (checkpolymorphism && result_oop != NULL) {
     vmIntrinsics::ID iid = result_oop->intrinsic_id();
     if (MethodHandles::is_signature_polymorphic(iid)) {
       // Do not link directly to these.  The VM must produce a synthetic one using lookup_polymorphic_method.
@@ -345,8 +345,7 @@
                   vmIntrinsics::name_at(iid), klass->external_name(),
                   name->as_C_string(), full_signature->as_C_string());
   }
-  if (EnableInvokeDynamic &&
-      klass() == SystemDictionary::MethodHandle_klass() &&
+  if (klass() == SystemDictionary::MethodHandle_klass() &&
       iid != vmIntrinsics::_none) {
     if (MethodHandles::is_signature_polymorphic_intrinsic(iid)) {
       // Most of these do not need an up-call to Java to resolve, so can be done anywhere.
@@ -1543,7 +1542,6 @@
 
 
 void LinkResolver::resolve_invokehandle(CallInfo& result, constantPoolHandle pool, int index, TRAPS) {
-  assert(EnableInvokeDynamic, "");
   // This guy is reached from InterpreterRuntime::resolve_invokehandle.
   KlassHandle  resolved_klass;
   Symbol* method_name = NULL;
@@ -1575,8 +1573,6 @@
 
 
 void LinkResolver::resolve_invokedynamic(CallInfo& result, constantPoolHandle pool, int index, TRAPS) {
-  assert(EnableInvokeDynamic, "");
-
   //resolve_pool(<resolved_klass>, method_name, method_signature, current_klass, pool, index, CHECK);
   Symbol* method_name       = pool->name_ref_at(index);
   Symbol* method_signature  = pool->signature_ref_at(index);
--- a/hotspot/src/share/vm/memory/binaryTreeDictionary.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/memory/binaryTreeDictionary.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -120,7 +120,7 @@
   // chunk.
 
   TreeList<FreeChunk, ::AdaptiveFreeList<FreeChunk> >* curTL = this;
-  if (surplus() <= 0) {
+  if (curTL->surplus() <= 0) {
     /* Use the hint to find a size with a surplus, and reset the hint. */
     TreeList<FreeChunk, ::AdaptiveFreeList<FreeChunk> >* hintTL = this;
     while (hintTL->hint() != 0) {
--- a/hotspot/src/share/vm/memory/collectorPolicy.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/memory/collectorPolicy.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -200,6 +200,9 @@
     _initial_gen0_size(0),
     _max_gen0_size(0),
     _gen_alignment(0),
+    _min_gen1_size(0),
+    _initial_gen1_size(0),
+    _max_gen1_size(0),
     _generations(NULL)
 {}
 
@@ -238,10 +241,6 @@
   assert(FLAG_IS_DEFAULT(MaxNewSize) || MaxNewSize < MaxHeapSize, "Ergonomics decided on incompatible maximum young gen and heap sizes");
   assert(NewSize % _gen_alignment == 0, "NewSize alignment");
   assert(FLAG_IS_DEFAULT(MaxNewSize) || MaxNewSize % _gen_alignment == 0, "MaxNewSize alignment");
-}
-
-void TwoGenerationCollectorPolicy::assert_flags() {
-  GenCollectorPolicy::assert_flags();
   assert(OldSize + NewSize <= MaxHeapSize, "Ergonomics decided on incompatible generation and heap sizes");
   assert(OldSize % _gen_alignment == 0, "OldSize alignment");
 }
@@ -252,6 +251,7 @@
   assert(MaxNewSize < MaxHeapSize, "Ergonomics decided on incompatible maximum young and heap sizes");
   assert(NewSize == _initial_gen0_size, "Discrepancy between NewSize flag and local storage");
   assert(MaxNewSize == _max_gen0_size, "Discrepancy between MaxNewSize flag and local storage");
+  assert(OldSize == _initial_gen1_size, "Discrepancy between OldSize flag and local storage");
   assert(_min_gen0_size <= _initial_gen0_size, "Ergonomics decided on incompatible minimum and initial young gen sizes");
   assert(_initial_gen0_size <= _max_gen0_size, "Ergonomics decided on incompatible initial and maximum young gen sizes");
   assert(_min_gen0_size % _gen_alignment == 0, "_min_gen0_size alignment");
@@ -263,11 +263,6 @@
       "Ergonomics made initial young generation larger than initial heap");
   assert(_max_gen0_size <= bound_minus_alignment(_max_gen0_size, _max_heap_byte_size),
       "Ergonomics made maximum young generation lager than maximum heap");
-}
-
-void TwoGenerationCollectorPolicy::assert_size_info() {
-  GenCollectorPolicy::assert_size_info();
-  assert(OldSize == _initial_gen1_size, "Discrepancy between OldSize flag and local storage");
   assert(_min_gen1_size <= _initial_gen1_size, "Ergonomics decided on incompatible minimum and initial old gen sizes");
   assert(_initial_gen1_size <= _max_gen1_size, "Ergonomics decided on incompatible initial and maximum old gen sizes");
   assert(_max_gen1_size % _gen_alignment == 0, "_max_gen1_size alignment");
@@ -369,12 +364,6 @@
     vm_exit_during_initialization("Invalid young gen ratio specified");
   }
 
-  DEBUG_ONLY(GenCollectorPolicy::assert_flags();)
-}
-
-void TwoGenerationCollectorPolicy::initialize_flags() {
-  GenCollectorPolicy::initialize_flags();
-
   if (!is_size_aligned(OldSize, _gen_alignment)) {
     // Setting OldSize directly to preserve information about the possible
     // setting of OldSize on the command line.
@@ -433,7 +422,7 @@
 
   always_do_update_barrier = UseConcMarkSweepGC;
 
-  DEBUG_ONLY(TwoGenerationCollectorPolicy::assert_flags();)
+  DEBUG_ONLY(GenCollectorPolicy::assert_flags();)
 }
 
 // Values set on the command line win over any ergonomically
@@ -445,6 +434,13 @@
 // themselves and with overall heap sizings.
 // In the absence of explicitly set command line flags, policies
 // such as the use of NewRatio are used to size the generation.
+
+// Minimum sizes of the generations may be different than
+// the initial sizes.  An inconsistency is permitted here
+// in the total size that can be specified explicitly by
+// command line specification of OldSize and NewSize and
+// also a command line specification of -Xms.  Issue a warning
+// but allow the values to pass.
 void GenCollectorPolicy::initialize_size_info() {
   CollectorPolicy::initialize_size_info();
 
@@ -520,19 +516,6 @@
       _min_gen0_size, _initial_gen0_size, _max_gen0_size);
   }
 
-  DEBUG_ONLY(GenCollectorPolicy::assert_size_info();)
-}
-
-// Minimum sizes of the generations may be different than
-// the initial sizes.  An inconsistency is permitted here
-// in the total size that can be specified explicitly by
-// command line specification of OldSize and NewSize and
-// also a command line specification of -Xms.  Issue a warning
-// but allow the values to pass.
-
-void TwoGenerationCollectorPolicy::initialize_size_info() {
-  GenCollectorPolicy::initialize_size_info();
-
   // At this point the minimum, initial and maximum sizes
   // of the overall heap and of gen0 have been determined.
   // The maximum gen1 size can be determined from the maximum gen0
@@ -625,7 +608,7 @@
       _min_gen1_size, _initial_gen1_size, _max_gen1_size);
   }
 
-  DEBUG_ONLY(TwoGenerationCollectorPolicy::assert_size_info();)
+  DEBUG_ONLY(GenCollectorPolicy::assert_size_info();)
 }
 
 HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
--- a/hotspot/src/share/vm/memory/collectorPolicy.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/memory/collectorPolicy.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -47,7 +47,6 @@
 
 // Forward declarations.
 class GenCollectorPolicy;
-class TwoGenerationCollectorPolicy;
 class AdaptiveSizePolicy;
 #if INCLUDE_ALL_GCS
 class ConcurrentMarkSweepPolicy;
@@ -114,7 +113,7 @@
 
   enum Name {
     CollectorPolicyKind,
-    TwoGenerationCollectorPolicyKind,
+    GenCollectorPolicyKind,
     ConcurrentMarkSweepPolicyKind,
     ASConcurrentMarkSweepPolicyKind,
     G1CollectorPolicyKind
@@ -135,7 +134,6 @@
 
   // Identification methods.
   virtual GenCollectorPolicy*           as_generation_policy()            { return NULL; }
-  virtual TwoGenerationCollectorPolicy* as_two_generation_policy()        { return NULL; }
   virtual MarkSweepPolicy*              as_mark_sweep_policy()            { return NULL; }
 #if INCLUDE_ALL_GCS
   virtual ConcurrentMarkSweepPolicy*    as_concurrent_mark_sweep_policy() { return NULL; }
@@ -143,7 +141,6 @@
 #endif // INCLUDE_ALL_GCS
   // Note that these are not virtual.
   bool is_generation_policy()            { return as_generation_policy() != NULL; }
-  bool is_two_generation_policy()        { return as_two_generation_policy() != NULL; }
   bool is_mark_sweep_policy()            { return as_mark_sweep_policy() != NULL; }
 #if INCLUDE_ALL_GCS
   bool is_concurrent_mark_sweep_policy() { return as_concurrent_mark_sweep_policy() != NULL; }
@@ -225,6 +222,9 @@
   size_t _min_gen0_size;
   size_t _initial_gen0_size;
   size_t _max_gen0_size;
+  size_t _min_gen1_size;
+  size_t _initial_gen1_size;
+  size_t _max_gen1_size;
 
   // _gen_alignment and _space_alignment will have the same value most of the
   // time. When using large pages they can differ.
@@ -264,8 +264,11 @@
   size_t initial_gen0_size() { return _initial_gen0_size; }
   size_t max_gen0_size()     { return _max_gen0_size; }
   size_t gen_alignment()     { return _gen_alignment; }
+  size_t min_gen1_size()     { return _min_gen1_size; }
+  size_t initial_gen1_size() { return _initial_gen1_size; }
+  size_t max_gen1_size()     { return _max_gen1_size; }
 
-  virtual int number_of_generations() = 0;
+  int number_of_generations() { return 2; }
 
   virtual GenerationSpec **generations() {
     assert(_generations != NULL, "Sanity check");
@@ -297,47 +300,15 @@
   virtual void post_heap_initialize() {
     assert(_max_gen0_size == MaxNewSize, "Should be taken care of by initialize_size_info");
   }
-};
 
-// All of hotspot's current collectors are subtypes of this
-// class. Currently, these collectors all use the same gen[0],
-// but have different gen[1] types. If we add another subtype
-// of CollectorPolicy, this class should be broken out into
-// its own file.
-
-class TwoGenerationCollectorPolicy : public GenCollectorPolicy {
- protected:
-  size_t _min_gen1_size;
-  size_t _initial_gen1_size;
-  size_t _max_gen1_size;
-
-  void initialize_flags();
-  void initialize_size_info();
-
-  DEBUG_ONLY(void assert_flags();)
-  DEBUG_ONLY(void assert_size_info();)
-
- public:
-  TwoGenerationCollectorPolicy() : GenCollectorPolicy(), _min_gen1_size(0),
-    _initial_gen1_size(0), _max_gen1_size(0) {}
-
-  // Accessors
-  size_t min_gen1_size()     { return _min_gen1_size; }
-  size_t initial_gen1_size() { return _initial_gen1_size; }
-  size_t max_gen1_size()     { return _max_gen1_size; }
-
-  // Inherited methods
-  TwoGenerationCollectorPolicy* as_two_generation_policy() { return this; }
-
-  int number_of_generations()          { return 2; }
   BarrierSet::Name barrier_set_name()  { return BarrierSet::CardTableModRef; }
 
   virtual CollectorPolicy::Name kind() {
-    return CollectorPolicy::TwoGenerationCollectorPolicyKind;
+    return CollectorPolicy::GenCollectorPolicyKind;
   }
 };
 
-class MarkSweepPolicy : public TwoGenerationCollectorPolicy {
+class MarkSweepPolicy : public GenCollectorPolicy {
  protected:
   void initialize_alignments();
   void initialize_generations();
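
The collectorPolicy hunks above fold TwoGenerationCollectorPolicy into GenCollectorPolicy: the gen1 size fields and accessors, the fixed two-generation answer, and the policy Kind all move into the base class, and MarkSweepPolicy now derives from GenCollectorPolicy directly. The following standalone C++ sketch (simplified names, not HotSpot source) illustrates the shape of the hierarchy after the merge.

#include <cstddef>

// Minimal sketch of the collapsed policy hierarchy after this change.
// GenCollectorPolicy now owns both generations' sizing state directly.
class CollectorPolicy {
 public:
  enum Name { CollectorPolicyKind, GenCollectorPolicyKind, G1CollectorPolicyKind };
  virtual ~CollectorPolicy() {}
  virtual Name kind() { return CollectorPolicyKind; }
};

class GenCollectorPolicy : public CollectorPolicy {
 protected:
  size_t _min_gen0_size, _initial_gen0_size, _max_gen0_size;
  size_t _min_gen1_size, _initial_gen1_size, _max_gen1_size;  // formerly in TwoGenerationCollectorPolicy
 public:
  GenCollectorPolicy()
    : _min_gen0_size(0), _initial_gen0_size(0), _max_gen0_size(0),
      _min_gen1_size(0), _initial_gen1_size(0), _max_gen1_size(0) {}
  int number_of_generations() { return 2; }            // no longer pure virtual
  size_t min_gen1_size()      { return _min_gen1_size; }
  size_t initial_gen1_size()  { return _initial_gen1_size; }
  size_t max_gen1_size()      { return _max_gen1_size; }
  virtual Name kind() { return GenCollectorPolicyKind; }
};

// MarkSweepPolicy (and, analogously, the CMS policies) now derive from GenCollectorPolicy.
class MarkSweepPolicy : public GenCollectorPolicy {};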
--- a/hotspot/src/share/vm/memory/gcLocker.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/memory/gcLocker.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -26,6 +26,7 @@
 #include "memory/gcLocker.inline.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/sharedHeap.hpp"
+#include "runtime/thread.inline.hpp"
 
 volatile jint GC_locker::_jni_lock_count = 0;
 volatile bool GC_locker::_needs_gc       = false;
--- a/hotspot/src/share/vm/memory/genCollectedHeap.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -197,9 +197,8 @@
 
 void GenCollectedHeap::post_initialize() {
   SharedHeap::post_initialize();
-  TwoGenerationCollectorPolicy *policy =
-    (TwoGenerationCollectorPolicy *)collector_policy();
-  guarantee(policy->is_two_generation_policy(), "Illegal policy type");
+  GenCollectorPolicy *policy = (GenCollectorPolicy *)collector_policy();
+  guarantee(policy->is_generation_policy(), "Illegal policy type");
   DefNewGeneration* def_new_gen = (DefNewGeneration*) get_gen(0);
   assert(def_new_gen->kind() == Generation::DefNew ||
          def_new_gen->kind() == Generation::ParNew ||
--- a/hotspot/src/share/vm/memory/genCollectedHeap.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -453,7 +453,7 @@
     // Assumes a 2-generation system; the first disjunct remembers if an
     // incremental collection failed, even when we thought (second disjunct)
     // that it would not.
-    assert(heap()->collector_policy()->is_two_generation_policy(),
+    assert(heap()->collector_policy()->is_generation_policy(),
            "the following definition may not be suitable for an n(>2)-generation system");
     return incremental_collection_failed() ||
            (consult_young && !get_gen(0)->collection_attempt_is_safe());
--- a/hotspot/src/share/vm/memory/metaspace.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/memory/metaspace.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -42,7 +42,7 @@
 #include "runtime/init.hpp"
 #include "runtime/java.hpp"
 #include "runtime/mutex.hpp"
-#include "runtime/orderAccess.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "services/memTracker.hpp"
 #include "services/memoryService.hpp"
 #include "utilities/copy.hpp"
--- a/hotspot/src/share/vm/memory/space.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/memory/space.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -37,6 +37,7 @@
 #include "oops/oop.inline.hpp"
 #include "oops/oop.inline2.hpp"
 #include "runtime/java.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/safepoint.hpp"
 #include "utilities/copy.hpp"
 #include "utilities/globalDefinitions.hpp"
--- a/hotspot/src/share/vm/oops/constantPool.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/oops/constantPool.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -223,14 +223,14 @@
 
 
   // The original attempt to resolve this constant pool entry failed so find the
-  // original error and throw it again (JVMS 5.4.3).
+  // class of the original error and throw another error of the same class (JVMS 5.4.3).
+  // If there is a detail message, pass that detail message to the error constructor.
+  // The JVMS does not strictly require us to duplicate the same detail message,
+  // or any internal exception fields such as cause or stacktrace.  But since the
+  // detail message is often a class name or other literal string, we will repeat it if
+  // we can find it in the symbol table.
   if (in_error) {
-    Symbol* error = SystemDictionary::find_resolution_error(this_cp, which);
-    guarantee(error != (Symbol*)NULL, "tag mismatch with resolution error table");
-    ResourceMark rm;
-    // exception text will be the class name
-    const char* className = this_cp->unresolved_klass_at(which)->as_C_string();
-    THROW_MSG_0(error, className);
+    throw_resolution_error(this_cp, which, CHECK_0);
   }
 
   if (do_resolve) {
@@ -250,11 +250,6 @@
     // Failed to resolve class. We must record the errors so that subsequent attempts
     // to resolve this constant pool entry fail with the same error (JVMS 5.4.3).
     if (HAS_PENDING_EXCEPTION) {
-      ResourceMark rm;
-      Symbol* error = PENDING_EXCEPTION->klass()->name();
-
-      bool throw_orig_error = false;
-      {
         MonitorLockerEx ml(this_cp->lock());
 
         // some other thread has beaten us and has resolved the class.
@@ -264,32 +259,9 @@
           return entry.get_klass();
         }
 
-        if (!PENDING_EXCEPTION->
-              is_a(SystemDictionary::LinkageError_klass())) {
-          // Just throw the exception and don't prevent these classes from
-          // being loaded due to virtual machine errors like StackOverflow
-          // and OutOfMemoryError, etc, or if the thread was hit by stop()
-          // Needs clarification to section 5.4.3 of the VM spec (see 6308271)
-        }
-        else if (!this_cp->tag_at(which).is_unresolved_klass_in_error()) {
-          SystemDictionary::add_resolution_error(this_cp, which, error);
-          this_cp->tag_at_put(which, JVM_CONSTANT_UnresolvedClassInError);
-        } else {
-          // some other thread has put the class in error state.
-          error = SystemDictionary::find_resolution_error(this_cp, which);
-          assert(error != NULL, "checking");
-          throw_orig_error = true;
-        }
-      } // unlocked
-
-      if (throw_orig_error) {
-        CLEAR_PENDING_EXCEPTION;
-        ResourceMark rm;
-        const char* className = this_cp->unresolved_klass_at(which)->as_C_string();
-        THROW_MSG_0(error, className);
-      }
-
-      return 0;
+        // The tag could have changed to in-error before the lock but we have to
+        // handle that here for the class case.
+        save_and_throw_exception(this_cp, which, constantTag(JVM_CONSTANT_UnresolvedClass), CHECK_0);
     }
 
     if (TraceClassResolution && !k()->oop_is_array()) {
@@ -587,16 +559,55 @@
   return true;
 }
 
-// If resolution for MethodHandle or MethodType fails, save the exception
+Symbol* ConstantPool::exception_message(constantPoolHandle this_cp, int which, constantTag tag, oop pending_exception) {
+  // Dig out the detailed message to reuse if possible
+  Symbol* message = NULL;
+  oop detailed_message = java_lang_Throwable::message(pending_exception);
+  if (detailed_message != NULL) {
+     message = java_lang_String::as_symbol_or_null(detailed_message);
+     if (message != NULL) {
+       return message;
+     }
+  }
+
+  // Return specific message for the tag
+  switch (tag.value()) {
+  case JVM_CONSTANT_UnresolvedClass:
+    // return the class name in the error message
+    message = this_cp->unresolved_klass_at(which);
+    break;
+  case JVM_CONSTANT_MethodHandle:
+    // return the method handle name in the error message
+    message = this_cp->method_handle_name_ref_at(which);
+    break;
+  case JVM_CONSTANT_MethodType:
+    // return the method type signature in the error message
+    message = this_cp->method_type_signature_at(which);
+    break;
+  default:
+    ShouldNotReachHere();
+  }
+
+  return message;
+}
+
+void ConstantPool::throw_resolution_error(constantPoolHandle this_cp, int which, TRAPS) {
+  Symbol* message = NULL;
+  Symbol* error = SystemDictionary::find_resolution_error(this_cp, which, &message);
+  assert(error != NULL && message != NULL, "checking");
+  CLEAR_PENDING_EXCEPTION;
+  ResourceMark rm;
+  THROW_MSG(error, message->as_C_string());
+}
+
+// If resolution for Class, MethodHandle or MethodType fails, save the exception
 // in the resolution error table, so that the same exception is thrown again.
 void ConstantPool::save_and_throw_exception(constantPoolHandle this_cp, int which,
-                                     int tag, TRAPS) {
-  ResourceMark rm;
+                                            constantTag tag, TRAPS) {
+  assert(this_cp->lock()->is_locked(), "constant pool lock should be held");
   Symbol* error = PENDING_EXCEPTION->klass()->name();
-  MonitorLockerEx ml(this_cp->lock());  // lock cpool to change tag.
 
-  int error_tag = (tag == JVM_CONSTANT_MethodHandle) ?
-           JVM_CONSTANT_MethodHandleInError : JVM_CONSTANT_MethodTypeInError;
+  int error_tag = tag.error_value();
 
   if (!PENDING_EXCEPTION->
     is_a(SystemDictionary::LinkageError_klass())) {
@@ -604,20 +615,21 @@
     // being loaded due to virtual machine errors like StackOverflow
     // and OutOfMemoryError, etc, or if the thread was hit by stop()
     // Needs clarification to section 5.4.3 of the VM spec (see 6308271)
-
   } else if (this_cp->tag_at(which).value() != error_tag) {
-    SystemDictionary::add_resolution_error(this_cp, which, error);
+    Symbol* message = exception_message(this_cp, which, tag, PENDING_EXCEPTION);
+    SystemDictionary::add_resolution_error(this_cp, which, error, message);
     this_cp->tag_at_put(which, error_tag);
   } else {
-    // some other thread has put the class in error state.
-    error = SystemDictionary::find_resolution_error(this_cp, which);
-    assert(error != NULL, "checking");
-    CLEAR_PENDING_EXCEPTION;
-    THROW_MSG(error, "");
+    // some other thread put this in error state
+    throw_resolution_error(this_cp, which, CHECK);
   }
+
+  // This exits with some pending exception
+  assert(HAS_PENDING_EXCEPTION, "should not be cleared");
 }
 
 
+
 // Called to resolve constants in the constant pool and return an oop.
 // Some constant pool entries cache their resolved oop. This is also
 // called to create oops from constants to use in arguments for invokedynamic
@@ -645,9 +657,9 @@
 
   jvalue prim_value;  // temp used only in a few cases below
 
-  int tag_value = this_cp->tag_at(index).value();
+  constantTag tag = this_cp->tag_at(index);
 
-  switch (tag_value) {
+  switch (tag.value()) {
 
   case JVM_CONSTANT_UnresolvedClass:
   case JVM_CONSTANT_UnresolvedClassInError:
@@ -672,10 +684,7 @@
   case JVM_CONSTANT_MethodHandleInError:
   case JVM_CONSTANT_MethodTypeInError:
     {
-      Symbol* error = SystemDictionary::find_resolution_error(this_cp, index);
-      guarantee(error != (Symbol*)NULL, "tag mismatch with resolution error table");
-      ResourceMark rm;
-      THROW_MSG_0(error, "");
+      throw_resolution_error(this_cp, index, CHECK_NULL);
       break;
     }
 
@@ -699,7 +708,8 @@
                                                                    THREAD);
       result_oop = value();
       if (HAS_PENDING_EXCEPTION) {
-        save_and_throw_exception(this_cp, index, tag_value, CHECK_NULL);
+        MonitorLockerEx ml(this_cp->lock());  // lock cpool to change tag.
+        save_and_throw_exception(this_cp, index, tag, CHECK_NULL);
       }
       break;
     }
@@ -715,7 +725,8 @@
       Handle value = SystemDictionary::find_method_handle_type(signature, klass, THREAD);
       result_oop = value();
       if (HAS_PENDING_EXCEPTION) {
-        save_and_throw_exception(this_cp, index, tag_value, CHECK_NULL);
+        MonitorLockerEx ml(this_cp->lock());  // lock cpool to change tag.
+        save_and_throw_exception(this_cp, index, tag, CHECK_NULL);
       }
       break;
     }
@@ -746,7 +757,7 @@
 
   default:
     DEBUG_ONLY( tty->print_cr("*** %p: tag at CP[%d/%d] = %d",
-                              this_cp(), index, cache_index, tag_value) );
+                              this_cp(), index, cache_index, tag.value()));
     assert(false, "unexpected constant tag");
     break;
   }
@@ -1827,9 +1838,7 @@
 // We can't do this during classfile parsing, which is how the other indexes are
 // patched.  The other patches are applied early for some error checking
 // so only defer the pseudo_strings.
-void ConstantPool::patch_resolved_references(
-                                            GrowableArray<Handle>* cp_patches) {
-  assert(EnableInvokeDynamic, "");
+void ConstantPool::patch_resolved_references(GrowableArray<Handle>* cp_patches) {
   for (int index = 1; index < cp_patches->length(); index++) { // Index 0 is unused
     Handle patch = cp_patches->at(index);
     if (patch.not_null()) {
--- a/hotspot/src/share/vm/oops/constantPool.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/oops/constantPool.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -413,9 +413,9 @@
   // Version that can be used before string oop array is created.
   oop uncached_string_at(int which, TRAPS);
 
-  // A "pseudo-string" is an non-string oop that has found is way into
+  // A "pseudo-string" is a non-string oop that has found its way into
   // a String entry.
-  // Under EnableInvokeDynamic this can happen if the user patches a live
+  // This can happen if the user patches a live
   // object into a CONSTANT_String entry of an anonymous class.
   // Method oops internally created for method handles may also
   // use pseudo-strings to link themselves to related metaobjects.
@@ -441,7 +441,6 @@
   }
 
   void pseudo_string_at_put(int which, int obj_index, oop x) {
-    assert(EnableInvokeDynamic, "");
     assert(tag_at(which).is_string(), "Corrupted constant pool");
     unresolved_string_at_put(which, NULL); // indicates patched string
     string_at_put(which, obj_index, x);    // this works just fine
@@ -823,9 +822,13 @@
   static void resolve_string_constants_impl(constantPoolHandle this_cp, TRAPS);
 
   static oop resolve_constant_at_impl(constantPoolHandle this_cp, int index, int cache_index, TRAPS);
-  static void save_and_throw_exception(constantPoolHandle this_cp, int which, int tag_value, TRAPS);
   static oop resolve_bootstrap_specifier_at_impl(constantPoolHandle this_cp, int index, TRAPS);
 
+  // Exception handling
+  static void throw_resolution_error(constantPoolHandle this_cp, int which, TRAPS);
+  static Symbol* exception_message(constantPoolHandle this_cp, int which, constantTag tag, oop pending_exception);
+  static void save_and_throw_exception(constantPoolHandle this_cp, int which, constantTag tag, TRAPS);
+
  public:
   // Merging ConstantPool* support:
   bool compare_entry_to(int index1, constantPoolHandle cp2, int index2, TRAPS);
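
Taken together, the constantPool.cpp/hpp hunks centralize resolution-error handling: the first failed resolution of a Class, MethodHandle or MethodType entry records the error class and, when one can be recovered, the detail message in the resolution error table and flips the tag to the matching *InError value; every later attempt calls throw_resolution_error() and rethrows an equivalent error. The standalone sketch below models that record-once / rethrow-later pattern (it is illustrative only; a std::map keyed by constant pool index stands in for SystemDictionary's resolution error table).

#include <map>
#include <stdexcept>
#include <string>

// Sketch of "save the first error, rethrow an equivalent one on every later attempt".
struct ResolutionError { std::string error_class; std::string message; };
static std::map<int, ResolutionError> resolution_errors;   // keyed by cp index ("which")

static void save_and_throw(int which, const std::string& error_class,
                           const std::string& message) {
  // Record only the first failure; a later failure for the same entry reuses the stored one.
  resolution_errors.insert(std::make_pair(which, ResolutionError{error_class, message}));
  throw std::runtime_error(error_class + ": " + message);
}

static void throw_resolution_error(int which) {
  // Must exist: the tag already says "in error", so the table was populated earlier.
  const ResolutionError& e = resolution_errors.at(which);
  throw std::runtime_error(e.error_class + ": " + e.message);
}

In the real code the detail message is reused only when it can be found or interned as a Symbol (see exception_message()), which is why the JVMS-permitted fallback messages per tag exist.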
--- a/hotspot/src/share/vm/oops/cpCache.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/oops/cpCache.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -33,6 +33,7 @@
 #include "prims/jvmtiRedefineClassesTrace.hpp"
 #include "prims/methodHandles.hpp"
 #include "runtime/handles.inline.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "utilities/macros.hpp"
 #if INCLUDE_ALL_GCS
 # include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
--- a/hotspot/src/share/vm/oops/cpCache.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/oops/cpCache.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -27,6 +27,7 @@
 
 #include "interpreter/bytecodes.hpp"
 #include "memory/allocation.hpp"
+#include "runtime/orderAccess.hpp"
 #include "utilities/array.hpp"
 
 class PSPromotionManager;
--- a/hotspot/src/share/vm/oops/instanceKlass.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/oops/instanceKlass.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -55,6 +55,7 @@
 #include "runtime/handles.inline.hpp"
 #include "runtime/javaCalls.hpp"
 #include "runtime/mutexLocker.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/thread.inline.hpp"
 #include "services/classLoadingService.hpp"
 #include "services/threadService.hpp"
@@ -2398,7 +2399,6 @@
 
   // If this is an anonymous class, append a hash to make the name unique
   if (is_anonymous()) {
-    assert(EnableInvokeDynamic, "EnableInvokeDynamic was not set.");
     intptr_t hash = (java_mirror() != NULL) ? java_mirror()->identity_hash() : 0;
     sprintf(hash_buf, "/" UINTX_FORMAT, (uintx)hash);
     hash_len = (int)strlen(hash_buf);
--- a/hotspot/src/share/vm/oops/klass.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/oops/klass.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -36,7 +36,8 @@
 #include "oops/instanceKlass.hpp"
 #include "oops/klass.inline.hpp"
 #include "oops/oop.inline2.hpp"
-#include "runtime/atomic.hpp"
+#include "runtime/atomic.inline.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "trace/traceMacros.hpp"
 #include "utilities/stack.hpp"
 #include "utilities/macros.hpp"
@@ -544,7 +545,6 @@
   if (oop_is_instance()) {
     InstanceKlass* ik = (InstanceKlass*) this;
     if (ik->is_anonymous()) {
-      assert(EnableInvokeDynamic, "");
       intptr_t hash = 0;
       if (ik->java_mirror() != NULL) {
         // java_mirror might not be created yet, return 0 as hash.
--- a/hotspot/src/share/vm/oops/klass.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/oops/klass.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -32,7 +32,6 @@
 #include "oops/klassPS.hpp"
 #include "oops/metadata.hpp"
 #include "oops/oop.hpp"
-#include "runtime/orderAccess.hpp"
 #include "trace/traceMacros.hpp"
 #include "utilities/accessFlags.hpp"
 #include "utilities/macros.hpp"
--- a/hotspot/src/share/vm/oops/method.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/oops/method.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -49,6 +49,7 @@
 #include "runtime/compilationPolicy.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/handles.inline.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/relocator.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/signature.hpp"
--- a/hotspot/src/share/vm/oops/method.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/oops/method.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -661,7 +661,7 @@
   // this operates only on invoke methods:
   // presize interpreter frames for extra interpreter stack entries, if needed
   // Account for the extra appendix argument for invokehandle/invokedynamic
-  static int extra_stack_entries() { return EnableInvokeDynamic ? extra_stack_entries_for_jsr292 : 0; }
+  static int extra_stack_entries() { return extra_stack_entries_for_jsr292; }
   static int extra_stack_words();  // = extra_stack_entries() * Interpreter::stackElementSize
 
   // RedefineClasses() support:
--- a/hotspot/src/share/vm/oops/methodData.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/oops/methodData.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -34,6 +34,7 @@
 #include "runtime/compilationPolicy.hpp"
 #include "runtime/deoptimization.hpp"
 #include "runtime/handles.inline.hpp"
+#include "runtime/orderAccess.inline.hpp"
 
 // ==================================================================
 // DataLayout
--- a/hotspot/src/share/vm/oops/objArrayKlass.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/oops/objArrayKlass.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -42,6 +42,7 @@
 #include "oops/symbol.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/mutexLocker.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "utilities/copy.hpp"
 #include "utilities/macros.hpp"
 #if INCLUDE_ALL_GCS
--- a/hotspot/src/share/vm/oops/oop.inline.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/oops/oop.inline.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -38,7 +38,8 @@
 #include "oops/klass.inline.hpp"
 #include "oops/markOop.inline.hpp"
 #include "oops/oop.hpp"
-#include "runtime/atomic.hpp"
+#include "runtime/atomic.inline.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/os.hpp"
 #include "utilities/macros.hpp"
 #ifdef TARGET_ARCH_x86
--- a/hotspot/src/share/vm/oops/typeArrayKlass.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/oops/typeArrayKlass.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -39,6 +39,7 @@
 #include "oops/typeArrayKlass.hpp"
 #include "oops/typeArrayOop.hpp"
 #include "runtime/handles.inline.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "utilities/macros.hpp"
 
 bool TypeArrayKlass::compute_is_subtype_of(Klass* k) {
--- a/hotspot/src/share/vm/oops/typeArrayOop.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/oops/typeArrayOop.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -27,39 +27,7 @@
 
 #include "oops/arrayOop.hpp"
 #include "oops/typeArrayKlass.hpp"
-#ifdef TARGET_OS_ARCH_linux_x86
-# include "orderAccess_linux_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_sparc
-# include "orderAccess_linux_sparc.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_zero
-# include "orderAccess_linux_zero.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_solaris_x86
-# include "orderAccess_solaris_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_solaris_sparc
-# include "orderAccess_solaris_sparc.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_windows_x86
-# include "orderAccess_windows_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_arm
-# include "orderAccess_linux_arm.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_ppc
-# include "orderAccess_linux_ppc.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_aix_ppc
-# include "orderAccess_aix_ppc.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_bsd_x86
-# include "orderAccess_bsd_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_bsd_zero
-# include "orderAccess_bsd_zero.inline.hpp"
-#endif
+#include "runtime/orderAccess.inline.hpp"
 
 // A typeArrayOop is an array containing basic types (non oop elements).
 // It is used for arrays of {characters, singles, doubles, bytes, shorts, integers, longs}
--- a/hotspot/src/share/vm/opto/compile.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/opto/compile.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -704,6 +704,7 @@
 #endif
   set_print_inlining(PrintInlining || method()->has_option("PrintInlining") NOT_PRODUCT( || PrintOptoInlining));
   set_print_intrinsics(PrintIntrinsics || method()->has_option("PrintIntrinsics"));
+  set_has_irreducible_loop(true); // conservative until build_loop_tree() reset it
 
   if (ProfileTraps RTM_OPT_ONLY( || UseRTMLocking )) {
     // Make sure the method being compiled gets its own MDO,
@@ -988,6 +989,8 @@
   set_print_assembly(PrintFrameConverterAssembly);
   set_parsed_irreducible_loop(false);
 #endif
+  set_has_irreducible_loop(false); // no loops
+
   CompileWrapper cw(this);
   Init(/*AliasLevel=*/ 0);
   init_tf((*generator)());
@@ -1158,7 +1161,7 @@
     if( start->is_Start() )
       return start->as_Start();
   }
-  ShouldNotReachHere();
+  fatal("Did not find Start node!");
   return NULL;
 }
 
--- a/hotspot/src/share/vm/opto/compile.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/opto/compile.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -319,6 +319,7 @@
   bool                  _trace_opto_output;
   bool                  _parsed_irreducible_loop; // True if ciTypeFlow detected irreducible loops during parsing
 #endif
+  bool                  _has_irreducible_loop;  // Found irreducible loops
   // JSR 292
   bool                  _has_method_handle_invokes; // True if this method has MethodHandle invokes.
   RTMState              _rtm_state;             // State of Restricted Transactional Memory usage
@@ -604,6 +605,8 @@
   void          set_parsed_irreducible_loop(bool z) { _parsed_irreducible_loop = z; }
   int _in_dump_cnt;  // Required for dumping ir nodes.
 #endif
+  bool              has_irreducible_loop() const { return _has_irreducible_loop; }
+  void          set_has_irreducible_loop(bool z) { _has_irreducible_loop = z; }
 
   // JSR 292
   bool              has_method_handle_invokes() const { return _has_method_handle_invokes;     }
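
The new _has_irreducible_loop flag follows a conservative-default pattern: a method compile starts with the flag set to true, build_loop_tree() later records the real answer, and stub compiles (which contain no loops) start at false. A minimal standalone sketch of that pattern, with a hypothetical class name:

// Conservative-default flag: assume the worst until analysis proves otherwise.
class CompilationState {                  // hypothetical stand-in for Compile
  bool _has_irreducible_loop;
 public:
  explicit CompilationState(bool is_stub)
    // Method compiles assume irreducible loops exist until build_loop_tree() runs;
    // stub compiles have no loops at all.
    : _has_irreducible_loop(!is_stub) {}
  bool has_irreducible_loop() const     { return _has_irreducible_loop; }
  void set_has_irreducible_loop(bool z) { _has_irreducible_loop = z; }
};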
--- a/hotspot/src/share/vm/opto/graphKit.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/opto/graphKit.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -1509,6 +1509,8 @@
   Node* ld;
   if (require_atomic_access && bt == T_LONG) {
     ld = LoadLNode::make_atomic(C, ctl, mem, adr, adr_type, t, mo);
+  } else if (require_atomic_access && bt == T_DOUBLE) {
+    ld = LoadDNode::make_atomic(C, ctl, mem, adr, adr_type, t, mo);
   } else {
     ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo);
   }
@@ -1531,6 +1533,8 @@
   Node* st;
   if (require_atomic_access && bt == T_LONG) {
     st = StoreLNode::make_atomic(C, ctl, mem, adr, adr_type, val, mo);
+  } else if (require_atomic_access && bt == T_DOUBLE) {
+    st = StoreDNode::make_atomic(C, ctl, mem, adr, adr_type, val, mo);
   } else {
     st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo);
   }
--- a/hotspot/src/share/vm/opto/library_call.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/opto/library_call.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -222,7 +222,7 @@
   bool inline_math_subtractExactL(bool is_decrement);
   bool inline_exp();
   bool inline_pow();
-  void finish_pow_exp(Node* result, Node* x, Node* y, const TypeFunc* call_type, address funcAddr, const char* funcName);
+  Node* finish_pow_exp(Node* result, Node* x, Node* y, const TypeFunc* call_type, address funcAddr, const char* funcName);
   bool inline_min_max(vmIntrinsics::ID id);
   Node* generate_min_max(vmIntrinsics::ID id, Node* x, Node* y);
   // This returns Type::AnyPtr, RawPtr, or OopPtr.
@@ -1686,7 +1686,7 @@
   return true;
 }
 
-void LibraryCallKit::finish_pow_exp(Node* result, Node* x, Node* y, const TypeFunc* call_type, address funcAddr, const char* funcName) {
+Node* LibraryCallKit::finish_pow_exp(Node* result, Node* x, Node* y, const TypeFunc* call_type, address funcAddr, const char* funcName) {
   //-------------------
   //result=(result.isNaN())? funcAddr():result;
   // Check: If isNaN() by checking result!=result? then either trap
@@ -1702,7 +1702,7 @@
       uncommon_trap(Deoptimization::Reason_intrinsic,
                     Deoptimization::Action_make_not_entrant);
     }
-    set_result(result);
+    return result;
   } else {
     // If this inlining ever returned NaN in the past, we compile a call
     // to the runtime to properly handle corner cases
@@ -1732,9 +1732,10 @@
 
       result_region->init_req(2, control());
       result_val->init_req(2, value);
-      set_result(result_region, result_val);
+      set_control(_gvn.transform(result_region));
+      return _gvn.transform(result_val);
     } else {
-      set_result(result);
+      return result;
     }
   }
 }
@@ -1746,7 +1747,8 @@
   Node* arg = round_double_node(argument(0));
   Node* n   = _gvn.transform(new (C) ExpDNode(C, control(), arg));
 
-  finish_pow_exp(n, arg, NULL, OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dexp), "EXP");
+  n = finish_pow_exp(n, arg, NULL, OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dexp), "EXP");
+  set_result(n);
 
   C->set_has_split_ifs(true); // Has chance for split-if optimization
   return true;
@@ -1756,27 +1758,48 @@
 // Inline power instructions, if possible.
 bool LibraryCallKit::inline_pow() {
   // Pseudocode for pow
-  // if (x <= 0.0) {
-  //   long longy = (long)y;
-  //   if ((double)longy == y) { // if y is long
-  //     if (y + 1 == y) longy = 0; // huge number: even
-  //     result = ((1&longy) == 0)?-DPow(abs(x), y):DPow(abs(x), y);
+  // if (y == 2) {
+  //   return x * x;
+  // } else {
+  //   if (x <= 0.0) {
+  //     long longy = (long)y;
+  //     if ((double)longy == y) { // if y is long
+  //       if (y + 1 == y) longy = 0; // huge number: even
+  //       result = ((1&longy) == 0)?-DPow(abs(x), y):DPow(abs(x), y);
+  //     } else {
+  //       result = NaN;
+  //     }
   //   } else {
-  //     result = NaN;
+  //     result = DPow(x,y);
   //   }
-  // } else {
-  //   result = DPow(x,y);
+  //   if (result != result)?  {
+  //     result = uncommon_trap() or runtime_call();
+  //   }
+  //   return result;
   // }
-  // if (result != result)?  {
-  //   result = uncommon_trap() or runtime_call();
-  // }
-  // return result;
 
   Node* x = round_double_node(argument(0));
   Node* y = round_double_node(argument(2));
 
   Node* result = NULL;
 
+  Node*   const_two_node = makecon(TypeD::make(2.0));
+  Node*   cmp_node       = _gvn.transform(new (C) CmpDNode(y, const_two_node));
+  Node*   bool_node      = _gvn.transform(new (C) BoolNode(cmp_node, BoolTest::eq));
+  IfNode* if_node        = create_and_xform_if(control(), bool_node, PROB_STATIC_INFREQUENT, COUNT_UNKNOWN);
+  Node*   if_true        = _gvn.transform(new (C) IfTrueNode(if_node));
+  Node*   if_false       = _gvn.transform(new (C) IfFalseNode(if_node));
+
+  RegionNode* region_node = new (C) RegionNode(3);
+  region_node->init_req(1, if_true);
+
+  Node* phi_node = new (C) PhiNode(region_node, Type::DOUBLE);
+  // special case for x^y where y == 2, we can convert it to x * x
+  phi_node->init_req(1, _gvn.transform(new (C) MulDNode(x, x)));
+
+  // set control to if_false since we will now process the false branch
+  set_control(if_false);
+
   if (!too_many_traps(Deoptimization::Reason_intrinsic)) {
     // Short form: skip the fancy tests and just check for NaN result.
     result = _gvn.transform(new (C) PowDNode(C, control(), x, y));
@@ -1900,7 +1923,15 @@
     result = _gvn.transform(phi);
   }
 
-  finish_pow_exp(result, x, y, OptoRuntime::Math_DD_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dpow), "POW");
+  result = finish_pow_exp(result, x, y, OptoRuntime::Math_DD_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dpow), "POW");
+
+  // control from finish_pow_exp is now input to the region node
+  region_node->set_req(2, control());
+  // the result from finish_pow_exp is now input to the phi node
+  phi_node->init_req(2, result);
+  set_control(_gvn.transform(region_node));
+  record_for_igvn(region_node);
+  set_result(_gvn.transform(phi_node));
 
   C->set_has_split_ifs(true); // Has chance for split-if optimization
   return true;
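
The inline_pow() change adds a guarded fast path ahead of the existing NaN handling: when y compares equal to 2.0 at run time the intrinsic returns x * x, and that result is merged with the general PowD/runtime result through the new Region/Phi pair. Semantically the generated graph behaves like the plain C++ sketch below (fast_pow and std::pow stand in for the PowDNode computation and the SharedRuntime::dpow fallback; this is an illustration, not the emitted code).

#include <cmath>

static double fast_pow(double x, double y) {   // stands in for the inlined PowD computation
  return std::pow(x, y);
}

double pow_intrinsic_semantics(double x, double y) {
  if (y == 2.0) {
    return x * x;                   // fast path, merged in via the new Region/Phi
  }
  double result = fast_pow(x, y);   // existing inlined path
  if (result != result) {           // NaN: uncommon trap or runtime call in the real graph
    result = std::pow(x, y);
  }
  return result;
}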
--- a/hotspot/src/share/vm/opto/loopnode.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/opto/loopnode.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -267,9 +267,9 @@
 
   // Counted loop head must be a good RegionNode with only 3 not NULL
   // control input edges: Self, Entry, LoopBack.
-  if (x->in(LoopNode::Self) == NULL || x->req() != 3)
+  if (x->in(LoopNode::Self) == NULL || x->req() != 3 || loop->_irreducible) {
     return false;
-
+  }
   Node *init_control = x->in(LoopNode::EntryControl);
   Node *back_control = x->in(LoopNode::LoopBackControl);
   if (init_control == NULL || back_control == NULL)    // Partially dead
@@ -1523,11 +1523,11 @@
 
   // If I have one hot backedge, peel off myself loop.
   // I better be the outermost loop.
-  if( _head->req() > 3 ) {
+  if (_head->req() > 3 && !_irreducible) {
     split_outer_loop( phase );
     result = true;
 
-  } else if( !_head->is_Loop() && !_irreducible ) {
+  } else if (!_head->is_Loop() && !_irreducible) {
     // Make a new LoopNode to replace the old loop head
     Node *l = new (phase->C) LoopNode( _head->in(1), _head->in(2) );
     l = igvn.register_new_node_with_optimizer(l, _head);
@@ -2939,6 +2939,7 @@
           return pre_order;
         }
       }
+      C->set_has_irreducible_loop(_has_irreducible_loops);
     }
 
     // This Node might be a decision point for loops.  It is only if
--- a/hotspot/src/share/vm/opto/memnode.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/opto/memnode.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -308,33 +308,16 @@
     int alias_idx = phase->C->get_alias_index(t_adr->is_ptr());
   }
 
-#ifdef ASSERT
   Node* base = NULL;
-  if (address->is_AddP())
+  if (address->is_AddP()) {
     base = address->in(AddPNode::Base);
+  }
   if (base != NULL && phase->type(base)->higher_equal(TypePtr::NULL_PTR) &&
       !t_adr->isa_rawptr()) {
     // Note: raw address has TOP base and top->higher_equal(TypePtr::NULL_PTR) is true.
-    Compile* C = phase->C;
-    tty->cr();
-    tty->print_cr("===== NULL+offs not RAW address =====");
-    if (C->is_dead_node(this->_idx))    tty->print_cr("'this' is dead");
-    if ((ctl != NULL) && C->is_dead_node(ctl->_idx)) tty->print_cr("'ctl' is dead");
-    if (C->is_dead_node(mem->_idx))     tty->print_cr("'mem' is dead");
-    if (C->is_dead_node(address->_idx)) tty->print_cr("'address' is dead");
-    if (C->is_dead_node(base->_idx))    tty->print_cr("'base' is dead");
-    tty->cr();
-    base->dump(1);
-    tty->cr();
-    this->dump(2);
-    tty->print("this->adr_type():     "); adr_type()->dump(); tty->cr();
-    tty->print("phase->type(address): "); t_adr->dump(); tty->cr();
-    tty->print("phase->type(base):    "); phase->type(address)->dump(); tty->cr();
-    tty->cr();
+    // Skip this node optimization if its address has TOP base.
+    return NodeSentinel; // caller will return NULL
   }
-  assert(base == NULL || t_adr->isa_rawptr() ||
-        !phase->type(base)->higher_equal(TypePtr::NULL_PTR), "NULL+offs not RAW address?");
-#endif
 
   // Avoid independent memory operations
   Node* old_mem = mem;
@@ -955,6 +938,10 @@
   return new (C) LoadLNode(ctl, mem, adr, adr_type, rt->is_long(), mo, require_atomic);
 }
 
+LoadDNode* LoadDNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt, MemOrd mo) {
+  bool require_atomic = true;
+  return new (C) LoadDNode(ctl, mem, adr, adr_type, rt, mo, require_atomic);
+}
 
 
 
@@ -2397,6 +2384,11 @@
   return new (C) StoreLNode(ctl, mem, adr, adr_type, val, mo, require_atomic);
 }
 
+StoreDNode* StoreDNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo) {
+  bool require_atomic = true;
+  return new (C) StoreDNode(ctl, mem, adr, adr_type, val, mo, require_atomic);
+}
+
 
 //--------------------------bottom_type----------------------------------------
 const Type *StoreNode::bottom_type() const {
--- a/hotspot/src/share/vm/opto/memnode.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/opto/memnode.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -332,7 +332,7 @@
   virtual uint ideal_reg() const { return Op_RegL; }
   virtual int store_Opcode() const { return Op_StoreL; }
   virtual BasicType memory_type() const { return T_LONG; }
-  bool require_atomic_access() { return _require_atomic_access; }
+  bool require_atomic_access() const { return _require_atomic_access; }
   static LoadLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type,
                                 const Type* rt, MemOrd mo);
 #ifndef PRODUCT
@@ -367,13 +367,31 @@
 //------------------------------LoadDNode--------------------------------------
 // Load a double (64 bits) from memory
 class LoadDNode : public LoadNode {
+  virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
+  virtual uint cmp( const Node &n ) const {
+    return _require_atomic_access == ((LoadDNode&)n)._require_atomic_access
+      && LoadNode::cmp(n);
+  }
+  virtual uint size_of() const { return sizeof(*this); }
+  const bool _require_atomic_access;  // is piecewise load forbidden?
+
 public:
-  LoadDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t, MemOrd mo)
-    : LoadNode(c, mem, adr, at, t, mo) {}
+  LoadDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t,
+            MemOrd mo, bool require_atomic_access = false)
+    : LoadNode(c, mem, adr, at, t, mo), _require_atomic_access(require_atomic_access) {}
   virtual int Opcode() const;
   virtual uint ideal_reg() const { return Op_RegD; }
   virtual int store_Opcode() const { return Op_StoreD; }
   virtual BasicType memory_type() const { return T_DOUBLE; }
+  bool require_atomic_access() const { return _require_atomic_access; }
+  static LoadDNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type,
+                                const Type* rt, MemOrd mo);
+#ifndef PRODUCT
+  virtual void dump_spec(outputStream *st) const {
+    LoadNode::dump_spec(st);
+    if (_require_atomic_access)  st->print(" Atomic!");
+  }
+#endif
 };
 
 //------------------------------LoadD_unalignedNode----------------------------
@@ -574,7 +592,7 @@
     : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
   virtual int Opcode() const;
   virtual BasicType memory_type() const { return T_LONG; }
-  bool require_atomic_access() { return _require_atomic_access; }
+  bool require_atomic_access() const { return _require_atomic_access; }
   static StoreLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo);
 #ifndef PRODUCT
   virtual void dump_spec(outputStream *st) const {
@@ -597,11 +615,28 @@
 //------------------------------StoreDNode-------------------------------------
 // Store double to memory
 class StoreDNode : public StoreNode {
+  virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
+  virtual uint cmp( const Node &n ) const {
+    return _require_atomic_access == ((StoreDNode&)n)._require_atomic_access
+      && StoreNode::cmp(n);
+  }
+  virtual uint size_of() const { return sizeof(*this); }
+  const bool _require_atomic_access;  // is piecewise store forbidden?
 public:
-  StoreDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
-    : StoreNode(c, mem, adr, at, val, mo) {}
+  StoreDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val,
+             MemOrd mo, bool require_atomic_access = false)
+    : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
   virtual int Opcode() const;
   virtual BasicType memory_type() const { return T_DOUBLE; }
+  bool require_atomic_access() const { return _require_atomic_access; }
+  static StoreDNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo);
+#ifndef PRODUCT
+  virtual void dump_spec(outputStream *st) const {
+    StoreNode::dump_spec(st);
+    if (_require_atomic_access)  st->print(" Atomic!");
+  }
+#endif
+
 };
 
 //------------------------------StorePNode-------------------------------------
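
LoadDNode/StoreDNode now mirror LoadLNode/StoreLNode: the _require_atomic_access flag marks doubles that must be read or written in a single 64-bit access (for example, volatile double fields on 32-bit platforms), and the flag participates in hash() and cmp() so GVN never commons an atomic node with a non-atomic one. A small standalone sketch of why the flag must feed value numbering (hypothetical node type, not the real Node machinery):

#include <cstddef>

// Two nodes that differ only in the atomicity requirement must not be merged,
// so the flag is part of both the hash and the equality test.
struct LoadDSketch {
  const void* address;
  bool        require_atomic_access;

  size_t hash() const {
    return reinterpret_cast<size_t>(address) + (require_atomic_access ? 1 : 0);
  }
  bool cmp(const LoadDSketch& other) const {
    return address == other.address &&
           require_atomic_access == other.require_atomic_access;
  }
};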
--- a/hotspot/src/share/vm/opto/node.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/opto/node.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -27,6 +27,7 @@
 #include "memory/allocation.inline.hpp"
 #include "opto/cfgnode.hpp"
 #include "opto/connode.hpp"
+#include "opto/loopnode.hpp"
 #include "opto/machnode.hpp"
 #include "opto/matcher.hpp"
 #include "opto/node.hpp"
@@ -1263,6 +1264,7 @@
 
   Node *top = igvn->C->top();
   nstack.push(dead);
+  bool has_irreducible_loop = igvn->C->has_irreducible_loop();
 
   while (nstack.size() > 0) {
     dead = nstack.pop();
@@ -1277,13 +1279,31 @@
           assert (!use->is_Con(), "Control for Con node should be Root node.");
           use->set_req(0, top);       // Cut dead edge to prevent processing
           nstack.push(use);           // the dead node again.
+        } else if (!has_irreducible_loop && // Backedge could be alive in irreducible loop
+                   use->is_Loop() && !use->is_Root() &&       // Don't kill Root (RootNode extends LoopNode)
+                   use->in(LoopNode::EntryControl) == dead) { // Dead loop if its entry is dead
+          use->set_req(LoopNode::EntryControl, top);          // Cut dead edge to prevent processing
+          use->set_req(0, top);       // Cut self edge
+          nstack.push(use);
         } else {                      // Else found a not-dead user
+          // Dead if all inputs are top or null
+          bool dead_use = !use->is_Root(); // Keep empty graph alive
           for (uint j = 1; j < use->req(); j++) {
-            if (use->in(j) == dead) { // Turn all dead inputs into TOP
+            Node* in = use->in(j);
+            if (in == dead) {         // Turn all dead inputs into TOP
               use->set_req(j, top);
+            } else if (in != NULL && !in->is_top()) {
+              dead_use = false;
             }
           }
-          igvn->_worklist.push(use);
+          if (dead_use) {
+            if (use->is_Region()) {
+              use->set_req(0, top);   // Cut self edge
+            }
+            nstack.push(use);
+          } else {
+            igvn->_worklist.push(use);
+          }
         }
         // Refresh the iterator, since any number of kills might have happened.
         k = dead->last_outs(kmin);
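
The node.cpp hunk extends dead-node removal in two ways: a Loop node whose entry control has died is cut loose (unless the compile may contain irreducible loops, where a live backedge can still reach it), and a user whose remaining inputs are all top or NULL is itself treated as dead instead of merely being requeued for IGVN. A compact standalone sketch of the second rule (illustrative types only):

#include <cstddef>
#include <vector>

// Sketch: a user node is dead once every remaining input is NULL or top.
struct SketchNode {
  std::vector<SketchNode*> in;   // in[0] is control, in[1..] are data inputs
  bool is_top;
  bool is_root;
};

static bool all_inputs_dead(const SketchNode* use) {
  if (use->is_root) return false;               // keep an empty graph alive
  for (size_t j = 1; j < use->in.size(); j++) {
    SketchNode* n = use->in[j];
    if (n != NULL && !n->is_top) return false;  // any live input keeps the user alive
  }
  return true;                                  // push the user for removal, not just re-IGVN
}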
--- a/hotspot/src/share/vm/opto/runtime.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/opto/runtime.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -868,7 +868,7 @@
   return TypeFunc::make(domain, range);
 }
 
-// for cipherBlockChaining calls of aescrypt encrypt/decrypt, four pointers and a length, returning void
+// for cipherBlockChaining calls of aescrypt encrypt/decrypt, four pointers and a length, returning int
 const TypeFunc* OptoRuntime::cipherBlockChaining_aescrypt_Type() {
   // create input type (domain)
   int num_args      = 5;
--- a/hotspot/src/share/vm/precompiled/precompiled.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/precompiled/precompiled.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -193,6 +193,7 @@
 # include "runtime/mutexLocker.hpp"
 # include "runtime/objectMonitor.hpp"
 # include "runtime/orderAccess.hpp"
+# include "runtime/orderAccess.inline.hpp"
 # include "runtime/os.hpp"
 # include "runtime/osThread.hpp"
 # include "runtime/perfData.hpp"
--- a/hotspot/src/share/vm/prims/forte.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/prims/forte.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -32,7 +32,7 @@
 #include "oops/oop.inline2.hpp"
 #include "prims/forte.hpp"
 #include "runtime/javaCalls.hpp"
-#include "runtime/thread.hpp"
+#include "runtime/thread.inline.hpp"
 #include "runtime/vframe.hpp"
 #include "runtime/vframeArray.hpp"
 
--- a/hotspot/src/share/vm/prims/jni.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/prims/jni.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -67,6 +67,7 @@
 #include "runtime/java.hpp"
 #include "runtime/javaCalls.hpp"
 #include "runtime/jfieldIDWorkaround.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/reflection.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/signature.hpp"
--- a/hotspot/src/share/vm/prims/jniCheck.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/prims/jniCheck.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -35,7 +35,7 @@
 #include "runtime/handles.hpp"
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/jfieldIDWorkaround.hpp"
-#include "runtime/thread.hpp"
+#include "runtime/thread.inline.hpp"
 #ifdef TARGET_ARCH_x86
 # include "jniTypes_x86.hpp"
 #endif
--- a/hotspot/src/share/vm/prims/jvm.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/prims/jvm.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -51,6 +51,7 @@
 #include "runtime/java.hpp"
 #include "runtime/javaCalls.hpp"
 #include "runtime/jfieldIDWorkaround.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/os.hpp"
 #include "runtime/perfData.hpp"
 #include "runtime/reflection.hpp"
--- a/hotspot/src/share/vm/prims/jvmtiClassFileReconstituter.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/prims/jvmtiClassFileReconstituter.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -851,7 +851,7 @@
                "sanity check");
 
         int cpci = Bytes::get_native_u2(bcp+1);
-        bool is_invokedynamic = (EnableInvokeDynamic && code == Bytecodes::_invokedynamic);
+        bool is_invokedynamic = (code == Bytecodes::_invokedynamic);
         ConstantPoolCacheEntry* entry;
         if (is_invokedynamic) {
           cpci = Bytes::get_native_u4(bcp+1);
--- a/hotspot/src/share/vm/prims/jvmtiEnvBase.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/prims/jvmtiEnvBase.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -41,6 +41,7 @@
 #include "runtime/objectMonitor.hpp"
 #include "runtime/objectMonitor.inline.hpp"
 #include "runtime/signature.hpp"
+#include "runtime/thread.inline.hpp"
 #include "runtime/vframe.hpp"
 #include "runtime/vframe_hp.hpp"
 #include "runtime/vmThread.hpp"
--- a/hotspot/src/share/vm/prims/jvmtiExport.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/prims/jvmtiExport.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -47,7 +47,7 @@
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/objectMonitor.hpp"
 #include "runtime/objectMonitor.inline.hpp"
-#include "runtime/thread.hpp"
+#include "runtime/thread.inline.hpp"
 #include "runtime/vframe.hpp"
 #include "services/attachListener.hpp"
 #include "services/serviceUtil.hpp"
--- a/hotspot/src/share/vm/prims/jvmtiRawMonitor.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/prims/jvmtiRawMonitor.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -25,7 +25,8 @@
 #include "precompiled.hpp"
 #include "prims/jvmtiRawMonitor.hpp"
 #include "runtime/interfaceSupport.hpp"
-#include "runtime/thread.hpp"
+#include "runtime/orderAccess.inline.hpp"
+#include "runtime/thread.inline.hpp"
 
 GrowableArray<JvmtiRawMonitor*> *JvmtiPendingMonitors::_monitors = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<JvmtiRawMonitor*>(1,true);
 
--- a/hotspot/src/share/vm/prims/jvmtiThreadState.inline.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/prims/jvmtiThreadState.inline.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -27,6 +27,7 @@
 
 #include "prims/jvmtiEnvThreadState.hpp"
 #include "prims/jvmtiThreadState.hpp"
+#include "runtime/thread.inline.hpp"
 
 // JvmtiEnvThreadStateIterator implementation
 
--- a/hotspot/src/share/vm/prims/methodHandles.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/prims/methodHandles.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -59,7 +59,7 @@
 // MethodHandles::generate_adapters
 //
 void MethodHandles::generate_adapters() {
-  if (!EnableInvokeDynamic || SystemDictionary::MethodHandle_klass() == NULL)  return;
+  if (SystemDictionary::MethodHandle_klass() == NULL)  return;
 
   assert(_adapter_code == NULL, "generate only once");
 
@@ -98,7 +98,7 @@
 
 void MethodHandles::set_enabled(bool z) {
   if (_enabled != z) {
-    guarantee(z && EnableInvokeDynamic, "can only enable once, and only if -XX:+EnableInvokeDynamic");
+    guarantee(z, "can only enable once");
     _enabled = z;
   }
 }
@@ -1374,11 +1374,6 @@
  * This one function is exported, used by NativeLookup.
  */
 JVM_ENTRY(void, JVM_RegisterMethodHandleMethods(JNIEnv *env, jclass MHN_class)) {
-  if (!EnableInvokeDynamic) {
-    warning("JSR 292 is disabled in this JVM.  Use -XX:+UnlockDiagnosticVMOptions -XX:+EnableInvokeDynamic to enable.");
-    return;  // bind nothing
-  }
-
   assert(!MethodHandles::enabled(), "must not be enabled");
   bool enable_MH = true;
 
--- a/hotspot/src/share/vm/prims/unsafe.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/prims/unsafe.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -33,6 +33,7 @@
 #include "prims/jvm.h"
 #include "runtime/globals.hpp"
 #include "runtime/interfaceSupport.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/reflection.hpp"
 #include "runtime/synchronizer.hpp"
 #include "services/threadService.hpp"
@@ -1727,14 +1728,10 @@
     }
 
     // Unsafe.defineAnonymousClass
-    if (EnableInvokeDynamic) {
-      register_natives("1.7 define anonymous class method", env, unsafecls, anonk_methods, sizeof(anonk_methods)/sizeof(JNINativeMethod));
-    }
+    register_natives("1.7 define anonymous class method", env, unsafecls, anonk_methods, sizeof(anonk_methods)/sizeof(JNINativeMethod));
 
     // Unsafe.shouldBeInitialized
-    if (EnableInvokeDynamic) {
-      register_natives("1.7 LambdaForm support", env, unsafecls, lform_methods, sizeof(lform_methods)/sizeof(JNINativeMethod));
-    }
+    register_natives("1.7 LambdaForm support", env, unsafecls, lform_methods, sizeof(lform_methods)/sizeof(JNINativeMethod));
 
     // Fence methods
     register_natives("1.8 fence methods", env, unsafecls, fence_methods, sizeof(fence_methods)/sizeof(JNINativeMethod));
--- a/hotspot/src/share/vm/runtime/advancedThresholdPolicy.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/runtime/advancedThresholdPolicy.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -449,7 +449,7 @@
   if (should_create_mdo(mh(), level)) {
     create_mdo(mh, thread);
   }
-  if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh, InvocationEntryBci)) {
+  if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) {
     CompLevel next_level = call_event(mh(), level);
     if (next_level != level) {
       compile(mh, InvocationEntryBci, next_level, thread);
@@ -473,7 +473,7 @@
     CompLevel next_osr_level = loop_event(imh(), level);
     CompLevel max_osr_level = (CompLevel)imh->highest_osr_comp_level();
     // At the very least compile the OSR version
-    if (!CompileBroker::compilation_is_in_queue(imh, bci) && next_osr_level != level) {
+    if (!CompileBroker::compilation_is_in_queue(imh) && (next_osr_level != level)) {
       compile(imh, bci, next_osr_level, thread);
     }
 
@@ -507,7 +507,7 @@
           nm->make_not_entrant();
         }
       }
-      if (!CompileBroker::compilation_is_in_queue(mh, InvocationEntryBci)) {
+      if (!CompileBroker::compilation_is_in_queue(mh)) {
         // Fix up next_level if necessary to avoid deopts
         if (next_level == CompLevel_limited_profile && max_osr_level == CompLevel_full_profile) {
           next_level = CompLevel_full_profile;
@@ -519,7 +519,7 @@
     } else {
       cur_level = comp_level(imh());
       next_level = call_event(imh(), cur_level);
-      if (!CompileBroker::compilation_is_in_queue(imh, bci) && next_level != cur_level) {
+      if (!CompileBroker::compilation_is_in_queue(imh) && (next_level != cur_level)) {
         compile(imh, InvocationEntryBci, next_level, thread);
       }
     }
--- a/hotspot/src/share/vm/runtime/arguments.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/runtime/arguments.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -2399,7 +2399,7 @@
   status &= verify_interval(CodeCacheSegmentSize, 1, 1024, "CodeCacheSegmentSize");
 
   // TieredCompilation needs at least 2 compiler threads.
-  const int num_min_compiler_threads = (TieredCompilation && (TieredStopAtLevel >= CompLevel_full_optimization)) ? 2 : 1;
+  const int num_min_compiler_threads = (TieredCompilation && (TieredStopAtLevel >= CompLevel_full_optimization)) ? 2 : CI_COMPILER_COUNT;
   status &=verify_min_value(CICompilerCount, num_min_compiler_threads, "CICompilerCount");
 
   if (!FLAG_IS_DEFAULT(CICompilerCount) && !FLAG_IS_DEFAULT(CICompilerCountPerCPU) && CICompilerCountPerCPU) {
@@ -3638,19 +3638,9 @@
   }
 #endif // PRODUCT
 
-  // JSR 292 is not supported before 1.7
-  if (!JDK_Version::is_gte_jdk17x_version()) {
-    if (EnableInvokeDynamic) {
-      if (!FLAG_IS_DEFAULT(EnableInvokeDynamic)) {
-        warning("JSR 292 is not supported before 1.7.  Disabling support.");
-      }
-      EnableInvokeDynamic = false;
-    }
-  }
-
-  if (EnableInvokeDynamic && ScavengeRootsInCode == 0) {
+  if (ScavengeRootsInCode == 0) {
     if (!FLAG_IS_DEFAULT(ScavengeRootsInCode)) {
-      warning("forcing ScavengeRootsInCode non-zero because EnableInvokeDynamic is true");
+      warning("forcing ScavengeRootsInCode non-zero");
     }
     ScavengeRootsInCode = 1;
   }
--- a/hotspot/src/share/vm/runtime/fprofiler.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/runtime/fprofiler.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -38,6 +38,7 @@
 #include "runtime/stubCodeGenerator.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "runtime/task.hpp"
+#include "runtime/thread.inline.hpp"
 #include "runtime/vframe.hpp"
 #include "utilities/macros.hpp"
 
--- a/hotspot/src/share/vm/runtime/globals.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -3784,10 +3784,6 @@
           NOT_LP64(LINUX_ONLY(2*G) NOT_LINUX(0)),                           \
           "Address to allocate shared memory region for class data")        \
                                                                             \
-  diagnostic(bool, EnableInvokeDynamic, true,                               \
-          "support JSR 292 (method handles, invokedynamic, "                \
-          "anonymous classes")                                              \
-                                                                            \
   diagnostic(bool, PrintMethodHandleStubs, false,                           \
           "Print generated stub code for method handles")                   \
                                                                             \
--- a/hotspot/src/share/vm/runtime/interfaceSupport.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/runtime/interfaceSupport.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -30,6 +30,7 @@
 #include "memory/resourceArea.hpp"
 #include "runtime/init.hpp"
 #include "runtime/interfaceSupport.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/threadLocalStorage.hpp"
 #include "runtime/vframe.hpp"
 #include "utilities/preserveException.hpp"
--- a/hotspot/src/share/vm/runtime/javaFrameAnchor.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/runtime/javaFrameAnchor.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -26,39 +26,7 @@
 #define SHARE_VM_RUNTIME_JAVAFRAMEANCHOR_HPP
 
 #include "utilities/globalDefinitions.hpp"
-#ifdef TARGET_OS_ARCH_linux_x86
-# include "orderAccess_linux_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_sparc
-# include "orderAccess_linux_sparc.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_zero
-# include "orderAccess_linux_zero.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_solaris_x86
-# include "orderAccess_solaris_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_solaris_sparc
-# include "orderAccess_solaris_sparc.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_windows_x86
-# include "orderAccess_windows_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_arm
-# include "orderAccess_linux_arm.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_ppc
-# include "orderAccess_linux_ppc.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_aix_ppc
-# include "orderAccess_aix_ppc.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_bsd_x86
-# include "orderAccess_bsd_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_bsd_zero
-# include "orderAccess_bsd_zero.inline.hpp"
-#endif
+#include "runtime/orderAccess.inline.hpp"
 
 //
 // An object for encapsulating the machine/os dependent part of a JavaThread frame state
--- a/hotspot/src/share/vm/runtime/mutex.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/runtime/mutex.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -25,6 +25,7 @@
 
 #include "precompiled.hpp"
 #include "runtime/mutex.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/osThread.hpp"
 #include "runtime/thread.inline.hpp"
 #include "utilities/events.hpp"
--- a/hotspot/src/share/vm/runtime/objectMonitor.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/runtime/objectMonitor.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -32,6 +32,7 @@
 #include "runtime/mutexLocker.hpp"
 #include "runtime/objectMonitor.hpp"
 #include "runtime/objectMonitor.inline.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/osThread.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "runtime/thread.inline.hpp"
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/runtime/orderAccess.inline.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2014 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_RUNTIME_ORDERACCESS_INLINE_HPP
+#define SHARE_VM_RUNTIME_ORDERACCESS_INLINE_HPP
+
+#include "runtime/orderAccess.hpp"
+
+// Linux
+#ifdef TARGET_OS_ARCH_linux_x86
+# include "orderAccess_linux_x86.inline.hpp"
+#endif
+#ifdef TARGET_OS_ARCH_linux_sparc
+# include "orderAccess_linux_sparc.inline.hpp"
+#endif
+#ifdef TARGET_OS_ARCH_linux_zero
+# include "orderAccess_linux_zero.inline.hpp"
+#endif
+#ifdef TARGET_OS_ARCH_linux_arm
+# include "orderAccess_linux_arm.inline.hpp"
+#endif
+#ifdef TARGET_OS_ARCH_linux_ppc
+# include "orderAccess_linux_ppc.inline.hpp"
+#endif
+
+// Solaris
+#ifdef TARGET_OS_ARCH_solaris_x86
+# include "orderAccess_solaris_x86.inline.hpp"
+#endif
+#ifdef TARGET_OS_ARCH_solaris_sparc
+# include "orderAccess_solaris_sparc.inline.hpp"
+#endif
+
+// Windows
+#ifdef TARGET_OS_ARCH_windows_x86
+# include "orderAccess_windows_x86.inline.hpp"
+#endif
+
+// AIX
+#ifdef TARGET_OS_ARCH_aix_ppc
+# include "orderAccess_aix_ppc.inline.hpp"
+#endif
+
+// BSD
+#ifdef TARGET_OS_ARCH_bsd_x86
+# include "orderAccess_bsd_x86.inline.hpp"
+#endif
+#ifdef TARGET_OS_ARCH_bsd_zero
+# include "orderAccess_bsd_zero.inline.hpp"
+#endif
+
+#endif // SHARE_VM_RUNTIME_ORDERACCESS_INLINE_HPP
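
The new runtime/orderAccess.inline.hpp above replaces the per-TARGET_OS_ARCH include blocks previously copied into each consumer with a single header; the acquire/release primitives it exposes are unchanged. As a hedged analogy only (OrderAccess is HotSpot-internal C++, not a public API), the same release-store / acquire-load publication idiom can be sketched in Java with a JDK 9 VarHandle; every name below is invented for the sketch.

import java.lang.invoke.MethodHandles;
import java.lang.invoke.VarHandle;

// Hypothetical sketch: publish a payload with a release store and observe it
// with an acquire load, analogous to OrderAccess::release_store / load_acquire.
public class AcquireReleaseSketch {
    private int payload;   // plain data, written before the release store
    private int ready;     // the "published" flag, accessed via RELEASE/ACQUIRE

    private static final VarHandle READY;
    static {
        try {
            READY = MethodHandles.lookup()
                    .findVarHandle(AcquireReleaseSketch.class, "ready", int.class);
        } catch (ReflectiveOperationException e) {
            throw new ExceptionInInitializerError(e);
        }
    }

    void publish(int value) {
        payload = value;                 // ordinary store ...
        READY.setRelease(this, 1);       // ... made visible by the release store
    }

    int consume() {
        if ((int) READY.getAcquire(this) == 1) {  // acquire pairs with the release
            return payload;              // guaranteed to see the value stored above
        }
        return -1;                       // not published yet
    }

    public static void main(String[] args) throws InterruptedException {
        AcquireReleaseSketch s = new AcquireReleaseSketch();
        Thread writer = new Thread(() -> s.publish(42));
        writer.start();
        writer.join();
        System.out.println(s.consume()); // prints 42
    }
}

The release store in publish() makes the preceding plain write to payload visible to any reader whose acquire load observes the flag, which is the kind of guarantee the files converted to include this header rely on.
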
--- a/hotspot/src/share/vm/runtime/perfMemory.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/runtime/perfMemory.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -28,6 +28,7 @@
 #include "runtime/java.hpp"
 #include "runtime/mutex.hpp"
 #include "runtime/mutexLocker.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/os.hpp"
 #include "runtime/perfData.hpp"
 #include "runtime/perfMemory.hpp"
--- a/hotspot/src/share/vm/runtime/safepoint.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/runtime/safepoint.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -41,6 +41,7 @@
 #include "runtime/frame.inline.hpp"
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/mutexLocker.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/osThread.hpp"
 #include "runtime/safepoint.hpp"
 #include "runtime/signature.hpp"
--- a/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -2658,19 +2658,20 @@
 JRT_END
 
 #ifdef HAVE_DTRACE_H
-// Create a dtrace nmethod for this method.  The wrapper converts the
-// java compiled calling convention to the native convention, makes a dummy call
-// (actually nops for the size of the call instruction, which become a trap if
-// probe is enabled). The returns to the caller. Since this all looks like a
-// leaf no thread transition is needed.
-
+/**
+ * Create a dtrace nmethod for this method.  The wrapper converts the
+ * Java-compiled calling convention to the native convention, makes a dummy call
+ * (actually nops for the size of the call instruction, which become a trap if the
+ * probe is enabled), and finally returns to the caller. Since this all looks like a
+ * leaf, no thread transition is needed.
+ */
 nmethod *AdapterHandlerLibrary::create_dtrace_nmethod(methodHandle method) {
   ResourceMark rm;
   nmethod* nm = NULL;
 
   if (PrintCompilation) {
     ttyLocker ttyl;
-    tty->print("---   n%s  ");
+    tty->print("---   n  ");
     method->print_short_name(tty);
     if (method->is_static()) {
       tty->print(" (static)");
--- a/hotspot/src/share/vm/runtime/simpleThresholdPolicy.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/runtime/simpleThresholdPolicy.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -192,6 +192,10 @@
       thread->is_interp_only_mode()) {
     return NULL;
   }
+  if (CompileTheWorld || ReplayCompiles) {
+    // Don't trigger other compiles in testing mode
+    return NULL;
+  }
   nmethod *osr_nm = NULL;
 
   handle_counter_overflow(method());
@@ -235,7 +239,7 @@
   if (bci != InvocationEntryBci && mh->is_not_osr_compilable(level)) {
     return;
   }
-  if (!CompileBroker::compilation_is_in_queue(mh, bci)) {
+  if (!CompileBroker::compilation_is_in_queue(mh)) {
     if (PrintTieredEvents) {
       print_event(COMPILE, mh, mh, bci, level);
     }
@@ -374,7 +378,7 @@
 // Handle the invocation event.
 void SimpleThresholdPolicy::method_invocation_event(methodHandle mh, methodHandle imh,
                                               CompLevel level, nmethod* nm, JavaThread* thread) {
-  if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh, InvocationEntryBci)) {
+  if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) {
     CompLevel next_level = call_event(mh(), level);
     if (next_level != level) {
       compile(mh, InvocationEntryBci, next_level, thread);
@@ -387,7 +391,7 @@
 void SimpleThresholdPolicy::method_back_branch_event(methodHandle mh, methodHandle imh,
                                                      int bci, CompLevel level, nmethod* nm, JavaThread* thread) {
   // If the method is already compiling, quickly bail out.
-  if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh, bci)) {
+  if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) {
     // Use loop event as an opportunity to also check there's been
     // enough calls.
     CompLevel cur_level = comp_level(mh());
--- a/hotspot/src/share/vm/runtime/sweeper.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/runtime/sweeper.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -33,8 +33,10 @@
 #include "runtime/atomic.hpp"
 #include "runtime/compilationPolicy.hpp"
 #include "runtime/mutexLocker.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/os.hpp"
 #include "runtime/sweeper.hpp"
+#include "runtime/thread.inline.hpp"
 #include "runtime/vm_operations.hpp"
 #include "trace/tracing.hpp"
 #include "utilities/events.hpp"
--- a/hotspot/src/share/vm/runtime/thread.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/runtime/thread.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -58,6 +58,7 @@
 #include "runtime/memprofiler.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/objectMonitor.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/osThread.hpp"
 #include "runtime/safepoint.hpp"
 #include "runtime/sharedRuntime.hpp"
@@ -1198,6 +1199,13 @@
   va_end(ap);
 }
 
+void NamedThread::print_on(outputStream* st) const {
+  st->print("\"%s\" ", name());
+  Thread::print_on(st);
+  st->cr();
+}
+
+
 // ======= WatcherThread ========
 
 // The watcher thread exists to simulate timer interrupts.  It should
@@ -3602,9 +3610,7 @@
   // It is done after compilers are initialized, because otherwise compilations of
   // signature polymorphic MH intrinsics can be missed
   // (see SystemDictionary::find_method_handle_intrinsic).
-  if (EnableInvokeDynamic) {
-    initialize_jsr292_core_classes(CHECK_JNI_ERR);
-  }
+  initialize_jsr292_core_classes(CHECK_JNI_ERR);
 
 #if INCLUDE_MANAGEMENT
   Management::initialize(THREAD);
--- a/hotspot/src/share/vm/runtime/thread.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/runtime/thread.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -436,18 +436,7 @@
   jlong allocated_bytes()               { return _allocated_bytes; }
   void set_allocated_bytes(jlong value) { _allocated_bytes = value; }
   void incr_allocated_bytes(jlong size) { _allocated_bytes += size; }
-  jlong cooked_allocated_bytes() {
-    jlong allocated_bytes = OrderAccess::load_acquire(&_allocated_bytes);
-    if (UseTLAB) {
-      size_t used_bytes = tlab().used_bytes();
-      if ((ssize_t)used_bytes > 0) {
-        // More-or-less valid tlab.  The load_acquire above should ensure
-        // that the result of the add is <= the instantaneous value
-        return allocated_bytes + used_bytes;
-      }
-    }
-    return allocated_bytes;
-  }
+  inline jlong cooked_allocated_bytes();
 
   TRACE_DATA* trace_data()              { return &_trace_data; }
 
@@ -566,7 +555,7 @@
   void    set_lgrp_id(int value) { _lgrp_id = value; }
 
   // Printing
-  void print_on(outputStream* st) const;
+  virtual void print_on(outputStream* st) const;
   void print() const { print_on(tty); }
   virtual void print_on_error(outputStream* st, char* buf, int buflen) const;
 
@@ -700,6 +689,7 @@
   virtual char* name() const { return _name == NULL ? (char*)"Unknown Thread" : _name; }
   JavaThread *processed_thread() { return _processed_thread; }
   void set_processed_thread(JavaThread *thread) { _processed_thread = thread; }
+  virtual void print_on(outputStream* st) const;
 };
 
 // Worker threads are named and have an id of an assigned work.
@@ -746,7 +736,6 @@
   // Printing
   char* name() const { return (char*)"VM Periodic Task Thread"; }
   void print_on(outputStream* st) const;
-  void print() const { print_on(tty); }
   void unpark();
 
   // Returns the single instance of WatcherThread
@@ -1046,12 +1035,8 @@
 #else
   // Use membars when accessing volatile _thread_state. See
   // Threads::create_vm() for size checks.
-  JavaThreadState thread_state() const           {
-    return (JavaThreadState) OrderAccess::load_acquire((volatile jint*)&_thread_state);
-  }
-  void set_thread_state(JavaThreadState s)       {
-    OrderAccess::release_store((volatile jint*)&_thread_state, (jint)s);
-  }
+  inline JavaThreadState thread_state() const;
+  inline void set_thread_state(JavaThreadState s);
 #endif
   ThreadSafepointState *safepoint_state() const  { return _safepoint_state; }
   void set_safepoint_state(ThreadSafepointState *state) { _safepoint_state = state; }
@@ -1459,7 +1444,6 @@
   // Misc. operations
   char* name() const { return (char*)get_thread_name(); }
   void print_on(outputStream* st) const;
-  void print() const { print_on(tty); }
   void print_value();
   void print_thread_state_on(outputStream* ) const      PRODUCT_RETURN;
   void print_thread_state() const                       PRODUCT_RETURN;
@@ -1775,7 +1759,7 @@
   // clearing/querying jni attach status
   bool is_attaching_via_jni() const { return _jni_attach_state == _attaching_via_jni; }
   bool has_attached_via_jni() const { return is_attaching_via_jni() || _jni_attach_state == _attached_via_jni; }
-  void set_done_attaching_via_jni() { _jni_attach_state = _attached_via_jni; OrderAccess::fence(); }
+  inline void set_done_attaching_via_jni();
 private:
   // This field is used to determine if a thread has claimed
   // a par_id: it is UINT_MAX if the thread has not claimed a par_id;
--- a/hotspot/src/share/vm/runtime/thread.inline.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/runtime/thread.inline.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -46,4 +46,32 @@
 
 #undef SHARE_VM_RUNTIME_THREAD_INLINE_HPP_SCOPE
 
+inline jlong Thread::cooked_allocated_bytes() {
+  jlong allocated_bytes = OrderAccess::load_acquire(&_allocated_bytes);
+  if (UseTLAB) {
+    size_t used_bytes = tlab().used_bytes();
+    if ((ssize_t)used_bytes > 0) {
+      // More-or-less valid tlab. The load_acquire above should ensure
+      // that the result of the add is <= the instantaneous value.
+      return allocated_bytes + used_bytes;
+    }
+  }
+  return allocated_bytes;
+}
+
+#ifdef PPC64
+inline JavaThreadState JavaThread::thread_state() const    {
+  return (JavaThreadState) OrderAccess::load_acquire((volatile jint*)&_thread_state);
+}
+
+inline void JavaThread::set_thread_state(JavaThreadState s) {
+  OrderAccess::release_store((volatile jint*)&_thread_state, (jint)s);
+}
+#endif
+
+inline void JavaThread::set_done_attaching_via_jni() {
+  _jni_attach_state = _attached_via_jni;
+  OrderAccess::fence();
+}
+
 #endif // SHARE_VM_RUNTIME_THREAD_INLINE_HPP
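
cooked_allocated_bytes() and the PPC64 JavaThread state accessors move here so that their OrderAccess calls sit with the other inline definitions rather than in thread.hpp. The bound the comment argues for (acquire-load the committed counter, then add the in-flight TLAB bytes; the sum never exceeds the instantaneous total) can be illustrated with a small Java sketch. This is an analogy, not HotSpot code; class, field, and method names are invented.

import java.lang.invoke.MethodHandles;
import java.lang.invoke.VarHandle;

// Hypothetical sketch of the cooked_allocated_bytes() pattern: a committed
// counter published with a release store, plus bytes still sitting in the
// current TLAB, read racily by an observer thread.
public class CookedBytesSketch {
    private long committedBytes;        // updated only via ALLOC.setRelease
    private volatile long tlabUsed;     // volatile only to avoid long tearing in the sketch

    private static final VarHandle ALLOC;
    static {
        try {
            ALLOC = MethodHandles.lookup()
                    .findVarHandle(CookedBytesSketch.class, "committedBytes", long.class);
        } catch (ReflectiveOperationException e) {
            throw new ExceptionInInitializerError(e);
        }
    }

    // Allocating thread: retire the TLAB, folding its bytes into the counter.
    void retireTlab() {
        long used = tlabUsed;
        tlabUsed = 0;                                   // cleared before ...
        ALLOC.setRelease(this, committedBytes + used);  // ... the release store publishes the new total
    }

    // Observer thread: acquire the counter first, then add in-flight bytes.
    // If the acquire sees the post-retirement counter it also sees tlabUsed == 0,
    // so the sum never exceeds the instantaneous total -- the same reasoning as
    // the comment carried over into Thread::cooked_allocated_bytes().
    long cookedBytes() {
        long committed = (long) ALLOC.getAcquire(this);
        long inFlight = tlabUsed;
        return inFlight > 0 ? committed + inFlight : committed;
    }

    public static void main(String[] args) {
        CookedBytesSketch t = new CookedBytesSketch();
        t.tlabUsed = 512;
        System.out.println(t.cookedBytes()); // 512 (all in-flight)
        t.retireTlab();
        System.out.println(t.cookedBytes()); // 512 (now all committed)
    }
}
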
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -878,7 +878,7 @@
   nonstatic_field(nmethod,             _entry_point,                                  address)                               \
   nonstatic_field(nmethod,             _verified_entry_point,                         address)                               \
   nonstatic_field(nmethod,             _osr_entry_point,                              address)                               \
-  nonstatic_field(nmethod,             _lock_count,                                   jint)                                  \
+  volatile_nonstatic_field(nmethod,    _lock_count,                                   jint)                                  \
   nonstatic_field(nmethod,             _stack_traversal_mark,                         long)                                  \
   nonstatic_field(nmethod,             _compile_id,                                   int)                                   \
   nonstatic_field(nmethod,             _comp_level,                                   int)                                   \
--- a/hotspot/src/share/vm/runtime/vmThread.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/runtime/vmThread.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -339,12 +339,6 @@
   }
 }
 
-void VMThread::print_on(outputStream* st) const {
-  st->print("\"%s\" ", name());
-  Thread::print_on(st);
-  st->cr();
-}
-
 void VMThread::evaluate_operation(VM_Operation* op) {
   ResourceMark rm;
 
--- a/hotspot/src/share/vm/runtime/vmThread.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/runtime/vmThread.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -128,9 +128,6 @@
   // GC support
   void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf);
 
-  // Debugging
-  void print_on(outputStream* st) const;
-  void print() const                              { print_on(tty); }
   void verify();
 
   // Performance measurement
--- a/hotspot/src/share/vm/services/management.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/services/management.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -39,6 +39,7 @@
 #include "runtime/jniHandles.hpp"
 #include "runtime/os.hpp"
 #include "runtime/serviceThread.hpp"
+#include "runtime/thread.inline.hpp"
 #include "services/classLoadingService.hpp"
 #include "services/diagnosticCommand.hpp"
 #include "services/diagnosticFramework.hpp"
--- a/hotspot/src/share/vm/services/memTracker.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/services/memTracker.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -29,6 +29,7 @@
 #include "runtime/mutexLocker.hpp"
 #include "runtime/safepoint.hpp"
 #include "runtime/threadCritical.hpp"
+#include "runtime/thread.inline.hpp"
 #include "runtime/vm_operations.hpp"
 #include "services/memPtr.hpp"
 #include "services/memReporter.hpp"
--- a/hotspot/src/share/vm/services/memoryManager.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/services/memoryManager.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -28,6 +28,7 @@
 #include "oops/oop.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/javaCalls.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "services/lowMemoryDetector.hpp"
 #include "services/management.hpp"
 #include "services/memoryManager.hpp"
--- a/hotspot/src/share/vm/services/memoryPool.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/services/memoryPool.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -29,6 +29,7 @@
 #include "oops/oop.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/javaCalls.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "services/lowMemoryDetector.hpp"
 #include "services/management.hpp"
 #include "services/memoryManager.hpp"
--- a/hotspot/src/share/vm/services/memoryService.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/services/memoryService.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -123,12 +123,12 @@
 void MemoryService::add_gen_collected_heap_info(GenCollectedHeap* heap) {
   CollectorPolicy* policy = heap->collector_policy();
 
-  assert(policy->is_two_generation_policy(), "Only support two generations");
+  assert(policy->is_generation_policy(), "Only support two generations");
   guarantee(heap->n_gens() == 2, "Only support two-generation heap");
 
-  TwoGenerationCollectorPolicy* two_gen_policy = policy->as_two_generation_policy();
-  if (two_gen_policy != NULL) {
-    GenerationSpec** specs = two_gen_policy->generations();
+  GenCollectorPolicy* gen_policy = policy->as_generation_policy();
+  if (gen_policy != NULL) {
+    GenerationSpec** specs = gen_policy->generations();
     Generation::Name kind = specs[0]->name();
     switch (kind) {
       case Generation::DefNew:
--- a/hotspot/src/share/vm/services/threadService.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/services/threadService.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -33,6 +33,7 @@
 #include "runtime/init.hpp"
 #include "runtime/thread.hpp"
 #include "runtime/vframe.hpp"
+#include "runtime/thread.inline.hpp"
 #include "runtime/vmThread.hpp"
 #include "runtime/vm_operations.hpp"
 #include "services/threadService.hpp"
--- a/hotspot/src/share/vm/utilities/array.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/utilities/array.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -28,6 +28,7 @@
 #include "memory/allocation.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/metaspace.hpp"
+#include "runtime/orderAccess.hpp"
 
 // correct linkage required to compile w/o warnings
 // (must be on file level - cannot be local)
--- a/hotspot/src/share/vm/utilities/constantTag.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/utilities/constantTag.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -76,6 +76,20 @@
 }
 
 
+jbyte constantTag::error_value() const {
+  switch (_tag) {
+  case JVM_CONSTANT_UnresolvedClass:
+    return JVM_CONSTANT_UnresolvedClassInError;
+  case JVM_CONSTANT_MethodHandle:
+    return JVM_CONSTANT_MethodHandleInError;
+  case JVM_CONSTANT_MethodType:
+    return JVM_CONSTANT_MethodTypeInError;
+  default:
+    ShouldNotReachHere();
+    return JVM_CONSTANT_Invalid;
+  }
+}
+
 const char* constantTag::internal_name() const {
   switch (_tag) {
     case JVM_CONSTANT_Invalid :
--- a/hotspot/src/share/vm/utilities/constantTag.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/utilities/constantTag.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -109,6 +109,7 @@
   }
 
   jbyte value() const                { return _tag; }
+  jbyte error_value() const;
   jbyte non_error_value() const;
 
   BasicType basic_type() const;        // if used with ldc, what kind of value gets pushed?
--- a/hotspot/src/share/vm/utilities/hashtable.inline.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/utilities/hashtable.inline.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -26,6 +26,7 @@
 #define SHARE_VM_UTILITIES_HASHTABLE_INLINE_HPP
 
 #include "memory/allocation.inline.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "utilities/hashtable.hpp"
 #include "utilities/dtrace.hpp"
 
--- a/hotspot/src/share/vm/utilities/taskqueue.hpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/utilities/taskqueue.hpp	Wed Jul 05 19:40:49 2017 +0200
@@ -28,40 +28,8 @@
 #include "memory/allocation.hpp"
 #include "memory/allocation.inline.hpp"
 #include "runtime/mutex.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "utilities/stack.hpp"
-#ifdef TARGET_OS_ARCH_linux_x86
-# include "orderAccess_linux_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_sparc
-# include "orderAccess_linux_sparc.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_zero
-# include "orderAccess_linux_zero.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_solaris_x86
-# include "orderAccess_solaris_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_solaris_sparc
-# include "orderAccess_solaris_sparc.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_windows_x86
-# include "orderAccess_windows_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_arm
-# include "orderAccess_linux_arm.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_ppc
-# include "orderAccess_linux_ppc.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_aix_ppc
-# include "orderAccess_aix_ppc.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_bsd_x86
-# include "orderAccess_bsd_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_bsd_zero
-# include "orderAccess_bsd_zero.inline.hpp"
-#endif
 
 // Simple TaskQueue stats that are collected by default in debug builds.
 
--- a/hotspot/src/share/vm/utilities/vmError.cpp	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/src/share/vm/utilities/vmError.cpp	Wed Jul 05 19:40:49 2017 +0200
@@ -30,7 +30,7 @@
 #include "runtime/frame.inline.hpp"
 #include "runtime/init.hpp"
 #include "runtime/os.hpp"
-#include "runtime/thread.hpp"
+#include "runtime/thread.inline.hpp"
 #include "runtime/vmThread.hpp"
 #include "runtime/vm_operations.hpp"
 #include "services/memTracker.hpp"
--- a/hotspot/test/compiler/5091921/Test7005594.java	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/test/compiler/5091921/Test7005594.java	Wed Jul 05 19:40:49 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
 /**
  * @test
  * @bug 7005594
+ * @ignore 7117034
  * @summary Array overflow not handled correctly with loop optimzations
  *
  * @run shell Test7005594.sh
--- a/hotspot/test/compiler/7052494/Test7052494.java	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/test/compiler/7052494/Test7052494.java	Wed Jul 05 19:40:49 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
 /**
  * @test
  * @bug 7052494
+ * @ignore 7154567
  * @summary Eclipse test fails on JDK 7 b142
  *
  * @run main/othervm -Xbatch Test7052494
--- a/hotspot/test/compiler/7184394/TestAESBase.java	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/test/compiler/7184394/TestAESBase.java	Wed Jul 05 19:40:49 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -40,9 +40,20 @@
   int msgSize = Integer.getInteger("msgSize", 646);
   boolean checkOutput = Boolean.getBoolean("checkOutput");
   boolean noReinit = Boolean.getBoolean("noReinit");
+  boolean testingMisalignment;
+  private static final int ALIGN = 8;
+  int encInputOffset = Integer.getInteger("encInputOffset", 0) % ALIGN;
+  int encOutputOffset = Integer.getInteger("encOutputOffset", 0) % ALIGN;
+  int decOutputOffset = Integer.getInteger("decOutputOffset", 0) % ALIGN;
+  int lastChunkSize = Integer.getInteger("lastChunkSize", 32);
   int keySize = Integer.getInteger("keySize", 128);
+  int inputLength;
+  int encodeLength;
+  int decodeLength;
+  int decodeMsgSize;
   String algorithm = System.getProperty("algorithm", "AES");
   String mode = System.getProperty("mode", "CBC");
+  String paddingStr = System.getProperty("paddingStr", "PKCS5Padding");
   byte[] input;
   byte[] encode;
   byte[] expectedEncode;
@@ -51,7 +62,6 @@
   Random random = new Random(0);
   Cipher cipher;
   Cipher dCipher;
-  String paddingStr = "PKCS5Padding";
   AlgorithmParameters algParams;
   SecretKey key;
 
@@ -67,7 +77,10 @@
 
   public void prepare() {
     try {
-    System.out.println("\nalgorithm=" + algorithm + ", mode=" + mode + ", msgSize=" + msgSize + ", keySize=" + keySize + ", noReinit=" + noReinit + ", checkOutput=" + checkOutput);
+    System.out.println("\nalgorithm=" + algorithm + ", mode=" + mode + ", paddingStr=" + paddingStr + ", msgSize=" + msgSize + ", keySize=" + keySize + ", noReinit=" + noReinit + ", checkOutput=" + checkOutput + ", encInputOffset=" + encInputOffset + ", encOutputOffset=" + encOutputOffset + ", decOutputOffset=" + decOutputOffset + ", lastChunkSize=" +lastChunkSize );
+
+      if (encInputOffset % ALIGN != 0 || encOutputOffset % ALIGN != 0 || decOutputOffset % ALIGN != 0)
+        testingMisalignment = true;
 
       int keyLenBytes = (keySize == 0 ? 16 : keySize/8);
       byte keyBytes[] = new byte[keyLenBytes];
@@ -81,10 +94,6 @@
         System.out.println("Algorithm: " + key.getAlgorithm() + "("
                            + key.getEncoded().length * 8 + "bit)");
       }
-      input = new byte[msgSize];
-      for (int i=0; i<input.length; i++) {
-        input[i] = (byte) (i & 0xff);
-      }
 
       cipher = Cipher.getInstance(algorithm + "/" + mode + "/" + paddingStr, "SunJCE");
       dCipher = Cipher.getInstance(algorithm + "/" + mode + "/" + paddingStr, "SunJCE");
@@ -103,10 +112,35 @@
         childShowCipher();
       }
 
+      inputLength = msgSize + encInputOffset;
+      if (testingMisalignment) {
+        encodeLength = cipher.getOutputSize(msgSize - lastChunkSize) + encOutputOffset;
+        encodeLength += cipher.getOutputSize(lastChunkSize);
+        decodeLength = dCipher.getOutputSize(encodeLength - lastChunkSize) + decOutputOffset;
+        decodeLength += dCipher.getOutputSize(lastChunkSize);
+      } else {
+        encodeLength = cipher.getOutputSize(msgSize) + encOutputOffset;
+        decodeLength = dCipher.getOutputSize(encodeLength) + decOutputOffset;
+      }
+
+      input = new byte[inputLength];
+      for (int i=encInputOffset, j=0; i<inputLength; i++, j++) {
+        input[i] = (byte) (j & 0xff);
+      }
+
       // do one encode and decode in preparation
-      // this will also create the encode buffer and decode buffer
-      encode = cipher.doFinal(input);
-      decode = dCipher.doFinal(encode);
+      encode = new byte[encodeLength];
+      decode = new byte[decodeLength];
+      if (testingMisalignment) {
+        decodeMsgSize = cipher.update(input, encInputOffset, (msgSize - lastChunkSize), encode, encOutputOffset);
+        decodeMsgSize += cipher.doFinal(input, (encInputOffset + msgSize - lastChunkSize), lastChunkSize, encode, (encOutputOffset + decodeMsgSize));
+
+        int tempSize = dCipher.update(encode, encOutputOffset, (decodeMsgSize - lastChunkSize), decode, decOutputOffset);
+        dCipher.doFinal(encode, (encOutputOffset + decodeMsgSize - lastChunkSize), lastChunkSize, decode, (decOutputOffset + tempSize));
+      } else {
+        decodeMsgSize = cipher.doFinal(input, encInputOffset, msgSize, encode, encOutputOffset);
+        dCipher.doFinal(encode, encOutputOffset, decodeMsgSize, decode, decOutputOffset);
+      }
       if (checkOutput) {
         expectedEncode = (byte[]) encode.clone();
         expectedDecode = (byte[]) decode.clone();
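
The reworked prepare() above sizes the encode/decode buffers from Cipher.getOutputSize() and, whenever one of the new offset properties is not ALIGN-aligned, splits encryption into an update() of everything but the last chunk followed by a doFinal() of the remainder, both writing into the shared buffer at explicit offsets. A stripped-down, self-contained sketch of that call pattern (all-zero demo key and IV, invented constants; not the test harness itself):

import javax.crypto.Cipher;
import javax.crypto.spec.IvParameterSpec;
import javax.crypto.spec.SecretKeySpec;

// Hypothetical sketch: encrypt into an offset output buffer with update() +
// doFinal(), mirroring the misalignment path added to TestAESBase.prepare().
public class CipherOffsetSketch {
    public static void main(String[] args) throws Exception {
        SecretKeySpec key = new SecretKeySpec(new byte[16], "AES"); // demo-only all-zero key
        IvParameterSpec iv = new IvParameterSpec(new byte[16]);     // demo-only all-zero IV

        Cipher enc = Cipher.getInstance("AES/CBC/PKCS5Padding");
        enc.init(Cipher.ENCRYPT_MODE, key, iv);

        int encInputOffset = 1, encOutputOffset = 1;  // deliberately misaligned
        int msgSize = 64, lastChunkSize = 32;

        byte[] input = new byte[encInputOffset + msgSize];
        for (int i = 0; i < msgSize; i++) {
            input[encInputOffset + i] = (byte) i;
        }

        // Size the output the way the test does: worst-case output of each call plus the offset.
        int encodeLength = enc.getOutputSize(msgSize - lastChunkSize)
                         + enc.getOutputSize(lastChunkSize)
                         + encOutputOffset;
        byte[] encode = new byte[encodeLength];

        // Bulk of the message through update(), the tail through doFinal(),
        // both writing into 'encode' at explicit offsets.
        int written = enc.update(input, encInputOffset, msgSize - lastChunkSize,
                                 encode, encOutputOffset);
        written += enc.doFinal(input, encInputOffset + msgSize - lastChunkSize, lastChunkSize,
                               encode, encOutputOffset + written);

        System.out.println("ciphertext bytes written: " + written);
    }
}

Driving a padded mode through offset buffers like this exercises the offset and length bookkeeping that the AES intrinsics must handle; the test repeats the same split on the decrypt side and, with -DcheckOutput=true, compares the results against the expected arrays captured in prepare().
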
--- a/hotspot/test/compiler/7184394/TestAESDecode.java	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/test/compiler/7184394/TestAESDecode.java	Wed Jul 05 19:40:49 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,14 +33,15 @@
   public void run() {
     try {
       if (!noReinit) dCipher.init(Cipher.DECRYPT_MODE, key, algParams);
+      decode = new byte[decodeLength];
+      if (testingMisalignment) {
+        int tempSize = dCipher.update(encode, encOutputOffset, (decodeMsgSize - lastChunkSize), decode, decOutputOffset);
+        dCipher.doFinal(encode, (encOutputOffset + decodeMsgSize - lastChunkSize), lastChunkSize, decode, (decOutputOffset + tempSize));
+      } else {
+        dCipher.doFinal(encode, encOutputOffset, decodeMsgSize, decode, decOutputOffset);
+      }
       if (checkOutput) {
-        // checked version creates new output buffer each time
-        decode = dCipher.doFinal(encode, 0, encode.length);
         compareArrays(decode, expectedDecode);
-      } else {
-        // non-checked version outputs to existing encode buffer for maximum speed
-        decode = new byte[dCipher.getOutputSize(encode.length)];
-        dCipher.doFinal(encode, 0, encode.length, decode);
       }
     }
     catch (Exception e) {
--- a/hotspot/test/compiler/7184394/TestAESEncode.java	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/test/compiler/7184394/TestAESEncode.java	Wed Jul 05 19:40:49 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,14 +33,15 @@
   public void run() {
     try {
       if (!noReinit) cipher.init(Cipher.ENCRYPT_MODE, key, algParams);
+      encode = new byte[encodeLength];
+      if (testingMisalignment) {
+        int tempSize = cipher.update(input, encInputOffset, (msgSize - lastChunkSize), encode, encOutputOffset);
+        cipher.doFinal(input, (encInputOffset + msgSize - lastChunkSize), lastChunkSize, encode, (encOutputOffset + tempSize));
+      } else {
+        cipher.doFinal(input, encInputOffset, msgSize, encode, encOutputOffset);
+      }
       if (checkOutput) {
-        // checked version creates new output buffer each time
-        encode = cipher.doFinal(input, 0, msgSize);
         compareArrays(encode, expectedEncode);
-      } else {
-        // non-checked version outputs to existing encode buffer for maximum speed
-        encode = new byte[cipher.getOutputSize(msgSize)];
-        cipher.doFinal(input, 0, msgSize, encode);
       }
     }
     catch (Exception e) {
--- a/hotspot/test/compiler/7184394/TestAESMain.java	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/test/compiler/7184394/TestAESMain.java	Wed Jul 05 19:40:49 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2014 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,7 +28,19 @@
  * @summary add intrinsics to use AES instructions
  *
  * @run main/othervm/timeout=600 -Xbatch -DcheckOutput=true -Dmode=CBC TestAESMain
+ * @run main/othervm/timeout=600 -Xbatch -DcheckOutput=true -Dmode=CBC -DencInputOffset=1 TestAESMain
+ * @run main/othervm/timeout=600 -Xbatch -DcheckOutput=true -Dmode=CBC -DencOutputOffset=1 TestAESMain
+ * @run main/othervm/timeout=600 -Xbatch -DcheckOutput=true -Dmode=CBC -DdecOutputOffset=1 TestAESMain
+ * @run main/othervm/timeout=600 -Xbatch -DcheckOutput=true -Dmode=CBC -DencInputOffset=1 -DencOutputOffset=1 TestAESMain
+ * @run main/othervm/timeout=600 -Xbatch -DcheckOutput=true -Dmode=CBC -DencInputOffset=1 -DencOutputOffset=1 -DdecOutputOffset=1 TestAESMain
+ * @run main/othervm/timeout=600 -Xbatch -DcheckOutput=true -Dmode=CBC -DencInputOffset=1 -DencOutputOffset=1 -DdecOutputOffset=1 -DpaddingStr=NoPadding -DmsgSize=640 TestAESMain
  * @run main/othervm/timeout=600 -Xbatch -DcheckOutput=true -Dmode=ECB TestAESMain
+ * @run main/othervm/timeout=600 -Xbatch -DcheckOutput=true -Dmode=ECB -DencInputOffset=1 TestAESMain
+ * @run main/othervm/timeout=600 -Xbatch -DcheckOutput=true -Dmode=ECB -DencOutputOffset=1 TestAESMain
+ * @run main/othervm/timeout=600 -Xbatch -DcheckOutput=true -Dmode=ECB -DdecOutputOffset=1 TestAESMain
+ * @run main/othervm/timeout=600 -Xbatch -DcheckOutput=true -Dmode=ECB -DencInputOffset=1 -DencOutputOffset=1 TestAESMain
+ * @run main/othervm/timeout=600 -Xbatch -DcheckOutput=true -Dmode=ECB -DencInputOffset=1 -DencOutputOffset=1 -DdecOutputOffset=1 TestAESMain
+ * @run main/othervm/timeout=600 -Xbatch -DcheckOutput=true -Dmode=ECB -DencInputOffset=1 -DencOutputOffset=1 -DdecOutputOffset=1 -DpaddingStr=NoPadding -DmsgSize=640 TestAESMain
  *
  * @author Tom Deneau
  */
@@ -36,12 +48,13 @@
 public class TestAESMain {
   public static void main(String[] args) {
     int iters = (args.length > 0 ? Integer.valueOf(args[0]) : 1000000);
+    int warmupIters = (args.length > 1 ? Integer.valueOf(args[1]) : 20000);
     System.out.println(iters + " iterations");
     TestAESEncode etest = new TestAESEncode();
     etest.prepare();
-    // warm-up for 20K iterations
+    // warm-up
     System.out.println("Starting encryption warm-up");
-    for (int i=0; i<20000; i++) {
+    for (int i=0; i<warmupIters; i++) {
       etest.run();
     }
     System.out.println("Finished encryption warm-up");
@@ -54,9 +67,9 @@
 
     TestAESDecode dtest = new TestAESDecode();
     dtest.prepare();
-    // warm-up for 20K iterations
+    // warm-up
     System.out.println("Starting decryption warm-up");
-    for (int i=0; i<20000; i++) {
+    for (int i=0; i<warmupIters; i++) {
       dtest.run();
     }
     System.out.println("Finished decryption warm-up");
--- a/hotspot/test/compiler/ciReplay/TestVM.sh	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/test/compiler/ciReplay/TestVM.sh	Wed Jul 05 19:40:49 2017 +0200
@@ -1,6 +1,6 @@
 #!/bin/sh
 # 
-# Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 # 
 # This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
 ##
 ## @test
 ## @bug 8011675
+## @ignore 8032498
 ## @summary testing of ciReplay with using generated by VM replay.txt 
 ## @author igor.ignatyev@oracle.com
 ## @run shell TestVM.sh
--- a/hotspot/test/compiler/ciReplay/TestVM_no_comp_level.sh	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/test/compiler/ciReplay/TestVM_no_comp_level.sh	Wed Jul 05 19:40:49 2017 +0200
@@ -1,6 +1,6 @@
 #!/bin/sh
 # 
-# Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 # 
 # This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
 ##
 ## @test
 ## @bug 8011675
+## @ignore 8032498
 ## @summary testing of ciReplay with using generated by VM replay.txt w/o comp_level
 ## @author igor.ignatyev@oracle.com
 ## @run shell TestVM_no_comp_level.sh
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/floatingpoint/ModNaN.java	Wed Jul 05 19:40:49 2017 +0200
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8015396
+ * @summary double a%b returns NaN for some (a,b) (|a| < inf, |b|>0) (on Core i7 980X)
+ *
+ * @run main ModNaN
+ */
+public class ModNaN {
+    /* This bug was seen in the field for a particular version of the VM,
+     * but never reproduced internally, and the reason was never known,
+     * nor were the exact circumstances of the failure.
+     */
+    /*
+     * Failed on Windows 7/Core i7 980X/1.6.0_38 (64-bit):
+     *
+     * NaNs (i.e., when it fails, this is what we expect to see):
+     *
+     * 8.98846567431158E307 % 1.295163E-318 = NaN
+     * (0x7FE0000000000000L % 0x0000000000040000L)
+     *
+     * 1.7976931348623157E308 % 2.59032E-318 = NaN
+     * (0x7FEFFFFFFFFFFFFFL % 0x000000000007FFFFL)
+     *
+     * 1.7976931348623157E308 % 1.060997895E-314 = NaN
+     * (0x7FEFFFFFFFFFFFFFL % 0x000000007FFFFFFFL)
+     *
+     * 1.7976931348623157E308 % 6.767486E-317 = NaN
+     * (0x7FEFFFFFFFFFFFFFL % 0x0000000000d10208L)
+     *
+     * 1.7976931348623157E308 % 7.528725E-318 = NaN
+     * (0x7FEFFFFFFFFFFFFFL % 0x0000000000174077L)
+     *
+     * These cases did not fail, even when the previous five did:
+     * 8.98846567431158E307 % 1.29516E-318 = 2.53E-321
+     * (0x7fe0000000000000L % 0x000000000003ffffL)
+     *
+     * 1.7976931348623157E308 % 2.590327E-318 = 0.0
+     * (0x7fefffffffffffffL % 0x0000000000080000L)
+     *
+     * 1.7976931348623157E308 % 1.060965516E-314 = 9.35818525E-315
+     * (0x7fefffffffffffffL % 0x000000007ffeffffL)
+     *
+     */
+
+    static double[][] bad = new double[][] {
+            /*
+             * These hex numbers correspond to the base-10 doubles in the
+             * comment above; this can be checked by observing the output
+             * of testWithPrint.
+             */
+            new double[] { Double.longBitsToDouble(0x7FE0000000000000L),
+                    Double.longBitsToDouble(0x0000000000040000L) },
+            new double[] { Double.longBitsToDouble(0x7FEFFFFFFFFFFFFFL),
+                    Double.longBitsToDouble(0x000000000007FFFFL) },
+            new double[] { Double.longBitsToDouble(0x7FEFFFFFFFFFFFFFL),
+                    Double.longBitsToDouble(0x000000007FFFFFFFL) },
+            new double[] { Double.longBitsToDouble(0x7FEFFFFFFFFFFFFFL),
+                    6.767486E-317 },
+            new double[] { Double.longBitsToDouble(0x7FEFFFFFFFFFFFFFL),
+                    7.528725E-318 }, };
+
+    static double[][] good = new double[][] {
+            new double[] { Double.longBitsToDouble(0x7FE0000000000000L),
+                    Double.longBitsToDouble(0x000000000003FFFFL) },
+            new double[] { Double.longBitsToDouble(0x7FEFFFFFFFFFFFFFL),
+                    Double.longBitsToDouble(0x0000000000080000L) },
+            new double[] { Double.longBitsToDouble(0x7FEFFFFFFFFFFFFFL),
+                    Double.longBitsToDouble(0x000000007FFEFFFFL) }, };
+
+    public static void main(String[] args) throws InterruptedException {
+        int N = 10000;
+        testWithPrint();
+        for (int i = 0; i < N; i++)
+            testStrict();
+        for (int i = 0; i < N; i++)
+            test();
+        Thread.sleep(1000); // pause to let the compiler work
+        for (int i = 0; i < 10; i++)
+            testStrict();
+        for (int i = 0; i < 10; i++)
+            test();
+    }
+
+    public strictfp static void testWithPrint() {
+        for (double[] ab : bad) {
+            double a = ab[0];
+            double b = ab[1];
+            double mod = a % b;
+            System.out.println("" + a + "("+toHexRep(a)+") mod " +
+                                    b + "("+toHexRep(b)+") yields " +
+                                    mod + "("+toHexRep(mod)+")");
+        }
+
+        for (double[] ab : good) {
+            double a = ab[0];
+            double b = ab[1];
+            double mod = a % b;
+            System.out.println("" + a + "("+toHexRep(a)+") mod " +
+                                    b + "("+toHexRep(b)+") yields " +
+                                    mod + "("+toHexRep(mod)+")");
+        }
+    }
+
+    public strictfp static void testStrict() {
+        for (double[] ab : bad) {
+            double a = ab[0];
+            double b = ab[1];
+            double mod = a % b;
+            check(mod);
+        }
+
+        for (double[] ab : good) {
+            double a = ab[0];
+            double b = ab[1];
+            double mod = a % b;
+            check(mod);
+        }
+    }
+
+    public static void test() {
+        for (double[] ab : bad) {
+            double a = ab[0];
+            double b = ab[1];
+            double mod = a % b;
+            check(mod);
+        }
+
+        for (double[] ab : good) {
+            double a = ab[0];
+            double b = ab[1];
+            double mod = a % b;
+            check(mod);
+        }
+    }
+
+    static String toHexRep(double d) {
+        return "0x" + Long.toHexString(Double.doubleToRawLongBits(d)) + "L";
+    }
+
+    static void check(double mod) {
+        if (Double.isNaN(mod)) {
+            throw new Error("Saw a NaN, fail");
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/profiling/TestMethodHandleInvokesIntrinsic.java	Wed Jul 05 19:40:49 2017 +0200
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8041458
+ * @summary profiling of arguments in C1 at MethodHandle invoke of intrinsic tries to profile popped argument.
+ * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement -XX:TieredStopAtLevel=3 TestMethodHandleInvokesIntrinsic
+ *
+ */
+
+import java.lang.invoke.*;
+
+public class TestMethodHandleInvokesIntrinsic {
+
+    static final MethodHandle mh_nanoTime;
+    static final MethodHandle mh_getClass;
+    static {
+        MethodHandles.Lookup lookup = MethodHandles.lookup();
+        MethodType mt = MethodType.methodType(long.class);
+        MethodHandle MH = null;
+        try {
+            MH = lookup.findStatic(System.class, "nanoTime", mt);
+        } catch(NoSuchMethodException nsme) {
+            nsme.printStackTrace();
+            throw new RuntimeException("TEST FAILED", nsme);
+        } catch(IllegalAccessException iae) {
+            iae.printStackTrace();
+            throw new RuntimeException("TEST FAILED", iae);
+        }
+        mh_nanoTime = MH;
+
+        mt = MethodType.methodType(Class.class);
+        MH = null;
+        try {
+            MH = lookup.findVirtual(Object.class, "getClass", mt);
+        } catch(NoSuchMethodException nsme) {
+            nsme.printStackTrace();
+            throw new RuntimeException("TEST FAILED", nsme);
+        } catch(IllegalAccessException iae) {
+            iae.printStackTrace();
+            throw new RuntimeException("TEST FAILED", iae);
+        }
+        mh_getClass = MH;
+    }
+
+    static long m1() throws Throwable {
+        return (long)mh_nanoTime.invokeExact();
+    }
+
+    static Class m2(Object o) throws Throwable {
+        return (Class)mh_getClass.invokeExact(o);
+    }
+
+    static public void main(String[] args) {
+        try {
+            for (int i = 0; i < 20000; i++) {
+                m1();
+            }
+            TestMethodHandleInvokesIntrinsic o = new TestMethodHandleInvokesIntrinsic();
+            for (int i = 0; i < 20000; i++) {
+                m2(o);
+            }
+        } catch(Throwable t) {
+            System.out.println("Unexpected exception");
+            t.printStackTrace();
+            throw new RuntimeException("TEST FAILED", t);
+        }
+
+        System.out.println("TEST PASSED");
+    }
+}
--- a/hotspot/test/compiler/profiling/spectrapredefineclass/Agent.java	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/test/compiler/profiling/spectrapredefineclass/Agent.java	Wed Jul 05 19:40:49 2017 +0200
@@ -66,15 +66,6 @@
 public class Agent implements ClassFileTransformer {
 
 
-    static class MemoryChunk {
-        MemoryChunk other;
-        long[] array;
-        MemoryChunk(MemoryChunk other) {
-            other = other;
-            array = new long[1024 * 1024 * 1024];
-        }
-    }
-
     static public boolean m2(A a) {
         boolean res = false;
         if (a.getClass() == B.class) {
--- a/hotspot/test/compiler/profiling/spectrapredefineclass/Launcher.java	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/test/compiler/profiling/spectrapredefineclass/Launcher.java	Wed Jul 05 19:40:49 2017 +0200
@@ -30,7 +30,7 @@
  * @build Agent
  * @run main ClassFileInstaller Agent
  * @run main Launcher
- * @run main/othervm -XX:-TieredCompilation -XX:-BackgroundCompilation -XX:-UseOnStackReplacement -XX:TypeProfileLevel=222 -Xmx1M -XX:ReservedCodeCacheSize=3M Agent
+ * @run main/othervm -XX:-TieredCompilation -XX:-BackgroundCompilation -XX:-UseOnStackReplacement -XX:TypeProfileLevel=222 -XX:ReservedCodeCacheSize=3M Agent
  */
 public class Launcher {
     public static void main(String[] args) throws Exception  {
--- a/hotspot/test/compiler/rtm/cli/TestUseRTMDeoptOptionOnSupportedConfig.java	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/test/compiler/rtm/cli/TestUseRTMDeoptOptionOnSupportedConfig.java	Wed Jul 05 19:40:49 2017 +0200
@@ -27,7 +27,7 @@
  * @bug 8031320
  * @summary Verify UseRTMDeopt option processing on CPUs with rtm support
  *          when rtm locking is supported by VM.
- * @library /testlibrary /testlibrary/whitebox
+ * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary
  * @build TestUseRTMDeoptOptionOnSupportedConfig
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
--- a/hotspot/test/compiler/rtm/cli/TestUseRTMDeoptOptionOnUnsupportedConfig.java	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/test/compiler/rtm/cli/TestUseRTMDeoptOptionOnUnsupportedConfig.java	Wed Jul 05 19:40:49 2017 +0200
@@ -27,7 +27,7 @@
  * @bug 8031320
  * @summary Verify UseRTMDeopt option processing on CPUs without rtm support
  *          or on VMs without rtm locking support.
- * @library /testlibrary /testlibrary/whitebox
+ * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary
  * @build TestUseRTMDeoptOptionOnUnsupportedConfig
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
--- a/hotspot/test/compiler/startup/NumCompilerThreadsCheck.java	Thu May 15 10:41:25 2014 -0700
+++ b/hotspot/test/compiler/startup/NumCompilerThreadsCheck.java	Wed Jul 05 19:40:49 2017 +0200
@@ -30,11 +30,28 @@
 import com.oracle.java.testlibrary.*;
 
 public class NumCompilerThreadsCheck {
+
   public static void main(String[] args) throws Exception {
     ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:CICompilerCount=-1");
     OutputAnalyzer out = new OutputAnalyzer(pb.start());
 
     String expectedOutput = "CICompilerCount of -1 is invalid";
     out.shouldContain(expectedOutput);
+
+    if (isZeroVm()) {
+      String expectedLowWaterMarkText = "must be at least 0";
+      out.shouldContain(expectedLowWaterMarkText);
+    }
+  }
+
+  private static boolean isZeroVm() {
+    String vmName = System.getProperty("java.vm.name");
+    if (vmName == null) {
+      throw new RuntimeException("No VM name");
+    }
+    if (vmName.toLowerCase().contains("zero")) {
+      return true;
+    }
+    return false;
   }
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/ClassResolutionFail/Property.java	Wed Jul 05 19:40:49 2017 +0200
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+// Class PropertySuper is not found.
+
+public class Property extends PropertySuper {
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/ClassResolutionFail/PropertySuper.java	Wed Jul 05 19:40:49 2017 +0200
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+// Class PropertySuper should be removed.
+
+public class PropertySuper {
+  PropertySuper() { System.out.println("remove me for NoClassDefFoundError"); }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/ClassResolutionFail/TestClassResolutionFail.java	Wed Jul 05 19:40:49 2017 +0200
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestClassResolutionFail
+ * @bug 8023697
+ * @summary This tests that a failed class resolution does not report a different class name in the detail message on the first and subsequent attempts
+ */
+
+import java.io.File;
+
+public class TestClassResolutionFail {
+    static String message;
+    public static void test1() throws RuntimeException {
+        try {
+            Property p = new Property();
+        } catch (LinkageError e) {
+            message = e.getMessage();
+        }
+        try {
+            Property p = new Property();
+        } catch (LinkageError e) {
+            System.out.println(e.getMessage());
+            if (!e.getMessage().equals(message)) {
+                throw new RuntimeException("Wrong message: " + message + " != " + e.getMessage());
+            }
+        }
+    }
+    public static void main(java.lang.String[] unused) throws Exception {
+        // Remove PropertySuper class
+        String testClasses = System.getProperty("test.classes", ".");
+        File f = new File(testClasses + File.separator + "PropertySuper.class");
+        f.delete();
+        test1();
+    }
+}
+