changeset:   5909:103c36d2b04b
author:      trims
date:        Fri, 02 Jul 2010 01:36:15 -0700
parent:      5859:2d6ba7a22191 (current diff)
parent:      5908:3adeeb9c080e (diff)
child:       5910:ca3d85421cbb
summary:     Merge
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeDisassembler.java	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeDisassembler.java	Fri Jul 02 01:36:15 2010 -0700
@@ -72,6 +72,7 @@
       addBytecodeClass(Bytecodes._invokestatic, BytecodeInvoke.class);
       addBytecodeClass(Bytecodes._invokespecial, BytecodeInvoke.class);
       addBytecodeClass(Bytecodes._invokeinterface, BytecodeInvoke.class);
+      addBytecodeClass(Bytecodes._invokedynamic, BytecodeInvoke.class);
       addBytecodeClass(Bytecodes._jsr, BytecodeJsr.class);
       addBytecodeClass(Bytecodes._jsr_w, BytecodeJsrW.class);
       addBytecodeClass(Bytecodes._iload, BytecodeLoad.class);
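
The disassembler resolves each opcode through a table that maps opcode values to Bytecode wrapper classes, so registering Bytecodes._invokedynamic against BytecodeInvoke is what lets the SA decode the new opcode at all. A minimal sketch of such an opcode registry, with hypothetical names rather than the SA's own:

    import java.util.HashMap;
    import java.util.Map;

    // Illustrative opcode -> wrapper-class table; names are hypothetical.
    final class OpcodeTable {
        private final Map<Integer, Class<?>> wrappers = new HashMap<>();

        void register(int opcode, Class<?> wrapper) { wrappers.put(opcode, wrapper); }

        Class<?> wrapperFor(int opcode) { return wrappers.get(opcode); } // null if unregistered
    }
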
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeInvoke.java	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeInvoke.java	Fri Jul 02 01:36:15 2010 -0700
@@ -54,15 +54,31 @@
   // returns the name of the invoked method
   public Symbol name() {
     ConstantPool cp = method().getConstants();
+    if (isInvokedynamic()) {
+       int[] nt = cp.getNameAndTypeAt(indexForFieldOrMethod());
+       return cp.getSymbolAt(nt[0]);
+    }
     return cp.getNameRefAt(index());
   }
 
   // returns the signature of the invoked method
   public Symbol signature() {
     ConstantPool cp = method().getConstants();
+    if (isInvokedynamic()) {
+       int[] nt = cp.getNameAndTypeAt(indexForFieldOrMethod());
+       return cp.getSymbolAt(nt[1]);
+    }
     return cp.getSignatureRefAt(index());
   }
 
+  public int getSecondaryIndex() {
+    if (isInvokedynamic()) {
+      // change byte-ordering of 4-byte integer
+      return VM.getVM().getBytes().swapInt(javaSignedWordAt(1));
+    }
+    return super.getSecondaryIndex();  // throws IllegalArgumentException
+  }
+
   public Method getInvokedMethod() {
     return method().getConstants().getMethodRefAt(index());
   }
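
For invokedynamic the operand is not a direct constant-pool index, so name() and signature() first resolve the call site through the cache (via indexForFieldOrMethod()) and then read the two halves of the NameAndType entry. A hedged sketch of that last step, using the SA accessors as changed in this patch (assumes sun.jvm.hotspot.oops.*):

    // Sketch only: given the CP index of a NameAndType entry, fetch the
    // method name (slot 0) and signature (slot 1) symbols.
    static Symbol[] nameAndSignature(ConstantPool cp, int nameAndTypeIndex) {
        int[] nt = cp.getNameAndTypeAt(nameAndTypeIndex);  // {name_index, signature_index}
        return new Symbol[] { cp.getSymbolAt(nt[0]), cp.getSymbolAt(nt[1]) };
    }
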
@@ -87,6 +103,7 @@
   public boolean isInvokevirtual()   { return adjustedInvokeCode() == Bytecodes._invokevirtual;   }
   public boolean isInvokestatic()    { return adjustedInvokeCode() == Bytecodes._invokestatic;    }
   public boolean isInvokespecial()   { return adjustedInvokeCode() == Bytecodes._invokespecial;   }
+  public boolean isInvokedynamic()   { return adjustedInvokeCode() == Bytecodes._invokedynamic; }
 
   public boolean isValid()           { return isInvokeinterface() ||
                                               isInvokevirtual()   ||
@@ -104,6 +121,11 @@
     buf.append(spaces);
     buf.append('#');
     buf.append(Integer.toString(indexForFieldOrMethod()));
+    if (isInvokedynamic()) {
+       buf.append('(');
+       buf.append(Integer.toString(getSecondaryIndex()));
+       buf.append(')');
+    }
     buf.append(" [Method ");
     StringBuffer sigBuf = new StringBuffer();
     new SignatureConverter(signature(), sigBuf).iterateReturntype();
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeLoadConstant.java	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeLoadConstant.java	Fri Jul 02 01:36:15 2010 -0700
@@ -25,6 +25,7 @@
 package sun.jvm.hotspot.interpreter;
 
 import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.runtime.*;
 import sun.jvm.hotspot.utilities.*;
 
 public class BytecodeLoadConstant extends BytecodeWithCPIndex {
@@ -32,10 +33,47 @@
     super(method, bci);
   }
 
+  public boolean hasCacheIndex() {
+    // normal ldc uses CP index, but fast_aldc uses swapped CP cache index
+    return javaCode() != code();
+  }
+
   public int index() {
-    return javaCode() == Bytecodes._ldc ?
+    int i = javaCode() == Bytecodes._ldc ?
                  (int) (0xFF & javaByteAt(1))
                : (int) (0xFFFF & javaShortAt(1));
+    if (hasCacheIndex()) {
+      return (0xFFFF & VM.getVM().getBytes().swapShort((short) i));
+    } else {
+      return i;
+    }
+  }
+
+  public int poolIndex() {
+    int i = index();
+    if (hasCacheIndex()) {
+      ConstantPoolCache cpCache = method().getConstants().getCache();
+      return cpCache.getEntryAt(i).getConstantPoolIndex();
+    } else {
+      return i;
+    }
+  }
+
+  public int cacheIndex() {
+    if (hasCacheIndex()) {
+      return index();
+    } else {
+      return -1;  // no cache index
+    }
+  }
+
+  private Oop getCachedConstant() {
+    int i = cacheIndex();
+    if (i >= 0) {
+      ConstantPoolCache cpCache = method().getConstants().getCache();
+      return cpCache.getEntryAt(i).getF1();
+    }
+    return null;
   }
 
   public void verify() {
@@ -58,6 +96,7 @@
        // has to be int or float or String or Klass
        return (ctag.isUnresolvedString() || ctag.isString()
                || ctag.isUnresolvedKlass() || ctag.isKlass()
+               || ctag.isMethodHandle() || ctag.isMethodType()
                || ctag.isInt() || ctag.isFloat())? true: false;
     }
   }
@@ -112,7 +151,7 @@
 
   public String getConstantValue() {
     ConstantPool cpool = method().getConstants();
-    int cpIndex = index();
+    int cpIndex = poolIndex();
     ConstantTag ctag = cpool.getTagAt(cpIndex);
     if (ctag.isInt()) {
        return "<int " + Integer.toString(cpool.getIntAt(cpIndex)) +">";
@@ -149,6 +188,18 @@
        } else {
           throw new RuntimeException("should not reach here");
        }
+    } else if (ctag.isMethodHandle()) {
+       Oop x = getCachedConstant();
+       int refidx = cpool.getMethodHandleIndexAt(cpIndex);
+       int refkind = cpool.getMethodHandleRefKindAt(cpIndex);
+       return "<MethodHandle kind=" + Integer.toString(refkind) +
+           " ref=" + Integer.toString(refidx)
+           + (x == null ? "" : " @" + x.getHandle()) + ">";
+    } else if (ctag.isMethodType()) {
+       Oop x = getCachedConstant();
+       int refidx = cpool.getMethodTypeIndexAt(cpIndex);
+       return "<MethodType " + cpool.getSymbolAt(refidx).asString()
+           + (x == null ? "" : " @" + x.getHandle()) + ">";
     } else {
        if (Assert.ASSERTS_ENABLED) {
          Assert.that(false, "invalid load constant type");
@@ -162,7 +213,12 @@
     buf.append(getJavaBytecodeName());
     buf.append(spaces);
     buf.append('#');
-    buf.append(Integer.toString(index()));
+    buf.append(Integer.toString(poolIndex()));
+    if (hasCacheIndex()) {
+       buf.append('(');
+       buf.append(Integer.toString(cacheIndex()));
+       buf.append(')');
+    }
     buf.append(spaces);
     buf.append(getConstantValue());
     if (code() != javaCode()) {
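
A plain ldc/ldc_w operand is a constant-pool index, but the rewritten fast_aldc/fast_aldc_w forms carry a constant-pool cache index stored in native byte order; index() therefore swaps it, poolIndex() maps it back through the cache entry, and getCachedConstant() reads the resolved oop out of the cache entry's f1 field. A hedged sketch of the cache-to-pool mapping, assuming the ConstantPoolCache accessors used in this patch:

    // Sketch only: translate a (possibly cached) ldc operand into a real
    // constant-pool index.
    static int toPoolIndex(ConstantPoolCache cpCache, int rawIndex, boolean isCacheIndex) {
        return isCacheIndex ? cpCache.getEntryAt(rawIndex).getConstantPoolIndex()
                            : rawIndex;
    }
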
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeWithCPIndex.java	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeWithCPIndex.java	Fri Jul 02 01:36:15 2010 -0700
@@ -37,12 +37,19 @@
   // the constant pool index for this bytecode
   public int index() { return 0xFFFF & javaShortAt(1); }
 
+  public int getSecondaryIndex() {
+     throw new IllegalArgumentException("must be invokedynamic");
+  }
+
   protected int indexForFieldOrMethod() {
      ConstantPoolCache cpCache = method().getConstants().getCache();
      // get ConstantPool index from ConstantPoolCacheIndex at given bci
      int cpCacheIndex = index();
      if (cpCache == null) {
         return cpCacheIndex;
+     } else if (code() == Bytecodes._invokedynamic) {
+        int secondaryIndex = getSecondaryIndex();
+        return cpCache.getMainEntryAt(secondaryIndex).getConstantPoolIndex();
      } else {
         // change byte-ordering and go via cache
         return cpCache.getEntryAt((int) (0xFFFF & VM.getVM().getBytes().swapShort((short) cpCacheIndex))).getConstantPoolIndex();
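
invokedynamic stores a 4-byte operand in native byte order, and after swapping it the value is a "secondary" cache index that needs one extra hop to reach the main entry and its constant-pool index. A hedged sketch of that path using the accessors added in this patch (Integer.reverseBytes plays the role of Bytes.swapInt):

    // Sketch only: raw 4-byte invokedynamic operand -> constant-pool index.
    static int invokedynamicPoolIndex(ConstantPoolCache cpCache, int rawOperand) {
        int secondaryIndex = Integer.reverseBytes(rawOperand);  // native -> Java byte order
        return cpCache.getMainEntryAt(secondaryIndex).getConstantPoolIndex();
    }
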
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/interpreter/Bytecodes.java	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/interpreter/Bytecodes.java	Fri Jul 02 01:36:15 2010 -0700
@@ -222,7 +222,7 @@
   public static final int _invokespecial        = 183; // 0xb7
   public static final int _invokestatic         = 184; // 0xb8
   public static final int _invokeinterface      = 185; // 0xb9
-  public static final int _xxxunusedxxx         = 186; // 0xba
+  public static final int _invokedynamic        = 186; // 0xba
   public static final int _new                  = 187; // 0xbb
   public static final int _newarray             = 188; // 0xbc
   public static final int _anewarray            = 189; // 0xbd
@@ -269,9 +269,12 @@
   public static final int _fast_invokevfinal    = 226;
   public static final int _fast_linearswitch    = 227;
   public static final int _fast_binaryswitch    = 228;
-  public static final int _shouldnotreachhere   = 229; // For debugging
+  public static final int _fast_aldc            = 229;
+  public static final int _fast_aldc_w          = 230;
+  public static final int _return_register_finalizer = 231;
+  public static final int _shouldnotreachhere   = 232; // For debugging
 
-  public static final int number_of_codes       = 230;
+  public static final int number_of_codes       = 233;
 
   public static int specialLengthAt(Method method, int bci) {
     int code = codeAt(method, bci);
@@ -458,9 +461,9 @@
     def(_dconst_1            , "dconst_1"            , "b"    , null    , BasicType.getTDouble() ,  2, false);
     def(_bipush              , "bipush"              , "bc"   , null    , BasicType.getTInt()    ,  1, false);
     def(_sipush              , "sipush"              , "bcc"  , null    , BasicType.getTInt()    ,  1, false);
-    def(_ldc                 , "ldc"                 , "bi"   , null    , BasicType.getTIllegal(),  1, true );
-    def(_ldc_w               , "ldc_w"               , "bii"  , null    , BasicType.getTIllegal(),  1, true );
-    def(_ldc2_w              , "ldc2_w"              , "bii"  , null    , BasicType.getTIllegal(),  2, true );
+    def(_ldc                 , "ldc"                 , "bk"   , null    , BasicType.getTIllegal(),  1, true );
+    def(_ldc_w               , "ldc_w"               , "bkk"  , null    , BasicType.getTIllegal(),  1, true );
+    def(_ldc2_w              , "ldc2_w"              , "bkk"  , null    , BasicType.getTIllegal(),  2, true );
     def(_iload               , "iload"               , "bi"   , "wbii"  , BasicType.getTInt()    ,  1, false);
     def(_lload               , "lload"               , "bi"   , "wbii"  , BasicType.getTLong()   ,  2, false);
     def(_fload               , "fload"               , "bi"   , "wbii"  , BasicType.getTFloat()  ,  1, false);
@@ -618,26 +621,26 @@
     def(_dreturn             , "dreturn"             , "b"    , null    , BasicType.getTDouble() , -2, true );
     def(_areturn             , "areturn"             , "b"    , null    , BasicType.getTObject() , -1, true );
     def(_return              , "return"              , "b"    , null    , BasicType.getTVoid()   ,  0, true );
-    def(_getstatic           , "getstatic"           , "bjj"  , null    , BasicType.getTIllegal(),  1, true );
-    def(_putstatic           , "putstatic"           , "bjj"  , null    , BasicType.getTIllegal(), -1, true );
-    def(_getfield            , "getfield"            , "bjj"  , null    , BasicType.getTIllegal(),  0, true );
-    def(_putfield            , "putfield"            , "bjj"  , null    , BasicType.getTIllegal(), -2, true );
-    def(_invokevirtual       , "invokevirtual"       , "bjj"  , null    , BasicType.getTIllegal(), -1, true );
-    def(_invokespecial       , "invokespecial"       , "bjj"  , null    , BasicType.getTIllegal(), -1, true );
-    def(_invokestatic        , "invokestatic"        , "bjj"  , null    , BasicType.getTIllegal(),  0, true );
-    def(_invokeinterface     , "invokeinterface"     , "bjj__", null    , BasicType.getTIllegal(), -1, true );
-    def(_xxxunusedxxx        , "xxxunusedxxx"        , null   , null    , BasicType.getTVoid()   ,  0, false);
-    def(_new                 , "new"                 , "bii"  , null    , BasicType.getTObject() ,  1, true );
+    def(_getstatic           , "getstatic"           , "bJJ"  , null    , BasicType.getTIllegal(),  1, true );
+    def(_putstatic           , "putstatic"           , "bJJ"  , null    , BasicType.getTIllegal(), -1, true );
+    def(_getfield            , "getfield"            , "bJJ"  , null    , BasicType.getTIllegal(),  0, true );
+    def(_putfield            , "putfield"            , "bJJ"  , null    , BasicType.getTIllegal(), -2, true );
+    def(_invokevirtual       , "invokevirtual"       , "bJJ"  , null    , BasicType.getTIllegal(), -1, true );
+    def(_invokespecial       , "invokespecial"       , "bJJ"  , null    , BasicType.getTIllegal(), -1, true );
+    def(_invokestatic        , "invokestatic"        , "bJJ"  , null    , BasicType.getTIllegal(),  0, true );
+    def(_invokeinterface     , "invokeinterface"     , "bJJ__", null    , BasicType.getTIllegal(), -1, true );
+    def(_invokedynamic       , "invokedynamic"       , "bJJJJ", null    , BasicType.getTIllegal(), -1, true );
+    def(_new                 , "new"                 , "bkk"  , null    , BasicType.getTObject() ,  1, true );
     def(_newarray            , "newarray"            , "bc"   , null    , BasicType.getTObject() ,  0, true );
-    def(_anewarray           , "anewarray"           , "bii"  , null    , BasicType.getTObject() ,  0, true );
+    def(_anewarray           , "anewarray"           , "bkk"  , null    , BasicType.getTObject() ,  0, true );
     def(_arraylength         , "arraylength"         , "b"    , null    , BasicType.getTVoid()   ,  0, true );
     def(_athrow              , "athrow"              , "b"    , null    , BasicType.getTVoid()   , -1, true );
-    def(_checkcast           , "checkcast"           , "bii"  , null    , BasicType.getTObject() ,  0, true );
-    def(_instanceof          , "instanceof"          , "bii"  , null    , BasicType.getTInt()    ,  0, true );
+    def(_checkcast           , "checkcast"           , "bkk"  , null    , BasicType.getTObject() ,  0, true );
+    def(_instanceof          , "instanceof"          , "bkk"  , null    , BasicType.getTInt()    ,  0, true );
     def(_monitorenter        , "monitorenter"        , "b"    , null    , BasicType.getTVoid()   , -1, true );
     def(_monitorexit         , "monitorexit"         , "b"    , null    , BasicType.getTVoid()   , -1, true );
     def(_wide                , "wide"                , ""     , null    , BasicType.getTVoid()   ,  0, false);
-    def(_multianewarray      , "multianewarray"      , "biic" , null    , BasicType.getTObject() ,  1, true );
+    def(_multianewarray      , "multianewarray"      , "bkkc" , null    , BasicType.getTObject() ,  1, true );
     def(_ifnull              , "ifnull"              , "boo"  , null    , BasicType.getTVoid()   , -1, false);
     def(_ifnonnull           , "ifnonnull"           , "boo"  , null    , BasicType.getTVoid()   , -1, false);
     def(_goto_w              , "goto_w"              , "boooo", null    , BasicType.getTVoid()   ,  0, false);
@@ -646,38 +649,44 @@
 
     //  JVM bytecodes
     //  bytecode               bytecode name           format   wide f.   result tp               stk traps  std code
-    def(_fast_agetfield      , "fast_agetfield"      , "bjj"  , null    , BasicType.getTObject() ,  0, true , _getfield       );
-    def(_fast_bgetfield      , "fast_bgetfield"      , "bjj"  , null    , BasicType.getTInt()    ,  0, true , _getfield       );
-    def(_fast_cgetfield      , "fast_cgetfield"      , "bjj"  , null    , BasicType.getTChar()   ,  0, true , _getfield       );
-    def(_fast_dgetfield      , "fast_dgetfield"      , "bjj"  , null    , BasicType.getTDouble() ,  0, true , _getfield       );
-    def(_fast_fgetfield      , "fast_fgetfield"      , "bjj"  , null    , BasicType.getTFloat()  ,  0, true , _getfield       );
-    def(_fast_igetfield      , "fast_igetfield"      , "bjj"  , null    , BasicType.getTInt()    ,  0, true , _getfield       );
-    def(_fast_lgetfield      , "fast_lgetfield"      , "bjj"  , null    , BasicType.getTLong()   ,  0, true , _getfield       );
-    def(_fast_sgetfield      , "fast_sgetfield"      , "bjj"  , null    , BasicType.getTShort()  ,  0, true , _getfield       );
+    def(_fast_agetfield      , "fast_agetfield"      , "bJJ"  , null    , BasicType.getTObject() ,  0, true , _getfield       );
+    def(_fast_bgetfield      , "fast_bgetfield"      , "bJJ"  , null    , BasicType.getTInt()    ,  0, true , _getfield       );
+    def(_fast_cgetfield      , "fast_cgetfield"      , "bJJ"  , null    , BasicType.getTChar()   ,  0, true , _getfield       );
+    def(_fast_dgetfield      , "fast_dgetfield"      , "bJJ"  , null    , BasicType.getTDouble() ,  0, true , _getfield       );
+    def(_fast_fgetfield      , "fast_fgetfield"      , "bJJ"  , null    , BasicType.getTFloat()  ,  0, true , _getfield       );
+    def(_fast_igetfield      , "fast_igetfield"      , "bJJ"  , null    , BasicType.getTInt()    ,  0, true , _getfield       );
+    def(_fast_lgetfield      , "fast_lgetfield"      , "bJJ"  , null    , BasicType.getTLong()   ,  0, true , _getfield       );
+    def(_fast_sgetfield      , "fast_sgetfield"      , "bJJ"  , null    , BasicType.getTShort()  ,  0, true , _getfield       );
 
-    def(_fast_aputfield      , "fast_aputfield"      , "bjj"  , null    , BasicType.getTObject() ,  0, true , _putfield       );
-    def(_fast_bputfield      , "fast_bputfield"      , "bjj"  , null    , BasicType.getTInt()    ,  0, true , _putfield       );
-    def(_fast_cputfield      , "fast_cputfield"      , "bjj"  , null    , BasicType.getTChar()   ,  0, true , _putfield       );
-    def(_fast_dputfield      , "fast_dputfield"      , "bjj"  , null    , BasicType.getTDouble() ,  0, true , _putfield       );
-    def(_fast_fputfield      , "fast_fputfield"      , "bjj"  , null    , BasicType.getTFloat()  ,  0, true , _putfield       );
-    def(_fast_iputfield      , "fast_iputfield"      , "bjj"  , null    , BasicType.getTInt()    ,  0, true , _putfield       );
-    def(_fast_lputfield      , "fast_lputfield"      , "bjj"  , null    , BasicType.getTLong()   ,  0, true , _putfield       );
-    def(_fast_sputfield      , "fast_sputfield"      , "bjj"  , null    , BasicType.getTShort()  ,  0, true , _putfield       );
+    def(_fast_aputfield      , "fast_aputfield"      , "bJJ"  , null    , BasicType.getTObject() ,  0, true , _putfield       );
+    def(_fast_bputfield      , "fast_bputfield"      , "bJJ"  , null    , BasicType.getTInt()    ,  0, true , _putfield       );
+    def(_fast_cputfield      , "fast_cputfield"      , "bJJ"  , null    , BasicType.getTChar()   ,  0, true , _putfield       );
+    def(_fast_dputfield      , "fast_dputfield"      , "bJJ"  , null    , BasicType.getTDouble() ,  0, true , _putfield       );
+    def(_fast_fputfield      , "fast_fputfield"      , "bJJ"  , null    , BasicType.getTFloat()  ,  0, true , _putfield       );
+    def(_fast_iputfield      , "fast_iputfield"      , "bJJ"  , null    , BasicType.getTInt()    ,  0, true , _putfield       );
+    def(_fast_lputfield      , "fast_lputfield"      , "bJJ"  , null    , BasicType.getTLong()   ,  0, true , _putfield       );
+    def(_fast_sputfield      , "fast_sputfield"      , "bJJ"  , null    , BasicType.getTShort()  ,  0, true , _putfield       );
 
     def(_fast_aload_0        , "fast_aload_0"        , "b"    , null    , BasicType.getTObject() ,  1, true , _aload_0        );
-    def(_fast_iaccess_0      , "fast_iaccess_0"      , "b_jj" , null    , BasicType.getTInt()    ,  1, true , _aload_0        );
-    def(_fast_aaccess_0      , "fast_aaccess_0"      , "b_jj" , null    , BasicType.getTObject() ,  1, true , _aload_0        );
-    def(_fast_faccess_0      , "fast_faccess_0"      , "b_jj" , null    , BasicType.getTObject() ,  1, true , _aload_0        );
+    def(_fast_iaccess_0      , "fast_iaccess_0"      , "b_JJ" , null    , BasicType.getTInt()    ,  1, true , _aload_0        );
+    def(_fast_aaccess_0      , "fast_aaccess_0"      , "b_JJ" , null    , BasicType.getTObject() ,  1, true , _aload_0        );
+    def(_fast_faccess_0      , "fast_faccess_0"      , "b_JJ" , null    , BasicType.getTObject() ,  1, true , _aload_0        );
 
     def(_fast_iload          , "fast_iload"          , "bi"   , null    , BasicType.getTInt()    ,  1, false, _iload);
     def(_fast_iload2         , "fast_iload2"         , "bi_i" , null    , BasicType.getTInt()    ,  2, false, _iload);
     def(_fast_icaload        , "fast_icaload"        , "bi_"  , null    , BasicType.getTInt()    ,  0, false, _iload);
 
     // Faster method invocation.
-    def(_fast_invokevfinal   , "fast_invokevfinal"   , "bjj"  , null    , BasicType.getTIllegal(), -1, true, _invokevirtual);
+    def(_fast_invokevfinal   , "fast_invokevfinal"   , "bJJ"  , null    , BasicType.getTIllegal(), -1, true, _invokevirtual);
 
     def(_fast_linearswitch   , "fast_linearswitch"   , ""     , null    , BasicType.getTVoid()   , -1, false, _lookupswitch   );
     def(_fast_binaryswitch   , "fast_binaryswitch"   , ""     , null    , BasicType.getTVoid()   , -1, false, _lookupswitch   );
+
+    def(_return_register_finalizer, "return_register_finalizer", "b"    , null    , BasicType.getTVoid()   , 0, true, _return );
+
+    def(_fast_aldc           , "fast_aldc"           , "bj"   , null    , BasicType.getTObject(),   1, true,  _ldc   );
+    def(_fast_aldc_w         , "fast_aldc_w"         , "bJJ"  , null    , BasicType.getTObject(),   1, true,  _ldc_w );
+
     def(_shouldnotreachhere  , "_shouldnotreachhere" , "b"    , null    , BasicType.getTVoid()   ,  0, false);
 
     if (Assert.ASSERTS_ENABLED) {
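
The format strings do double duty: their length gives the instruction length in bytes, and the letters describe the operands as this patch uses them: 'k' marks a constant-pool index, 'j' a one-byte constant-pool-cache index, 'J' a native-order cache-index byte, and '_' an operand byte the rewriter ignores (the invokeinterface count/zero bytes). A small hedged sketch of reading those conventions back out of a format string:

    // Sketch only: classify a bytecode by its format string, mirroring the
    // conventions visible in the table above ("bk", "bj", "bJJ", "bJJJJ", ...).
    static boolean usesCpCacheIndex(String fmt) {
        return fmt.indexOf('J') >= 0 || fmt.indexOf('j') >= 0;
    }

    static boolean usesCpIndex(String fmt) {
        return fmt.indexOf('k') >= 0;
    }

    static int lengthInBytes(String fmt) {
        return fmt.length();   // one format character per instruction byte
    }
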
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPool.java	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPool.java	Fri Jul 02 01:36:15 2010 -0700
@@ -152,7 +152,7 @@
     return res;
   }
 
-  public int getNameAndTypeAt(int which) {
+  public int[] getNameAndTypeAt(int which) {
     if (Assert.ASSERTS_ENABLED) {
       Assert.that(getTagAt(which).isNameAndType(), "Corrupted constant pool");
     }
@@ -160,18 +160,16 @@
     if (DEBUG) {
       System.err.println("ConstantPool.getNameAndTypeAt(" + which + "): result = " + i);
     }
-    return i;
+    return new int[] { extractLowShortFromInt(i), extractHighShortFromInt(i) };
   }
 
   public Symbol getNameRefAt(int which) {
-    int refIndex = getNameAndTypeAt(getNameAndTypeRefIndexAt(which));
-    int nameIndex = extractLowShortFromInt(refIndex);
+    int nameIndex = getNameAndTypeAt(getNameAndTypeRefIndexAt(which))[0];
     return getSymbolAt(nameIndex);
   }
 
   public Symbol getSignatureRefAt(int which) {
-    int refIndex = getNameAndTypeAt(getNameAndTypeRefIndexAt(which));
-    int sigIndex = extractHighShortFromInt(refIndex);
+    int sigIndex = getNameAndTypeAt(getNameAndTypeRefIndexAt(which))[1];
     return getSymbolAt(sigIndex);
   }
 
@@ -220,11 +218,11 @@
 
   /** Lookup for entries consisting of (name_index, signature_index) */
   public int getNameRefIndexAt(int index) {
-    int refIndex = getNameAndTypeAt(index);
+    int[] refIndex = getNameAndTypeAt(index);
     if (DEBUG) {
-      System.err.println("ConstantPool.getNameRefIndexAt(" + index + "): refIndex = " + refIndex);
+      System.err.println("ConstantPool.getNameRefIndexAt(" + index + "): refIndex = " + refIndex[0]+"/"+refIndex[1]);
     }
-    int i = extractLowShortFromInt(refIndex);
+    int i = refIndex[0];
     if (DEBUG) {
       System.err.println("ConstantPool.getNameRefIndexAt(" + index + "): result = " + i);
     }
@@ -233,17 +231,53 @@
 
   /** Lookup for entries consisting of (name_index, signature_index) */
   public int getSignatureRefIndexAt(int index) {
-    int refIndex = getNameAndTypeAt(index);
+    int[] refIndex = getNameAndTypeAt(index);
     if (DEBUG) {
-      System.err.println("ConstantPool.getSignatureRefIndexAt(" + index + "): refIndex = " + refIndex);
+      System.err.println("ConstantPool.getSignatureRefIndexAt(" + index + "): refIndex = " + refIndex[0]+"/"+refIndex[1]);
     }
-    int i = extractHighShortFromInt(refIndex);
+    int i = refIndex[1];
     if (DEBUG) {
       System.err.println("ConstantPool.getSignatureRefIndexAt(" + index + "): result = " + i);
     }
     return i;
   }
 
+  /** Lookup for MethodHandle entries. */
+  public int getMethodHandleIndexAt(int i) {
+    if (Assert.ASSERTS_ENABLED) {
+      Assert.that(getTagAt(i).isMethodHandle(), "Corrupted constant pool");
+    }
+    int res = extractHighShortFromInt(getIntAt(i));
+    if (DEBUG) {
+      System.err.println("ConstantPool.getMethodHandleIndexAt(" + i + "): result = " + res);
+    }
+    return res;
+  }
+
+  /** Lookup for MethodHandle entries. */
+  public int getMethodHandleRefKindAt(int i) {
+    if (Assert.ASSERTS_ENABLED) {
+      Assert.that(getTagAt(i).isMethodHandle(), "Corrupted constant pool");
+    }
+    int res = extractLowShortFromInt(getIntAt(i));
+    if (DEBUG) {
+      System.err.println("ConstantPool.getMethodHandleRefKindAt(" + i + "): result = " + res);
+    }
+    return res;
+  }
+
+  /** Lookup for MethodType entries. */
+  public int getMethodTypeIndexAt(int i) {
+    if (Assert.ASSERTS_ENABLED) {
+      Assert.that(getTagAt(i).isMethodType(), "Corrupted constant pool");
+    }
+    int res = getIntAt(i);
+    if (DEBUG) {
+      System.err.println("ConstantPool.getMethodHandleTypeAt(" + i + "): result = " + res);
+    }
+    return res;
+  }
+
   final private static String[] nameForTag = new String[] {
   };
 
@@ -261,6 +295,8 @@
     case JVM_CONSTANT_Methodref:          return "JVM_CONSTANT_Methodref";
     case JVM_CONSTANT_InterfaceMethodref: return "JVM_CONSTANT_InterfaceMethodref";
     case JVM_CONSTANT_NameAndType:        return "JVM_CONSTANT_NameAndType";
+    case JVM_CONSTANT_MethodHandle:       return "JVM_CONSTANT_MethodHandle";
+    case JVM_CONSTANT_MethodType:         return "JVM_CONSTANT_MethodType";
     case JVM_CONSTANT_Invalid:            return "JVM_CONSTANT_Invalid";
     case JVM_CONSTANT_UnresolvedClass:    return "JVM_CONSTANT_UnresolvedClass";
     case JVM_CONSTANT_UnresolvedClassInError:    return "JVM_CONSTANT_UnresolvedClassInError";
@@ -317,6 +353,8 @@
         case JVM_CONSTANT_Methodref:
         case JVM_CONSTANT_InterfaceMethodref:
         case JVM_CONSTANT_NameAndType:
+        case JVM_CONSTANT_MethodHandle:
+        case JVM_CONSTANT_MethodType:
           visitor.doInt(new IntField(new NamedFieldIdentifier(nameForTag(ctag)), indexOffset(index), true), true);
           break;
         }
@@ -467,6 +505,18 @@
                                           + ", type = " + signatureIndex);
                   break;
               }
+
+              case JVM_CONSTANT_MethodHandle: {
+                  dos.writeByte(cpConstType);
+                  int value = getIntAt(ci);
+                  byte  refKind  = (byte)  extractLowShortFromInt(value);
+                  short refIndex = (short) extractHighShortFromInt(value);
+                  dos.writeByte(refKind);
+                  dos.writeShort(refIndex);
+                  if (DEBUG) debugMessage("CP[" + ci + "] = MH kind = " + refKind
+                                          + ", index = " + refIndex);
+                  break;
+              }
               default:
                   throw new InternalError("unknown tag: " + cpConstType);
           } // switch
@@ -488,10 +538,12 @@
   //
 
   private static int extractHighShortFromInt(int val) {
+    // must stay in sync with constantPoolOopDesc::name_and_type_at_put, method_at_put, etc.
     return (val >> 16) & 0xFFFF;
   }
 
   private static int extractLowShortFromInt(int val) {
+    // must stay in sync with constantPoolOopDesc::name_and_type_at_put, method_at_put, etc.
     return val & 0xFFFF;
   }
 }
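
Both new constant kinds occupy a single 32-bit slot: for CONSTANT_MethodHandle the low 16 bits hold the reference kind (a JVM_REF_* value) and the high 16 bits the referenced constant-pool index, while CONSTANT_MethodType stores just a descriptor index, which is why the accessors above lean on extractLow/HighShortFromInt. A hedged sketch of the packing they decode:

    // Sketch only: pack/unpack the 32-bit MethodHandle slot decoded above
    // (low short = ref kind, high short = referenced CP index).
    static int packMethodHandle(int refKind, int refIndex) {
        return ((refIndex & 0xFFFF) << 16) | (refKind & 0xFFFF);
    }

    static int refKindOf(int packed)  { return packed & 0xFFFF; }
    static int refIndexOf(int packed) { return (packed >>> 16) & 0xFFFF; }
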
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPoolCache.java	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPoolCache.java	Fri Jul 02 01:36:15 2010 -0700
@@ -78,6 +78,31 @@
     return new ConstantPoolCacheEntry(this, i);
   }
 
+  public static boolean isSecondaryIndex(int i)     { return (i < 0); }
+  public static int     decodeSecondaryIndex(int i) { return  isSecondaryIndex(i) ? ~i : i; }
+  public static int     encodeSecondaryIndex(int i) { return !isSecondaryIndex(i) ? ~i : i; }
+
+  // secondary entries hold invokedynamic call site bindings
+  public ConstantPoolCacheEntry getSecondaryEntryAt(int i) {
+    ConstantPoolCacheEntry e = new ConstantPoolCacheEntry(this, decodeSecondaryIndex(i));
+    if (Assert.ASSERTS_ENABLED) {
+      Assert.that(e.isSecondaryEntry(), "must be a secondary entry");
+    }
+    return e;
+  }
+
+  public ConstantPoolCacheEntry getMainEntryAt(int i) {
+    if (isSecondaryIndex(i)) {
+      // run through an extra level of indirection:
+      i = getSecondaryEntryAt(i).getMainEntryIndex();
+    }
+    ConstantPoolCacheEntry e = new ConstantPoolCacheEntry(this, i);
+    if (Assert.ASSERTS_ENABLED) {
+      Assert.that(!e.isSecondaryEntry(), "must not be a secondary entry");
+    }
+    return e;
+  }
+
   public int getIntAt(int entry, int fld) {
     //alignObjectSize ?
     long offset = baseOffset + /*alignObjectSize*/entry * elementSize + fld* getHeap().getIntSize();
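
Secondary cache entries (one per invokedynamic call site) are addressed with an index flagged by its sign: the bitwise complement turns a non-negative slot into a negative marker, and applying it again recovers the slot, so encode and decode are the same operation. A hedged round-trip sketch:

    // Sketch only: the one's-complement encoding used above.
    static boolean isSecondary(int i)      { return i < 0;    }
    static int encodeSecondary(int slot)   { return ~slot;    }  // 5  -> -6
    static int decodeSecondary(int marker) { return ~marker;  }  // -6 ->  5
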
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPoolCacheEntry.java	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPoolCacheEntry.java	Fri Jul 02 01:36:15 2010 -0700
@@ -28,6 +28,7 @@
 import sun.jvm.hotspot.debugger.*;
 import sun.jvm.hotspot.runtime.*;
 import sun.jvm.hotspot.types.*;
+import sun.jvm.hotspot.utilities.*;
 
 public class ConstantPoolCacheEntry {
   private static long          size;
@@ -67,9 +68,23 @@
   }
 
   public int getConstantPoolIndex() {
+    if (Assert.ASSERTS_ENABLED) {
+      Assert.that(!isSecondaryEntry(), "must not be a secondary CP entry");
+    }
     return (int) (getIndices() & 0xFFFF);
   }
 
+  public boolean isSecondaryEntry() {
+    return (getIndices() & 0xFFFF) == 0;
+  }
+
+  public int getMainEntryIndex() {
+    if (Assert.ASSERTS_ENABLED) {
+      Assert.that(isSecondaryEntry(), "must be a secondary CP entry");
+    }
+    return (int) (getIndices() >>> 16);
+  }
+
   private long getIndices() {
       return cp.getHandle().getCIntegerAt(indices.getOffset() + offset, indices.getSize(), indices.isUnsigned());
   }
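
The entry's packed "indices" word is what makes the two entry kinds distinguishable: a main entry keeps its constant-pool index in the low 16 bits, and a secondary entry stores zero there while its high bits carry the index of the main entry it delegates to. A hedged sketch of that layout:

    // Sketch only: the layout of the "indices" word read above.
    static boolean isSecondaryEntry(long indices) { return (indices & 0xFFFF) == 0; }
    static int constantPoolIndex(long indices)    { return (int) (indices & 0xFFFF); }
    static int mainEntryIndex(long indices)       { return (int) (indices >>> 16); }
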
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/GenerateOopMap.java	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/GenerateOopMap.java	Fri Jul 02 01:36:15 2010 -0700
@@ -566,6 +566,7 @@
       case Bytecodes._invokespecial:
       case Bytecodes._invokestatic:
       case Bytecodes._invokeinterface:
+      case Bytecodes._invokedynamic:
         // FIXME: print signature of referenced method (need more
         // accessors in ConstantPool and ConstantPoolCache)
         int idx = currentBC.getIndexBig();
@@ -605,6 +606,7 @@
       case Bytecodes._invokespecial:
       case Bytecodes._invokestatic:
       case Bytecodes._invokeinterface:
+      case Bytecodes._invokedynamic:
         // FIXME: print signature of referenced method (need more
         // accessors in ConstantPool and ConstantPoolCache)
         int idx = currentBC.getIndexBig();
@@ -1134,6 +1136,7 @@
       case Bytecodes._invokespecial:
       case Bytecodes._invokestatic:
       case Bytecodes._invokeinterface:
+      case Bytecodes._invokedynamic:
         _itr_send = itr;
         _report_result_for_send = true;
         break;
@@ -1379,6 +1382,7 @@
     case Bytecodes._invokevirtual:
     case Bytecodes._invokespecial:     doMethod(false, false, itr.getIndexBig(), itr.bci()); break;
     case Bytecodes._invokestatic:      doMethod(true,  false, itr.getIndexBig(), itr.bci()); break;
+    case Bytecodes._invokedynamic:     doMethod(false, true,  itr.getIndexBig(), itr.bci()); break;
     case Bytecodes._invokeinterface:   doMethod(false, true,  itr.getIndexBig(), itr.bci()); break;
     case Bytecodes._newarray:
     case Bytecodes._anewarray:         ppNewRef(vCTS, itr.bci()); break;
@@ -1725,7 +1729,7 @@
   void  doMethod                            (boolean is_static, boolean is_interface, int idx, int bci) {
     // Dig up signature for field in constant pool
     ConstantPool cp       = _method.getConstants();
-    int nameAndTypeIdx    = cp.getNameAndTypeRefIndexAt(idx);
+    int nameAndTypeIdx    = cp.getTagAt(idx).isNameAndType() ? idx : cp.getNameAndTypeRefIndexAt(idx);
     int signatureIdx      = cp.getSignatureRefIndexAt(nameAndTypeIdx);
     Symbol signature      = cp.getSymbolAt(signatureIdx);
 
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/ClassConstants.java	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/ClassConstants.java	Fri Jul 02 01:36:15 2010 -0700
@@ -40,6 +40,19 @@
     public static final int JVM_CONSTANT_Methodref          = 10;
     public static final int JVM_CONSTANT_InterfaceMethodref = 11;
     public static final int JVM_CONSTANT_NameAndType        = 12;
+    public static final int JVM_CONSTANT_MethodHandle       = 15;
+    public static final int JVM_CONSTANT_MethodType         = 16;
+
+    // JVM_CONSTANT_MethodHandle subtypes
+    public static final int JVM_REF_getField                = 1;
+    public static final int JVM_REF_getStatic               = 2;
+    public static final int JVM_REF_putField                = 3;
+    public static final int JVM_REF_putStatic               = 4;
+    public static final int JVM_REF_invokeVirtual           = 5;
+    public static final int JVM_REF_invokeStatic            = 6;
+    public static final int JVM_REF_invokeSpecial           = 7;
+    public static final int JVM_REF_newInvokeSpecial        = 8;
+    public static final int JVM_REF_invokeInterface         = 9;
 
     // HotSpot specific constant pool constant types.
 
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ByteCodeRewriter.java	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ByteCodeRewriter.java	Fri Jul 02 01:36:15 2010 -0700
@@ -54,14 +54,34 @@
 
     }
 
-    protected short getConstantPoolIndex(int bci) {
+    protected short getConstantPoolIndex(int rawcode, int bci) {
        // get ConstantPool index from ConstantPoolCacheIndex at given bci
-       short cpCacheIndex = method.getBytecodeShortArg(bci);
+       String fmt = Bytecodes.format(rawcode);
+       int cpCacheIndex;
+       switch (fmt.length()) {
+       case 2: cpCacheIndex = method.getBytecodeByteArg(bci); break;
+       case 3: cpCacheIndex = method.getBytecodeShortArg(bci); break;
+       case 5:
+           if (fmt.indexOf("__") >= 0)
+               cpCacheIndex = method.getBytecodeShortArg(bci);
+           else
+               cpCacheIndex = method.getBytecodeIntArg(bci);
+           break;
+       default: throw new IllegalArgumentException();
+       }
        if (cpCache == null) {
-          return cpCacheIndex;
+          return (short) cpCacheIndex;
+       } else if (fmt.indexOf("JJJJ") >= 0) {
+          // change byte-ordering and go via secondary cache entry
+          return (short) cpCache.getMainEntryAt(bytes.swapInt(cpCacheIndex)).getConstantPoolIndex();
+       } else if (fmt.indexOf("JJ") >= 0) {
+          // change byte-ordering and go via cache
+          return (short) cpCache.getEntryAt((int) (0xFFFF & bytes.swapShort((short)cpCacheIndex))).getConstantPoolIndex();
+       } else if (fmt.indexOf("j") >= 0) {
+          // go via cache
+          return (short) cpCache.getEntryAt((int) (0xFF & cpCacheIndex)).getConstantPoolIndex();
        } else {
-          // change byte-ordering and go via cache
-          return (short) cpCache.getEntryAt((int) (0xFFFF & bytes.swapShort(cpCacheIndex))).getConstantPoolIndex();
+          return (short) cpCacheIndex;
        }
     }
 
@@ -100,10 +120,31 @@
                 case Bytecodes._invokespecial:
                 case Bytecodes._invokestatic:
                 case Bytecodes._invokeinterface: {
-                    cpoolIndex = getConstantPoolIndex(bci + 1);
+                    cpoolIndex = getConstantPoolIndex(hotspotcode, bci + 1);
                     writeShort(code, bci + 1, cpoolIndex);
                     break;
                 }
+
+                case Bytecodes._invokedynamic:
+                    cpoolIndex = getConstantPoolIndex(hotspotcode, bci + 1);
+                    writeShort(code, bci + 1, cpoolIndex);
+                    writeShort(code, bci + 3, (short)0);  // clear out trailing bytes
+                    break;
+
+                case Bytecodes._ldc_w:
+                    if (hotspotcode != bytecode) {
+                        // fast_aldc_w puts constant in CP cache
+                        cpoolIndex = getConstantPoolIndex(hotspotcode, bci + 1);
+                        writeShort(code, bci + 1, cpoolIndex);
+                    }
+                    break;
+                case Bytecodes._ldc:
+                    if (hotspotcode != bytecode) {
+                        // fast_aldc puts constant in CP cache
+                        cpoolIndex = getConstantPoolIndex(hotspotcode, bci + 1);
+                        code[bci + 1] = (byte)(cpoolIndex);
+                    }
+                    break;
             }
 
             len = Bytecodes.lengthFor(bytecode);
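
The rewriter now sizes the operand from the bytecode's format string: two-character formats read one byte, three-character formats a short, and five-character formats either a short (invokeinterface, whose format pads with "__") or a full int (invokedynamic); the 'J'/'j' letters then decide whether that value is a cache index that must be byte-swapped and mapped back through the cache. A hedged sketch of the width selection:

    // Sketch only: operand width in bytes, chosen from the format string as
    // in getConstantPoolIndex above.
    static int operandWidth(String fmt) {
        switch (fmt.length()) {
            case 2:  return 1;                           // "bj"    (fast_aldc)
            case 3:  return 2;                           // "bJJ"   (invokevirtual, fast_aldc_w, ...)
            case 5:  return fmt.contains("__") ? 2 : 4;  // "bJJ__" vs "bJJJJ"
            default: throw new IllegalArgumentException(fmt);
        }
    }
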
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ClassWriter.java	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ClassWriter.java	Fri Jul 02 01:36:15 2010 -0700
@@ -61,10 +61,12 @@
     protected short  _signatureIndex;
 
     protected static int extractHighShortFromInt(int val) {
+        // must stay in sync with constantPoolOopDesc::name_and_type_at_put, method_at_put, etc.
         return (val >> 16) & 0xFFFF;
     }
 
     protected static int extractLowShortFromInt(int val) {
+        // must stay in sync with constantPoolOopDesc::name_and_type_at_put, method_at_put, etc.
         return val & 0xFFFF;
     }
 
@@ -297,6 +299,28 @@
                                         + ", type = " + signatureIndex);
                      break;
                 }
+
+                case JVM_CONSTANT_MethodHandle: {
+                     dos.writeByte(cpConstType);
+                     int value = cpool.getIntAt(ci);
+                     short refIndex = (short) extractHighShortFromInt(value);
+                     byte  refKind  = (byte)  extractLowShortFromInt(value);
+                     dos.writeByte(refKind);
+                     dos.writeShort(refIndex);
+                     if (DEBUG) debugMessage("CP[" + ci + "] = MH index = " + refIndex
+                                        + ", kind = " + refKind);
+                     break;
+                }
+
+                case JVM_CONSTANT_MethodType: {
+                     dos.writeByte(cpConstType);
+                     int value = cpool.getIntAt(ci);
+                     short refIndex = (short) value;
+                     dos.writeShort(refIndex);
+                     if (DEBUG) debugMessage("CP[" + ci + "] = MT index = " + refIndex);
+                     break;
+                }
+
                 default:
                   throw new InternalError("Unknown tag: " + cpConstType);
             } // switch
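
In the emitted class file the two new constants follow the JSR 292 layout: CONSTANT_MethodHandle (tag 15) is a one-byte reference_kind followed by a two-byte reference_index, and CONSTANT_MethodType (tag 16) is a two-byte descriptor index. A hedged, self-contained sketch of the same wire format with DataOutputStream:

    import java.io.DataOutputStream;
    import java.io.IOException;

    // Sketch only: the class-file layout written above.
    final class Jsr292ConstantWriter {
        static void writeMethodHandle(DataOutputStream dos, int refKind, int refIndex)
                throws IOException {
            dos.writeByte(15);         // CONSTANT_MethodHandle
            dos.writeByte(refKind);    // JVM_REF_* value (1..9)
            dos.writeShort(refIndex);  // referenced CP entry
        }

        static void writeMethodType(DataOutputStream dos, int descriptorIndex)
                throws IOException {
            dos.writeByte(16);               // CONSTANT_MethodType
            dos.writeShort(descriptorIndex); // CONSTANT_Utf8 descriptor
        }
    }
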
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/ui/classbrowser/HTMLGenerator.java	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/ui/classbrowser/HTMLGenerator.java	Fri Jul 02 01:36:15 2010 -0700
@@ -572,6 +572,16 @@
                buf.cell(Integer.toString(cpool.getIntAt(index)));
                break;
 
+            case JVM_CONSTANT_MethodHandle:
+               buf.cell("JVM_CONSTANT_MethodHandle");
+               buf.cell(genLowHighShort(cpool.getIntAt(index)));
+               break;
+
+            case JVM_CONSTANT_MethodType:
+               buf.cell("JVM_CONSTANT_MethodType");
+               buf.cell(Integer.toString(cpool.getIntAt(index)));
+               break;
+
             default:
                throw new InternalError("unknown tag: " + ctag);
          }
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/ConstantTag.java	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/ConstantTag.java	Fri Jul 02 01:36:15 2010 -0700
@@ -38,12 +38,26 @@
   private static int JVM_CONSTANT_Methodref               = 10;
   private static int JVM_CONSTANT_InterfaceMethodref      = 11;
   private static int JVM_CONSTANT_NameAndType             = 12;
+  private static int JVM_CONSTANT_MethodHandle            = 15;  // JSR 292
+  private static int JVM_CONSTANT_MethodType              = 16;  // JSR 292
   private static int JVM_CONSTANT_Invalid                 = 0;   // For bad value initialization
   private static int JVM_CONSTANT_UnresolvedClass         = 100; // Temporary tag until actual use
   private static int JVM_CONSTANT_ClassIndex              = 101; // Temporary tag while constructing constant pool
   private static int JVM_CONSTANT_UnresolvedString        = 102; // Temporary tag until actual use
   private static int JVM_CONSTANT_StringIndex             = 103; // Temporary tag while constructing constant pool
   private static int JVM_CONSTANT_UnresolvedClassInError  = 104; // Resolution failed
+  private static int JVM_CONSTANT_Object                  = 105; // Required for BoundMethodHandle arguments.
+
+  // JVM_CONSTANT_MethodHandle subtypes //FIXME: connect these to data structure
+  private static int JVM_REF_getField                = 1;
+  private static int JVM_REF_getStatic               = 2;
+  private static int JVM_REF_putField                = 3;
+  private static int JVM_REF_putStatic               = 4;
+  private static int JVM_REF_invokeVirtual           = 5;
+  private static int JVM_REF_invokeStatic            = 6;
+  private static int JVM_REF_invokeSpecial           = 7;
+  private static int JVM_REF_newInvokeSpecial        = 8;
+  private static int JVM_REF_invokeInterface         = 9;
 
   private byte tag;
 
@@ -62,6 +76,8 @@
   public boolean isDouble()           { return tag == JVM_CONSTANT_Double; }
   public boolean isNameAndType()      { return tag == JVM_CONSTANT_NameAndType; }
   public boolean isUtf8()             { return tag == JVM_CONSTANT_Utf8; }
+  public boolean isMethodHandle()     { return tag == JVM_CONSTANT_MethodHandle; }
+  public boolean isMethodType()       { return tag == JVM_CONSTANT_MethodType; }
 
   public boolean isInvalid()          { return tag == JVM_CONSTANT_Invalid; }
 
@@ -73,6 +89,8 @@
   public boolean isUnresolvedString()       { return tag == JVM_CONSTANT_UnresolvedString; }
   public boolean isStringIndex()            { return tag == JVM_CONSTANT_StringIndex; }
 
+  public boolean isObject()                 { return tag == JVM_CONSTANT_Object; }
+
   public boolean isKlassReference()   { return isKlassIndex() || isUnresolvedKlass(); }
   public boolean isFieldOrMethod()    { return isField() || isMethod() || isInterfaceMethod(); }
   public boolean isSymbol()           { return isUtf8(); }
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/soql/sa.js	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/soql/sa.js	Fri Jul 02 01:36:15 2010 -0700
@@ -825,6 +825,8 @@
    }
    writeln("");
    disAsm.decode(new sapkg.interpreter.BytecodeVisitor() {
+                    prologue: function(method) { },
+                    epilogue: function() { },
                     visit: function(bytecode) {
                        if (hasLines) {
                           var line = method.getLineNumberFromBCI(bci);
--- a/hotspot/make/linux/makefiles/adlc.make	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/make/linux/makefiles/adlc.make	Fri Jul 02 01:36:15 2010 -0700
@@ -138,7 +138,11 @@
 
 # Normally, debugging is done directly on the ad_<arch>*.cpp files.
 # But -g will put #line directives in those files pointing back to <arch>.ad.
+# Some builds of gcc 3.2 have a bug that gets tickled by the extra #line directives
+# so skip it for 3.2 and earlier.
+ifneq "$(shell expr \( $(CC_VER_MAJOR) \> 3 \) \| \( \( $(CC_VER_MAJOR) = 3 \) \& \( $(CC_VER_MINOR) \>= 3 \) \))" "0"
 ADLCFLAGS += -g
+endif
 
 ifdef LP64
 ADLCFLAGS += -D_LP64
--- a/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -318,6 +318,31 @@
   __ bind(exit);
 }
 
+// Fast path for caching oop constants.
+// %%% We should use this to handle Class and String constants also.
+// %%% It will simplify the ldc/primitive path considerably.
+void TemplateTable::fast_aldc(bool wide) {
+  transition(vtos, atos);
+
+  if (!EnableMethodHandles) {
+    // We should not encounter this bytecode if !EnableMethodHandles.
+    // The verifier will stop it.  However, if we get past the verifier,
+    // this will stop the thread in a reasonable way, without crashing the JVM.
+    __ call_VM(noreg, CAST_FROM_FN_PTR(address,
+                     InterpreterRuntime::throw_IncompatibleClassChangeError));
+    // the call_VM checks for exception, so we should never return here.
+    __ should_not_reach_here();
+    return;
+  }
+
+  Register Rcache = G3_scratch;
+  Register Rscratch = G4_scratch;
+
+  resolve_cache_and_index(f1_oop, Otos_i, Rcache, Rscratch, wide ? sizeof(u2) : sizeof(u1));
+
+  __ verify_oop(Otos_i);
+}
+
 void TemplateTable::ldc2_w() {
   transition(vtos, vtos);
   Label retry, resolved, Long, exit;
@@ -1994,6 +2019,8 @@
     case Bytecodes::_invokestatic   : // fall through
     case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke);  break;
     case Bytecodes::_invokedynamic  : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic);  break;
+    case Bytecodes::_fast_aldc      : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);     break;
+    case Bytecodes::_fast_aldc_w    : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);     break;
     default                         : ShouldNotReachHere();                                 break;
   }
   // first time invocation - must resolve first
--- a/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -375,6 +375,32 @@
   __ bind(Done);
 }
 
+// Fast path for caching oop constants.
+// %%% We should use this to handle Class and String constants also.
+// %%% It will simplify the ldc/primitive path considerably.
+void TemplateTable::fast_aldc(bool wide) {
+  transition(vtos, atos);
+
+  if (!EnableMethodHandles) {
+    // We should not encounter this bytecode if !EnableMethodHandles.
+    // The verifier will stop it.  However, if we get past the verifier,
+    // this will stop the thread in a reasonable way, without crashing the JVM.
+    __ call_VM(noreg, CAST_FROM_FN_PTR(address,
+                     InterpreterRuntime::throw_IncompatibleClassChangeError));
+    // the call_VM checks for exception, so we should never return here.
+    __ should_not_reach_here();
+    return;
+  }
+
+  const Register cache = rcx;
+  const Register index = rdx;
+
+  resolve_cache_and_index(f1_oop, rax, cache, index, wide ? sizeof(u2) : sizeof(u1));
+  if (VerifyOops) {
+    __ verify_oop(rax);
+  }
+}
+
 void TemplateTable::ldc2_w() {
   transition(vtos, vtos);
   Label Long, Done;
@@ -2055,6 +2081,8 @@
     case Bytecodes::_invokestatic   : // fall through
     case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke);  break;
     case Bytecodes::_invokedynamic  : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
+    case Bytecodes::_fast_aldc      : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);     break;
+    case Bytecodes::_fast_aldc_w    : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);     break;
     default                         : ShouldNotReachHere();                                 break;
   }
   __ movl(temp, (int)bytecode());
--- a/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -389,6 +389,32 @@
   __ bind(Done);
 }
 
+// Fast path for caching oop constants.
+// %%% We should use this to handle Class and String constants also.
+// %%% It will simplify the ldc/primitive path considerably.
+void TemplateTable::fast_aldc(bool wide) {
+  transition(vtos, atos);
+
+  if (!EnableMethodHandles) {
+    // We should not encounter this bytecode if !EnableMethodHandles.
+    // The verifier will stop it.  However, if we get past the verifier,
+    // this will stop the thread in a reasonable way, without crashing the JVM.
+    __ call_VM(noreg, CAST_FROM_FN_PTR(address,
+                     InterpreterRuntime::throw_IncompatibleClassChangeError));
+    // the call_VM checks for exception, so we should never return here.
+    __ should_not_reach_here();
+    return;
+  }
+
+  const Register cache = rcx;
+  const Register index = rdx;
+
+  resolve_cache_and_index(f1_oop, rax, cache, index, wide ? sizeof(u2) : sizeof(u1));
+  if (VerifyOops) {
+    __ verify_oop(rax);
+  }
+}
+
 void TemplateTable::ldc2_w() {
   transition(vtos, vtos);
   Label Long, Done;
@@ -2063,6 +2089,12 @@
   case Bytecodes::_invokedynamic:
     entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic);
     break;
+  case Bytecodes::_fast_aldc:
+    entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
+    break;
+  case Bytecodes::_fast_aldc_w:
+    entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
+    break;
   default:
     ShouldNotReachHere();
     break;
--- a/hotspot/src/cpu/x86/vm/vm_version_x86.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/vm_version_x86.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,7 +34,7 @@
 VM_Version::CpuidInfo VM_Version::_cpuid_info   = { 0, };
 
 static BufferBlob* stub_blob;
-static const int stub_size = 300;
+static const int stub_size = 400;
 
 extern "C" {
   typedef void (*getPsrInfo_stub_t)(void*);
@@ -56,7 +56,7 @@
     const uint32_t CPU_FAMILY_386   = (3 << CPU_FAMILY_SHIFT);
     const uint32_t CPU_FAMILY_486   = (4 << CPU_FAMILY_SHIFT);
 
-    Label detect_486, cpu486, detect_586, std_cpuid1;
+    Label detect_486, cpu486, detect_586, std_cpuid1, std_cpuid4;
     Label ext_cpuid1, ext_cpuid5, done;
 
     StubCodeMark mark(this, "VM_Version", "getPsrInfo_stub");
@@ -131,13 +131,62 @@
     __ movl(Address(rsi, 8), rcx);
     __ movl(Address(rsi,12), rdx);
 
-    __ cmpl(rax, 3);     // Is cpuid(0x4) supported?
-    __ jccb(Assembler::belowEqual, std_cpuid1);
+    __ cmpl(rax, 0xa);                  // Is cpuid(0xB) supported?
+    __ jccb(Assembler::belowEqual, std_cpuid4);
+
+    //
+    // cpuid(0xB) Processor Topology
+    //
+    __ movl(rax, 0xb);
+    __ xorl(rcx, rcx);   // Threads level
+    __ cpuid();
+
+    __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB0_offset())));
+    __ movl(Address(rsi, 0), rax);
+    __ movl(Address(rsi, 4), rbx);
+    __ movl(Address(rsi, 8), rcx);
+    __ movl(Address(rsi,12), rdx);
+
+    __ movl(rax, 0xb);
+    __ movl(rcx, 1);     // Cores level
+    __ cpuid();
+    __ push(rax);
+    __ andl(rax, 0x1f);  // Determine if valid topology level
+    __ orl(rax, rbx);    // eax[4:0] | ebx[0:15] == 0 indicates invalid level
+    __ andl(rax, 0xffff);
+    __ pop(rax);
+    __ jccb(Assembler::equal, std_cpuid4);
+
+    __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB1_offset())));
+    __ movl(Address(rsi, 0), rax);
+    __ movl(Address(rsi, 4), rbx);
+    __ movl(Address(rsi, 8), rcx);
+    __ movl(Address(rsi,12), rdx);
+
+    __ movl(rax, 0xb);
+    __ movl(rcx, 2);     // Packages level
+    __ cpuid();
+    __ push(rax);
+    __ andl(rax, 0x1f);  // Determine if valid topology level
+    __ orl(rax, rbx);    // eax[4:0] | ebx[0:15] == 0 indicates invalid level
+    __ andl(rax, 0xffff);
+    __ pop(rax);
+    __ jccb(Assembler::equal, std_cpuid4);
+
+    __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB2_offset())));
+    __ movl(Address(rsi, 0), rax);
+    __ movl(Address(rsi, 4), rbx);
+    __ movl(Address(rsi, 8), rcx);
+    __ movl(Address(rsi,12), rdx);
 
     //
     // cpuid(0x4) Deterministic cache params
     //
+    __ bind(std_cpuid4);
     __ movl(rax, 4);
+    __ cmpl(rax, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset()))); // Is cpuid(0x4) supported?
+    __ jccb(Assembler::greater, std_cpuid1);
+
     __ xorl(rcx, rcx);   // L1 cache
     __ cpuid();
     __ push(rax);
@@ -460,13 +509,18 @@
   AllocatePrefetchDistance = allocate_prefetch_distance();
   AllocatePrefetchStyle    = allocate_prefetch_style();
 
-  if( AllocatePrefetchStyle == 2 && is_intel() &&
-      cpu_family() == 6 && supports_sse3() ) { // watermark prefetching on Core
+  if( is_intel() && cpu_family() == 6 && supports_sse3() ) {
+    if( AllocatePrefetchStyle == 2 ) { // watermark prefetching on Core
 #ifdef _LP64
-    AllocatePrefetchDistance = 384;
+      AllocatePrefetchDistance = 384;
 #else
-    AllocatePrefetchDistance = 320;
+      AllocatePrefetchDistance = 320;
 #endif
+    }
+    if( supports_sse4_2() && supports_ht() ) { // Nehalem based cpus
+      AllocatePrefetchDistance = 192;
+      AllocatePrefetchLines = 4;
+    }
   }
   assert(AllocatePrefetchDistance % AllocatePrefetchStepSize == 0, "invalid value");
 
--- a/hotspot/src/cpu/x86/vm/vm_version_x86.hpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/vm_version_x86.hpp	Fri Jul 02 01:36:15 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -114,6 +114,14 @@
     } bits;
   };
 
+  union TplCpuidBEbx {
+    uint32_t value;
+    struct {
+      uint32_t logical_cpus : 16,
+                            : 16;
+    } bits;
+  };
+
   union ExtCpuid1Ecx {
     uint32_t value;
     struct {
@@ -211,6 +219,25 @@
     uint32_t     dcp_cpuid4_ecx; // unused currently
     uint32_t     dcp_cpuid4_edx; // unused currently
 
+    // cpuid function 0xB (processor topology)
+    // ecx = 0
+    uint32_t     tpl_cpuidB0_eax;
+    TplCpuidBEbx tpl_cpuidB0_ebx;
+    uint32_t     tpl_cpuidB0_ecx; // unused currently
+    uint32_t     tpl_cpuidB0_edx; // unused currently
+
+    // ecx = 1
+    uint32_t     tpl_cpuidB1_eax;
+    TplCpuidBEbx tpl_cpuidB1_ebx;
+    uint32_t     tpl_cpuidB1_ecx; // unused currently
+    uint32_t     tpl_cpuidB1_edx; // unused currently
+
+    // ecx = 2
+    uint32_t     tpl_cpuidB2_eax;
+    TplCpuidBEbx tpl_cpuidB2_ebx;
+    uint32_t     tpl_cpuidB2_ecx; // unused currently
+    uint32_t     tpl_cpuidB2_edx; // unused currently
+
     // cpuid function 0x80000000 // example, unused
     uint32_t ext_max_function;
     uint32_t ext_vendor_name_0;
@@ -316,6 +343,9 @@
   static ByteSize ext_cpuid1_offset() { return byte_offset_of(CpuidInfo, ext_cpuid1_eax); }
   static ByteSize ext_cpuid5_offset() { return byte_offset_of(CpuidInfo, ext_cpuid5_eax); }
   static ByteSize ext_cpuid8_offset() { return byte_offset_of(CpuidInfo, ext_cpuid8_eax); }
+  static ByteSize tpl_cpuidB0_offset() { return byte_offset_of(CpuidInfo, tpl_cpuidB0_eax); }
+  static ByteSize tpl_cpuidB1_offset() { return byte_offset_of(CpuidInfo, tpl_cpuidB1_eax); }
+  static ByteSize tpl_cpuidB2_offset() { return byte_offset_of(CpuidInfo, tpl_cpuidB2_eax); }
 
   // Initialization
   static void initialize();
@@ -349,7 +379,12 @@
   static uint cores_per_cpu()  {
     uint result = 1;
     if (is_intel()) {
-      result = (_cpuid_info.dcp_cpuid4_eax.bits.cores_per_cpu + 1);
+      if (_cpuid_info.std_max_function >= 0xB) {
+        result = _cpuid_info.tpl_cpuidB1_ebx.bits.logical_cpus /
+                 _cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus;
+      } else {
+        result = (_cpuid_info.dcp_cpuid4_eax.bits.cores_per_cpu + 1);
+      }
     } else if (is_amd()) {
       result = (_cpuid_info.ext_cpuid8_ecx.bits.cores_per_cpu + 1);
     }
@@ -358,7 +393,9 @@
 
   static uint threads_per_core()  {
     uint result = 1;
-    if (_cpuid_info.std_cpuid1_edx.bits.ht != 0) {
+    if (is_intel() && _cpuid_info.std_max_function >= 0xB) {
+      result = _cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus;
+    } else if (_cpuid_info.std_cpuid1_edx.bits.ht != 0) {
       result = _cpuid_info.std_cpuid1_ebx.bits.threads_per_cpu /
                cores_per_cpu();
     }
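The new Intel path derives the topology from cpuid leaf 0xB: sub-leaf 0 reports the number of logical processors per core in EBX[15:0], sub-leaf 1 reports the number per package, and the core count is their ratio. A small worked sketch of that arithmetic in Java (hypothetical names; the two EBX fields are assumed to have already been read):

    // Minimal sketch (hypothetical names, not HotSpot code) of the leaf-0xB math:
    //   b0LogicalCpus = CPUID(EAX=0xB, ECX=0).EBX[15:0]  -> logical CPUs per core
    //   b1LogicalCpus = CPUID(EAX=0xB, ECX=1).EBX[15:0]  -> logical CPUs per package
    final class CpuTopologySketch {
        static int threadsPerCore(int b0LogicalCpus) {
            return b0LogicalCpus;                      // corresponds to threads_per_core()
        }
        static int coresPerPackage(int b0LogicalCpus, int b1LogicalCpus) {
            return b1LogicalCpus / b0LogicalCpus;      // corresponds to cores_per_cpu()
        }
        public static void main(String[] args) {
            // e.g. a hyper-threaded quad-core: 2 threads/core, 8 logical CPUs/package
            System.out.println(coresPerPackage(2, 8) + " cores, "
                             + threadsPerCore(2) + " threads/core");
        }
    }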
--- a/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -820,7 +820,7 @@
                                            bool      is_top_frame) {
   assert(popframe_extra_args == 0, "what to do?");
   assert(!is_top_frame || (!callee_locals && !callee_param_count),
-         "top frame should have no caller")
+         "top frame should have no caller");
 
   // This code must exactly match what InterpreterFrame::build
   // does (the full InterpreterFrame::build, that is, not the
--- a/hotspot/src/os/solaris/vm/osThread_solaris.hpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/os/solaris/vm/osThread_solaris.hpp	Fri Jul 02 01:36:15 2010 -0700
@@ -123,7 +123,7 @@
 
   int set_interrupt_callback    (Sync_Interrupt_Callback * cb);
   void remove_interrupt_callback(Sync_Interrupt_Callback * cb);
-  void OSThread::do_interrupt_callbacks_at_interrupt(InterruptArguments *args);
+  void do_interrupt_callbacks_at_interrupt(InterruptArguments *args);
 
  // ***************************************************************
  // java.lang.Thread.interrupt state.
--- a/hotspot/src/os_cpu/linux_x86/vm/copy_linux_x86.inline.hpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/os_cpu/linux_x86/vm/copy_linux_x86.inline.hpp	Fri Jul 02 01:36:15 2010 -0700
@@ -26,7 +26,7 @@
 #ifdef AMD64
   (void)memmove(to, from, count * HeapWordSize);
 #else
-  // Same as pd_aligned_conjoint_words, except includes a zero-count check.
+  // Includes a zero-count check.
   intx temp;
   __asm__ volatile("        testl   %6,%6         ;"
                    "        jz      7f            ;"
@@ -84,7 +84,7 @@
     break;
   }
 #else
-  // Same as pd_aligned_disjoint_words, except includes a zero-count check.
+  // Includes a zero-count check.
   intx temp;
   __asm__ volatile("        testl   %6,%6       ;"
                    "        jz      3f          ;"
@@ -130,75 +130,18 @@
 }
 
 static void pd_aligned_conjoint_words(HeapWord* from, HeapWord* to, size_t count) {
-#ifdef AMD64
-  (void)memmove(to, from, count * HeapWordSize);
-#else
-  // Same as pd_conjoint_words, except no zero-count check.
-  intx temp;
-  __asm__ volatile("        cmpl    %4,%5         ;"
-                   "        leal    -4(%4,%6,4),%3;"
-                   "        jbe     1f            ;"
-                   "        cmpl    %7,%5         ;"
-                   "        jbe     4f            ;"
-                   "1:      cmpl    $32,%6        ;"
-                   "        ja      3f            ;"
-                   "        subl    %4,%1         ;"
-                   "2:      movl    (%4),%3       ;"
-                   "        movl    %7,(%5,%4,1)  ;"
-                   "        addl    $4,%0         ;"
-                   "        subl    $1,%2          ;"
-                   "        jnz     2b            ;"
-                   "        jmp     7f            ;"
-                   "3:      rep;    smovl         ;"
-                   "        jmp     7f            ;"
-                   "4:      cmpl    $32,%2        ;"
-                   "        movl    %7,%0         ;"
-                   "        leal    -4(%5,%6,4),%1;"
-                   "        ja      6f            ;"
-                   "        subl    %4,%1         ;"
-                   "5:      movl    (%4),%3       ;"
-                   "        movl    %7,(%5,%4,1)  ;"
-                   "        subl    $4,%0         ;"
-                   "        subl    $1,%2          ;"
-                   "        jnz     5b            ;"
-                   "        jmp     7f            ;"
-                   "6:      std                   ;"
-                   "        rep;    smovl         ;"
-                   "        cld                   ;"
-                   "7:      nop                    "
-                   : "=S" (from), "=D" (to), "=c" (count), "=r" (temp)
-                   : "0"  (from), "1"  (to), "2"  (count), "3"  (temp)
-                   : "memory", "flags");
-#endif // AMD64
+  pd_conjoint_words(from, to, count);
 }
 
 static void pd_aligned_disjoint_words(HeapWord* from, HeapWord* to, size_t count) {
-#ifdef AMD64
   pd_disjoint_words(from, to, count);
-#else
-  // Same as pd_disjoint_words, except no zero-count check.
-  intx temp;
-  __asm__ volatile("        cmpl    $32,%6      ;"
-                   "        ja      2f          ;"
-                   "        subl    %4,%1       ;"
-                   "1:      movl    (%4),%3     ;"
-                   "        movl    %7,(%5,%4,1);"
-                   "        addl    $4,%0       ;"
-                   "        subl    $1,%2        ;"
-                   "        jnz     1b          ;"
-                   "        jmp     3f          ;"
-                   "2:      rep;    smovl       ;"
-                   "3:      nop                  "
-                   : "=S" (from), "=D" (to), "=c" (count), "=r" (temp)
-                   : "0"  (from), "1"  (to), "2"  (count), "3"  (temp)
-                   : "memory", "cc");
-#endif // AMD64
 }
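The aligned entry points now simply forward to pd_conjoint_words()/pd_disjoint_words(), which carry the zero-count check mentioned in the comments above. As a rough illustration of the contract those routines provide (overlap-safe word copy that is a no-op for a zero count), here is a small Java sketch; it is not the HotSpot primitive, and the names are mine:

    // Sketch of conjoint-copy semantics: overlap must be handled as if copying
    // through an intermediate buffer, and count == 0 must copy nothing.
    final class ConjointCopySketch {
        static void conjointCopy(int[] heap, int from, int to, int count) {
            if (count == 0) return;                    // the zero-count check
            if (to <= from || to >= from + count) {
                for (int i = 0; i < count; i++)        // no harmful overlap: copy forward
                    heap[to + i] = heap[from + i];
            } else {
                for (int i = count - 1; i >= 0; i--)   // overlapping, to > from: copy backward
                    heap[to + i] = heap[from + i];
            }
        }
        public static void main(String[] args) {
            int[] words = {1, 2, 3, 4, 5, 0, 0};
            conjointCopy(words, 0, 2, 5);              // overlapping shift by two slots
            System.out.println(java.util.Arrays.toString(words)); // [1, 2, 1, 2, 3, 4, 5]
        }
    }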
 
 static void pd_conjoint_bytes(void* from, void* to, size_t count) {
 #ifdef AMD64
   (void)memmove(to, from, count);
 #else
+  // Includes a zero-count check.
   intx temp;
   __asm__ volatile("        testl   %6,%6          ;"
                    "        jz      13f            ;"
--- a/hotspot/src/os_cpu/linux_x86/vm/linux_x86_32.s	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/os_cpu/linux_x86/vm/linux_x86_32.s	Fri Jul 02 01:36:15 2010 -0700
@@ -121,10 +121,10 @@
         jnz      3b
         addl     %esi,%edi
 4:      movl     %eax,%ecx            # byte count less prefix
-        andl     $3,%ecx              # suffix byte count
+5:      andl     $3,%ecx              # suffix byte count
         jz       7f                   # no suffix
         # copy suffix
-5:      xorl     %eax,%eax
+        xorl     %eax,%eax
 6:      movb     (%esi,%eax,1),%dl
         movb     %dl,(%edi,%eax,1)
         addl     $1,%eax
@@ -159,10 +159,10 @@
         # copy dwords, aligned or not
 3:      rep;     smovl
 4:      movl     %eax,%ecx            # byte count
-        andl     $3,%ecx              # suffix byte count
+5:      andl     $3,%ecx              # suffix byte count
         jz       7f                   # no suffix
         # copy suffix
-5:      subl     %esi,%edi
+        subl     %esi,%edi
         addl     $3,%esi
 6:      movb     (%esi),%dl
         movb     %dl,(%edi,%esi,1)
@@ -214,10 +214,10 @@
         # copy aligned dwords
 3:      rep;     smovl
 4:      movl     %eax,%ecx
-        andl     $3,%ecx
+5:      andl     $3,%ecx
         jz       7f
         # copy suffix
-5:      xorl     %eax,%eax
+        xorl     %eax,%eax
 6:      movb     (%esi,%eax,1),%dl
         movb     %dl,(%edi,%eax,1)
         addl     $1,%eax
@@ -250,9 +250,9 @@
         jnz      3b
         addl     %esi,%edi
 4:      movl     %eax,%ecx
-        andl     $3,%ecx
+5:      andl     $3,%ecx
         jz       7f
-5:      subl     %esi,%edi
+        subl     %esi,%edi
         addl     $3,%esi
 6:      movb     (%esi),%dl
         movb     %dl,(%edi,%esi,1)
@@ -287,11 +287,12 @@
         andl     $3,%eax              # either 0 or 2
         jz       1f                   # no prefix
         # copy prefix
+        subl     $1,%ecx
+        jl       5f                   # zero count
         movw     (%esi),%dx
         movw     %dx,(%edi)
         addl     %eax,%esi            # %eax == 2
         addl     %eax,%edi
-        subl     $1,%ecx
 1:      movl     %ecx,%eax            # word count less prefix
         sarl     %ecx                 # dword count
         jz       4f                   # no dwords to move
@@ -454,12 +455,13 @@
         ret
         .=.+10
 2:      subl     %esi,%edi
+        jmp      4f
         .p2align 4,,15
 3:      movl     (%esi),%edx
         movl     %edx,(%edi,%esi,1)
         addl     $4,%esi
-        subl     $1,%ecx
-        jnz      3b
+4:      subl     $1,%ecx
+        jge      3b
         popl     %edi
         popl     %esi
         ret
@@ -467,19 +469,20 @@
         std
         leal     -4(%edi,%ecx,4),%edi # to + count*4 - 4
         cmpl     $32,%ecx
-        ja       3f                   # > 32 dwords
+        ja       4f                   # > 32 dwords
         subl     %eax,%edi            # eax == from + count*4 - 4
+        jmp      3f
         .p2align 4,,15
 2:      movl     (%eax),%edx
         movl     %edx,(%edi,%eax,1)
         subl     $4,%eax
-        subl     $1,%ecx
-        jnz      2b
+3:      subl     $1,%ecx
+        jge      2b
         cld
         popl     %edi
         popl     %esi
         ret
-3:      movl     %eax,%esi            # from + count*4 - 4
+4:      movl     %eax,%esi            # from + count*4 - 4
         rep;     smovl
         cld
         popl     %edi
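The assembler changes above rework several suffix and loop paths: the `5:` label now covers the byte-count recomputation, and the dword-copy loops pre-test the count (a `jmp` into the `subl $1,%ecx; jge` pair) so a zero count falls straight through instead of wrapping around. A loose Java sketch of that restructuring, with invented names:

    // Instead of a do-while back-edge ("subl $1,%ecx; jnz body"), enter at the
    // decrement-and-test ("jmp test ... subl $1,%ecx; jge body") so count == 0
    // copies nothing.
    final class TestFirstLoopSketch {
        static void copyDwords(int[] src, int[] dst, int count) {
            int i = 0;
            while (--count >= 0) {       // corresponds to "subl $1,%ecx; jge 3b"
                dst[i] = src[i];
                i++;
            }
        }
        public static void main(String[] args) {
            int[] dst = new int[3];
            copyDwords(new int[] {7, 8, 9}, dst, 0);   // zero count: no writes
            copyDwords(new int[] {7, 8, 9}, dst, 3);
            System.out.println(dst[0] + "," + dst[1] + "," + dst[2]); // 7,8,9
        }
    }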
--- a/hotspot/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -861,7 +861,7 @@
 cmpxchg_long_func_t* os::atomic_cmpxchg_long_func = os::atomic_cmpxchg_long_bootstrap;
 add_func_t*          os::atomic_add_func          = os::atomic_add_bootstrap;
 
-extern "C" _solaris_raw_setup_fpu(address ptr);
+extern "C" void _solaris_raw_setup_fpu(address ptr);
 void os::setup_fpu() {
   address fpu_cntrl = StubRoutines::addr_fpu_cntrl_wrd_std();
   _solaris_raw_setup_fpu(fpu_cntrl);
--- a/hotspot/src/os_cpu/solaris_x86/vm/solaris_x86_32.s	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/os_cpu/solaris_x86/vm/solaris_x86_32.s	Fri Jul 02 01:36:15 2010 -0700
@@ -154,10 +154,10 @@
         jnz      3b
         addl     %esi,%edi
 4:      movl     %eax,%ecx            / byte count less prefix
-        andl     $3,%ecx              / suffix byte count
+5:      andl     $3,%ecx              / suffix byte count
         jz       7f                   / no suffix
         / copy suffix
-5:      xorl     %eax,%eax
+        xorl     %eax,%eax
 6:      movb     (%esi,%eax,1),%dl
         movb     %dl,(%edi,%eax,1)
         addl     $1,%eax
@@ -192,10 +192,10 @@
         / copy dwords, aligned or not
 3:      rep;     smovl
 4:      movl     %eax,%ecx            / byte count
-        andl     $3,%ecx              / suffix byte count
+5:      andl     $3,%ecx              / suffix byte count
         jz       7f                   / no suffix
         / copy suffix
-5:      subl     %esi,%edi
+        subl     %esi,%edi
         addl     $3,%esi
 6:      movb     (%esi),%dl
         movb     %dl,(%edi,%esi,1)
@@ -246,10 +246,10 @@
         / copy aligned dwords
 3:      rep;     smovl
 4:      movl     %eax,%ecx
-        andl     $3,%ecx
+5:      andl     $3,%ecx
         jz       7f
         / copy suffix
-5:      xorl     %eax,%eax
+        xorl     %eax,%eax
 6:      movb     (%esi,%eax,1),%dl
         movb     %dl,(%edi,%eax,1)
         addl     $1,%eax
@@ -282,9 +282,9 @@
         jnz      3b
         addl     %esi,%edi
 4:      movl     %eax,%ecx
-        andl     $3,%ecx
+5:      andl     $3,%ecx
         jz       7f
-5:      subl     %esi,%edi
+        subl     %esi,%edi
         addl     $3,%esi
 6:      movb     (%esi),%dl
         movb     %dl,(%edi,%esi,1)
@@ -318,11 +318,12 @@
         andl     $3,%eax              / either 0 or 2
         jz       1f                   / no prefix
         / copy prefix
+        subl     $1,%ecx
+        jl       5f                   / zero count
         movw     (%esi),%dx
         movw     %dx,(%edi)
         addl     %eax,%esi            / %eax == 2
         addl     %eax,%edi
-        subl     $1,%ecx
 1:      movl     %ecx,%eax            / word count less prefix
         sarl     %ecx                 / dword count
         jz       4f                   / no dwords to move
@@ -482,12 +483,13 @@
         ret
         .=.+10
 2:      subl     %esi,%edi
+        jmp      4f
         .align   16
 3:      movl     (%esi),%edx
         movl     %edx,(%edi,%esi,1)
         addl     $4,%esi
-        subl     $1,%ecx
-        jnz      3b
+4:      subl     $1,%ecx
+        jge      3b
         popl     %edi
         popl     %esi
         ret
@@ -495,19 +497,20 @@
         std
         leal     -4(%edi,%ecx,4),%edi / to + count*4 - 4
         cmpl     $32,%ecx
-        ja       3f                   / > 32 dwords
+        ja       4f                   / > 32 dwords
         subl     %eax,%edi            / eax == from + count*4 - 4
+        jmp      3f
         .align   16
 2:      movl     (%eax),%edx
         movl     %edx,(%edi,%eax,1)
         subl     $4,%eax
-        subl     $1,%ecx
-        jnz      2b
+3:      subl     $1,%ecx
+        jge      2b
         cld
         popl     %edi
         popl     %esi
         ret
-3:      movl     %eax,%esi            / from + count*4 - 4
+4:      movl     %eax,%esi            / from + count*4 - 4
         rep;     smovl
         cld
         popl     %edi
--- a/hotspot/src/share/vm/asm/codeBuffer.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/asm/codeBuffer.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -404,7 +404,7 @@
       locs_start = REALLOC_RESOURCE_ARRAY(relocInfo, _locs_start, old_capacity, new_capacity);
     } else {
       locs_start = NEW_RESOURCE_ARRAY(relocInfo, new_capacity);
-      Copy::conjoint_bytes(_locs_start, locs_start, old_capacity * sizeof(relocInfo));
+      Copy::conjoint_jbytes(_locs_start, locs_start, old_capacity * sizeof(relocInfo));
       _locs_own = true;
     }
     _locs_start    = locs_start;
@@ -581,7 +581,7 @@
                              (HeapWord*)(buf+buf_offset),
                              (lsize + HeapWordSize-1) / HeapWordSize);
       } else {
-        Copy::conjoint_bytes(lstart, buf+buf_offset, lsize);
+        Copy::conjoint_jbytes(lstart, buf+buf_offset, lsize);
       }
     }
     buf_offset += lsize;
--- a/hotspot/src/share/vm/c1/c1_Compilation.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/c1/c1_Compilation.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -242,10 +242,10 @@
   code->insts()->initialize_shared_locs((relocInfo*)locs_buffer,
                                         locs_buffer_size / sizeof(relocInfo));
   code->initialize_consts_size(Compilation::desired_max_constant_size());
-  // Call stubs + deopt/exception handler
+  // Call stubs + two deopt handlers (regular and MH) + exception handler
   code->initialize_stubs_size((call_stub_estimate * LIR_Assembler::call_stub_size) +
                               LIR_Assembler::exception_handler_size +
-                              LIR_Assembler::deopt_handler_size);
+                              2 * LIR_Assembler::deopt_handler_size);
 }
 
 
--- a/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -878,15 +878,12 @@
       case T_OBJECT :
        {
         ciObject* obj = con.as_object();
-        if (obj->is_klass()) {
-          ciKlass* klass = obj->as_klass();
-          if (!klass->is_loaded() || PatchALot) {
-            patch_state = state()->copy();
-            t = new ObjectConstant(obj);
-          } else {
-            t = new InstanceConstant(klass->java_mirror());
-          }
+        if (!obj->is_loaded()
+            || (PatchALot && obj->klass() != ciEnv::current()->String_klass())) {
+          patch_state = state()->copy();
+          t = new ObjectConstant(obj);
         } else {
+          assert(!obj->is_klass(), "must be java_mirror of klass");
           t = new InstanceConstant(obj->as_instance());
         }
         break;
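With this change, an ldc of a class entry reaches the graph builder as the java.lang.Class mirror, i.e. an ordinary instance, so only genuinely unloaded objects take the patching path. At the source level the constant in question is simply a class literal; a tiny Java illustration:

    // The value an ldc of a CONSTANT_Class entry pushes is the java.lang.Class
    // mirror (a plain instance), not the VM-internal klass.
    public class ClassConstantDemo {
        public static void main(String[] args) {
            Class<?> c = String.class;       // javac compiles this to ldc of a CONSTANT_Class entry
            System.out.println(c.getName()); // java.lang.String
        }
    }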
--- a/hotspot/src/share/vm/c1/c1_Runtime1.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/c1/c1_Runtime1.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -601,7 +601,7 @@
 
 
 static klassOop resolve_field_return_klass(methodHandle caller, int bci, TRAPS) {
-  Bytecode_field* field_access = Bytecode_field_at(caller(), caller->bcp_from(bci));
+  Bytecode_field* field_access = Bytecode_field_at(caller, bci);
   // This can be static or non-static field access
   Bytecodes::Code code       = field_access->code();
 
@@ -721,7 +721,7 @@
   Handle load_klass(THREAD, NULL);                // oop needed by load_klass_patching code
   if (stub_id == Runtime1::access_field_patching_id) {
 
-    Bytecode_field* field_access = Bytecode_field_at(caller_method(), caller_method->bcp_from(bci));
+    Bytecode_field* field_access = Bytecode_field_at(caller_method, bci);
     FieldAccessInfo result; // initialize class if needed
     Bytecodes::Code code = field_access->code();
     constantPoolHandle constants(THREAD, caller_method->constants());
@@ -781,11 +781,9 @@
       case Bytecodes::_ldc:
       case Bytecodes::_ldc_w:
         {
-          Bytecode_loadconstant* cc = Bytecode_loadconstant_at(caller_method(),
-                                                               caller_method->bcp_from(bci));
-          klassOop resolved = caller_method->constants()->klass_at(cc->index(), CHECK);
-          // ldc wants the java mirror.
-          k = resolved->klass_part()->java_mirror();
+          Bytecode_loadconstant* cc = Bytecode_loadconstant_at(caller_method, bci);
+          k = cc->resolve_constant(CHECK);
+          assert(k != NULL && !k->is_klass(), "must be class mirror or other Java constant");
         }
         break;
       default: Unimplemented();
@@ -816,6 +814,15 @@
     // Return to the now deoptimized frame.
   }
 
+  // If we are patching in a non-perm oop, make sure the nmethod
+  // is on the right list.
+  if (ScavengeRootsInCode && load_klass.not_null() && load_klass->is_scavengable()) {
+    MutexLockerEx ml_code (CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
+    guarantee(nm != NULL, "only nmethods can contain non-perm oops");
+    if (!nm->on_scavenge_root_list())
+      CodeCache::add_scavenge_root_nmethod(nm);
+  }
 
   // Now copy code back
 
@@ -1115,7 +1122,7 @@
   if (length == 0) return;
   // Not guaranteed to be word atomic, but that doesn't matter
   // for anything but an oop array, which is covered by oop_arraycopy.
-  Copy::conjoint_bytes(src, dst, length);
+  Copy::conjoint_jbytes(src, dst, length);
 JRT_END
 
 JRT_LEAF(void, Runtime1::oop_arraycopy(HeapWord* src, HeapWord* dst, int num))
--- a/hotspot/src/share/vm/ci/ciCPCache.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/ci/ciCPCache.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -44,13 +44,23 @@
 // ciCPCache::is_f1_null_at
 bool ciCPCache::is_f1_null_at(int index) {
   VM_ENTRY_MARK;
-  constantPoolCacheOop cpcache = (constantPoolCacheOop) get_oop();
-  oop f1 = cpcache->secondary_entry_at(index)->f1();
+  oop f1 = entry_at(index)->f1();
   return (f1 == NULL);
 }
 
 
 // ------------------------------------------------------------------
+// ciCPCache::get_pool_index
+int ciCPCache::get_pool_index(int index) {
+  VM_ENTRY_MARK;
+  ConstantPoolCacheEntry* e = entry_at(index);
+  if (e->is_secondary_entry())
+    e = entry_at(e->main_entry_index());
+  return e->constant_pool_index();
+}
+
+
+// ------------------------------------------------------------------
 // ciCPCache::print
 //
 // Print debugging information about the cache.
--- a/hotspot/src/share/vm/ci/ciCPCache.hpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/ci/ciCPCache.hpp	Fri Jul 02 01:36:15 2010 -0700
@@ -29,6 +29,18 @@
 // Note: This class is called ciCPCache as ciConstantPoolCache is used
 // for something different.
 class ciCPCache : public ciObject {
+private:
+  constantPoolCacheOop get_cpCacheOop() {   // must be called inside a VM_ENTRY_MARK
+    return (constantPoolCacheOop) get_oop();
+  }
+
+  ConstantPoolCacheEntry* entry_at(int i) {
+    int raw_index = i;
+    if (constantPoolCacheOopDesc::is_secondary_index(i))
+      raw_index = constantPoolCacheOopDesc::decode_secondary_index(i);
+    return get_cpCacheOop()->entry_at(raw_index);
+  }
+
 public:
   ciCPCache(constantPoolCacheHandle cpcache) : ciObject(cpcache) {}
 
@@ -41,5 +53,7 @@
 
   bool is_f1_null_at(int index);
 
+  int get_pool_index(int index);
+
   void print();
 };
--- a/hotspot/src/share/vm/ci/ciClassList.hpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/ci/ciClassList.hpp	Fri Jul 02 01:36:15 2010 -0700
@@ -85,6 +85,7 @@
 friend class ciConstantPoolCache;      \
 friend class ciField;                  \
 friend class ciConstant;               \
+friend class ciCPCache;                \
 friend class ciFlags;                  \
 friend class ciExceptionHandler;       \
 friend class ciCallProfile;            \
--- a/hotspot/src/share/vm/ci/ciEnv.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/ci/ciEnv.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -511,9 +511,22 @@
 //
 // Implementation of get_constant_by_index().
 ciConstant ciEnv::get_constant_by_index_impl(constantPoolHandle cpool,
-                                             int index,
+                                             int pool_index, int cache_index,
                                              ciInstanceKlass* accessor) {
+  bool ignore_will_link;
   EXCEPTION_CONTEXT;
+  int index = pool_index;
+  if (cache_index >= 0) {
+    assert(index < 0, "only one kind of index at a time");
+    ConstantPoolCacheEntry* cpc_entry = cpool->cache()->entry_at(cache_index);
+    index = cpc_entry->constant_pool_index();
+    oop obj = cpc_entry->f1();
+    if (obj != NULL) {
+      assert(obj->is_instance(), "must be an instance");
+      ciObject* ciobj = get_object(obj);
+      return ciConstant(T_OBJECT, ciobj);
+    }
+  }
   constantTag tag = cpool->tag_at(index);
   if (tag.is_int()) {
     return ciConstant(T_INT, (jint)cpool->int_at(index));
@@ -540,8 +553,7 @@
     return ciConstant(T_OBJECT, constant);
   } else if (tag.is_klass() || tag.is_unresolved_klass()) {
     // 4881222: allow ldc to take a class type
-    bool ignore;
-    ciKlass* klass = get_klass_by_index_impl(cpool, index, ignore, accessor);
+    ciKlass* klass = get_klass_by_index_impl(cpool, index, ignore_will_link, accessor);
     if (HAS_PENDING_EXCEPTION) {
       CLEAR_PENDING_EXCEPTION;
       record_out_of_memory_failure();
@@ -549,12 +561,26 @@
     }
     assert (klass->is_instance_klass() || klass->is_array_klass(),
             "must be an instance or array klass ");
-    return ciConstant(T_OBJECT, klass);
+    return ciConstant(T_OBJECT, klass->java_mirror());
   } else if (tag.is_object()) {
     oop obj = cpool->object_at(index);
     assert(obj->is_instance(), "must be an instance");
     ciObject* ciobj = get_object(obj);
     return ciConstant(T_OBJECT, ciobj);
+  } else if (tag.is_method_type()) {
+    // must execute Java code to link this CP entry into cache[i].f1
+    ciSymbol* signature = get_object(cpool->method_type_signature_at(index))->as_symbol();
+    ciObject* ciobj = get_unloaded_method_type_constant(signature);
+    return ciConstant(T_OBJECT, ciobj);
+  } else if (tag.is_method_handle()) {
+    // must execute Java code to link this CP entry into cache[i].f1
+    int ref_kind        = cpool->method_handle_ref_kind_at(index);
+    int callee_index    = cpool->method_handle_klass_index_at(index);
+    ciKlass* callee     = get_klass_by_index_impl(cpool, callee_index, ignore_will_link, accessor);
+    ciSymbol* name      = get_object(cpool->method_handle_name_ref_at(index))->as_symbol();
+    ciSymbol* signature = get_object(cpool->method_handle_signature_ref_at(index))->as_symbol();
+    ciObject* ciobj     = get_unloaded_method_handle_constant(callee, name, signature, ref_kind);
+    return ciConstant(T_OBJECT, ciobj);
   } else {
     ShouldNotReachHere();
     return ciConstant();
@@ -562,61 +588,15 @@
 }
 
 // ------------------------------------------------------------------
-// ciEnv::is_unresolved_string_impl
-//
-// Implementation of is_unresolved_string().
-bool ciEnv::is_unresolved_string_impl(instanceKlass* accessor, int index) const {
-  EXCEPTION_CONTEXT;
-  assert(accessor->is_linked(), "must be linked before accessing constant pool");
-  constantPoolOop cpool = accessor->constants();
-  constantTag tag = cpool->tag_at(index);
-  return tag.is_unresolved_string();
-}
-
-// ------------------------------------------------------------------
-// ciEnv::is_unresolved_klass_impl
-//
-// Implementation of is_unresolved_klass().
-bool ciEnv::is_unresolved_klass_impl(instanceKlass* accessor, int index) const {
-  EXCEPTION_CONTEXT;
-  assert(accessor->is_linked(), "must be linked before accessing constant pool");
-  constantPoolOop cpool = accessor->constants();
-  constantTag tag = cpool->tag_at(index);
-  return tag.is_unresolved_klass();
-}
-
-// ------------------------------------------------------------------
 // ciEnv::get_constant_by_index
 //
 // Pull a constant out of the constant pool.  How appropriate.
 //
 // Implementation note: this query is currently in no way cached.
 ciConstant ciEnv::get_constant_by_index(constantPoolHandle cpool,
-                                        int index,
+                                        int pool_index, int cache_index,
                                         ciInstanceKlass* accessor) {
-  GUARDED_VM_ENTRY(return get_constant_by_index_impl(cpool, index, accessor);)
-}
-
-// ------------------------------------------------------------------
-// ciEnv::is_unresolved_string
-//
-// Check constant pool
-//
-// Implementation note: this query is currently in no way cached.
-bool ciEnv::is_unresolved_string(ciInstanceKlass* accessor,
-                                 int index) const {
-  GUARDED_VM_ENTRY(return is_unresolved_string_impl(accessor->get_instanceKlass(), index); )
-}
-
-// ------------------------------------------------------------------
-// ciEnv::is_unresolved_klass
-//
-// Check constant pool
-//
-// Implementation note: this query is currently in no way cached.
-bool ciEnv::is_unresolved_klass(ciInstanceKlass* accessor,
-                                int index) const {
-  GUARDED_VM_ENTRY(return is_unresolved_klass_impl(accessor->get_instanceKlass(), index); )
+  GUARDED_VM_ENTRY(return get_constant_by_index_impl(cpool, pool_index, cache_index, accessor);)
 }
 
 // ------------------------------------------------------------------
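The new tag.is_method_type() and tag.is_method_handle() branches cover ldc of the two new constant kinds, which cannot be resolved without running Java code and therefore come back as unloaded placeholder instances. For reference, the Java-level objects these constants denote can be built like this (the sketch uses java.lang.invoke; this 2010 code base names the same types under java.dyn):

    import java.lang.invoke.MethodHandle;
    import java.lang.invoke.MethodHandles;
    import java.lang.invoke.MethodType;

    public class MethodHandleConstantDemo {
        public static void main(String[] args) throws Throwable {
            // like a CONSTANT_MethodType entry with descriptor "(I)Ljava/lang/String;"
            MethodType mt = MethodType.methodType(String.class, int.class);
            // like a CONSTANT_MethodHandle entry with ref_kind REF_invokeVirtual
            MethodHandle mh = MethodHandles.lookup()
                .findVirtual(Integer.class, "toString", MethodType.methodType(String.class));
            System.out.println(mt + " / " + mh.invoke(Integer.valueOf(42)));
        }
    }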
--- a/hotspot/src/share/vm/ci/ciEnv.hpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/ci/ciEnv.hpp	Fri Jul 02 01:36:15 2010 -0700
@@ -116,12 +116,8 @@
                                 bool& is_accessible,
                                 ciInstanceKlass* loading_klass);
   ciConstant get_constant_by_index(constantPoolHandle cpool,
-                                   int constant_index,
+                                   int pool_index, int cache_index,
                                    ciInstanceKlass* accessor);
-  bool       is_unresolved_string(ciInstanceKlass* loading_klass,
-                                   int constant_index) const;
-  bool       is_unresolved_klass(ciInstanceKlass* loading_klass,
-                                   int constant_index) const;
   ciField*   get_field_by_index(ciInstanceKlass* loading_klass,
                                 int field_index);
   ciMethod*  get_method_by_index(constantPoolHandle cpool,
@@ -137,12 +133,8 @@
                                      bool& is_accessible,
                                      ciInstanceKlass* loading_klass);
   ciConstant get_constant_by_index_impl(constantPoolHandle cpool,
-                                        int constant_index,
+                                        int pool_index, int cache_index,
                                         ciInstanceKlass* loading_klass);
-  bool       is_unresolved_string_impl (instanceKlass* loading_klass,
-                                        int constant_index) const;
-  bool       is_unresolved_klass_impl (instanceKlass* loading_klass,
-                                        int constant_index) const;
   ciField*   get_field_by_index_impl(ciInstanceKlass* loading_klass,
                                      int field_index);
   ciMethod*  get_method_by_index_impl(constantPoolHandle cpool,
@@ -190,6 +182,25 @@
     return _factory->get_unloaded_klass(accessing_klass, name, true);
   }
 
+  // Get a ciKlass representing an unloaded klass mirror.
+  // Result is not necessarily unique, but will be unloaded.
+  ciInstance* get_unloaded_klass_mirror(ciKlass* type) {
+    return _factory->get_unloaded_klass_mirror(type);
+  }
+
+  // Get a ciInstance representing an unresolved method handle constant.
+  ciInstance* get_unloaded_method_handle_constant(ciKlass*  holder,
+                                                  ciSymbol* name,
+                                                  ciSymbol* signature,
+                                                  int       ref_kind) {
+    return _factory->get_unloaded_method_handle_constant(holder, name, signature, ref_kind);
+  }
+
+  // Get a ciInstance representing an unresolved method type constant.
+  ciInstance* get_unloaded_method_type_constant(ciSymbol* signature) {
+    return _factory->get_unloaded_method_type_constant(signature);
+  }
+
   // See if we already have an unloaded klass for the given name
   // or return NULL if not.
   ciKlass *check_get_unloaded_klass(ciKlass* accessing_klass, ciSymbol* name) {
--- a/hotspot/src/share/vm/ci/ciInstanceKlass.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/ci/ciInstanceKlass.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -323,8 +323,8 @@
 // ciInstanceKlass::java_mirror
 //
 // Get the instance of java.lang.Class corresponding to this klass.
+// Cache it on this->_java_mirror.
 ciInstance* ciInstanceKlass::java_mirror() {
-  assert(is_loaded(), "must be loaded");
   if (_java_mirror == NULL) {
     _java_mirror = ciKlass::java_mirror();
   }
--- a/hotspot/src/share/vm/ci/ciKlass.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/ci/ciKlass.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -192,8 +192,14 @@
 
 // ------------------------------------------------------------------
 // ciKlass::java_mirror
+//
+// Get the instance of java.lang.Class corresponding to this klass.
+// If it is an unloaded instance or array klass, return an unloaded
+// mirror object of type Class.
 ciInstance* ciKlass::java_mirror() {
   GUARDED_VM_ENTRY(
+    if (!is_loaded())
+      return ciEnv::current()->get_unloaded_klass_mirror(this);
     oop java_mirror = get_Klass()->java_mirror();
     return CURRENT_ENV->get_object(java_mirror)->as_instance();
   )
--- a/hotspot/src/share/vm/ci/ciObjectFactory.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/ci/ciObjectFactory.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -70,6 +70,7 @@
 
   _unloaded_methods = new (arena) GrowableArray<ciMethod*>(arena, 4, 0, NULL);
   _unloaded_klasses = new (arena) GrowableArray<ciKlass*>(arena, 8, 0, NULL);
+  _unloaded_instances = new (arena) GrowableArray<ciInstance*>(arena, 4, 0, NULL);
   _return_addresses =
     new (arena) GrowableArray<ciReturnAddress*>(arena, 8, 0, NULL);
 }
@@ -443,6 +444,74 @@
   return new_klass;
 }
 
+
+//------------------------------------------------------------------
+// ciObjectFactory::get_unloaded_instance
+//
+// Get a ciInstance representing an as-yet undetermined instance of a given class.
+//
+ciInstance* ciObjectFactory::get_unloaded_instance(ciInstanceKlass* instance_klass) {
+  for (int i=0; i<_unloaded_instances->length(); i++) {
+    ciInstance* entry = _unloaded_instances->at(i);
+    if (entry->klass()->equals(instance_klass)) {
+      // We've found a match.
+      return entry;
+    }
+  }
+
+  // This is a new unloaded instance.  Create it and stick it in
+  // the cache.
+  ciInstance* new_instance = new (arena()) ciInstance(instance_klass);
+
+  init_ident_of(new_instance);
+  _unloaded_instances->append(new_instance);
+
+  // make sure it looks the way we want:
+  assert(!new_instance->is_loaded(), "");
+  assert(new_instance->klass() == instance_klass, "");
+
+  return new_instance;
+}
+
+
+//------------------------------------------------------------------
+// ciObjectFactory::get_unloaded_klass_mirror
+//
+// Get a ciInstance representing an unresolved klass mirror.
+//
+// Currently, this ignores the parameters and returns a unique unloaded instance.
+ciInstance* ciObjectFactory::get_unloaded_klass_mirror(ciKlass*  type) {
+  assert(ciEnv::_Class_klass != NULL, "");
+  return get_unloaded_instance(ciEnv::_Class_klass->as_instance_klass());
+}
+
+//------------------------------------------------------------------
+// ciObjectFactory::get_unloaded_method_handle_constant
+//
+// Get a ciInstance representing an unresolved method handle constant.
+//
+// Currently, this ignores the parameters and returns a unique unloaded instance.
+ciInstance* ciObjectFactory::get_unloaded_method_handle_constant(ciKlass*  holder,
+                                                                 ciSymbol* name,
+                                                                 ciSymbol* signature,
+                                                                 int       ref_kind) {
+  if (ciEnv::_MethodHandle_klass == NULL)  return NULL;
+  return get_unloaded_instance(ciEnv::_MethodHandle_klass->as_instance_klass());
+}
+
+//------------------------------------------------------------------
+// ciObjectFactory::get_unloaded_method_type_constant
+//
+// Get a ciInstance representing an unresolved method type constant.
+//
+// Currently, this ignores the parameters and returns a unique unloaded instance.
+ciInstance* ciObjectFactory::get_unloaded_method_type_constant(ciSymbol* signature) {
+  if (ciEnv::_MethodType_klass == NULL)  return NULL;
+  return get_unloaded_instance(ciEnv::_MethodType_klass->as_instance_klass());
+}
+
+
+
 //------------------------------------------------------------------
 // ciObjectFactory::get_empty_methodData
 //
@@ -637,7 +706,8 @@
 //
 // Print debugging information about the object factory
 void ciObjectFactory::print() {
-  tty->print("<ciObjectFactory oops=%d unloaded_methods=%d unloaded_klasses=%d>",
+  tty->print("<ciObjectFactory oops=%d unloaded_methods=%d unloaded_instances=%d unloaded_klasses=%d>",
              _ci_objects->length(), _unloaded_methods->length(),
+             _unloaded_instances->length(),
              _unloaded_klasses->length());
 }
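get_unloaded_instance() keeps one shared placeholder per klass in the new _unloaded_instances array: scan the array, and append on a miss. A generic Java sketch of that get-or-create pattern (names are mine, not ci code):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.function.Function;

    final class GetOrCreateCache<K, V> {
        private final List<V> entries = new ArrayList<>();
        private final Function<V, K> keyOf;
        private final Function<K, V> create;

        GetOrCreateCache(Function<V, K> keyOf, Function<K, V> create) {
            this.keyOf = keyOf;
            this.create = create;
        }

        V get(K key) {
            for (V entry : entries) {           // linear scan, like _unloaded_instances
                if (keyOf.apply(entry).equals(key)) return entry;
            }
            V fresh = create.apply(key);        // one shared placeholder per key
            entries.add(fresh);
            return fresh;
        }

        public static void main(String[] args) {
            GetOrCreateCache<String, String> cache = new GetOrCreateCache<>(
                v -> v.substring(0, v.indexOf(':')), k -> k + ":placeholder");
            System.out.println(cache.get("MethodHandle") == cache.get("MethodHandle")); // true
        }
    }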
--- a/hotspot/src/share/vm/ci/ciObjectFactory.hpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/ci/ciObjectFactory.hpp	Fri Jul 02 01:36:15 2010 -0700
@@ -39,6 +39,7 @@
   GrowableArray<ciObject*>* _ci_objects;
   GrowableArray<ciMethod*>* _unloaded_methods;
   GrowableArray<ciKlass*>* _unloaded_klasses;
+  GrowableArray<ciInstance*>* _unloaded_instances;
   GrowableArray<ciReturnAddress*>* _return_addresses;
   int                       _next_ident;
 
@@ -73,6 +74,8 @@
 
   void print_contents_impl();
 
+  ciInstance* get_unloaded_instance(ciInstanceKlass* klass);
+
 public:
   static bool is_initialized() { return _initialized; }
 
@@ -98,6 +101,18 @@
                               ciSymbol* name,
                               bool create_if_not_found);
 
+  // Get a ciInstance representing an unresolved klass mirror.
+  ciInstance* get_unloaded_klass_mirror(ciKlass* type);
+
+  // Get a ciInstance representing an unresolved method handle constant.
+  ciInstance* get_unloaded_method_handle_constant(ciKlass*  holder,
+                                                  ciSymbol* name,
+                                                  ciSymbol* signature,
+                                                  int       ref_kind);
+
+  // Get a ciInstance representing an unresolved method type constant.
+  ciInstance* get_unloaded_method_type_constant(ciSymbol* signature);
+
 
   // Get the ciMethodData representing the methodData for a method
   // with none.
--- a/hotspot/src/share/vm/ci/ciStreams.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/ci/ciStreams.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -186,12 +186,13 @@
 }
 
 // ------------------------------------------------------------------
-// ciBytecodeStream::get_constant_index
+// ciBytecodeStream::get_constant_raw_index
 //
 // If this bytecode is one of the ldc variants, get the index of the
 // referenced constant.
-int ciBytecodeStream::get_constant_index() const {
-  switch(cur_bc()) {
+int ciBytecodeStream::get_constant_raw_index() const {
+  // work-alike for Bytecode_loadconstant::raw_index()
+  switch (cur_bc()) {
   case Bytecodes::_ldc:
     return get_index_u1();
   case Bytecodes::_ldc_w:
@@ -202,25 +203,52 @@
     return 0;
   }
 }
+
+// ------------------------------------------------------------------
+// ciBytecodeStream::get_constant_pool_index
+// Decode any CP cache index into a regular pool index.
+int ciBytecodeStream::get_constant_pool_index() const {
+  // work-alike for Bytecode_loadconstant::pool_index()
+  int index = get_constant_raw_index();
+  if (has_cache_index()) {
+    return get_cpcache()->get_pool_index(index);
+  }
+  return index;
+}
+
+// ------------------------------------------------------------------
+// ciBytecodeStream::get_constant_cache_index
+// Return the CP cache index, or -1 if there isn't any.
+int ciBytecodeStream::get_constant_cache_index() const {
+  // work-alike for Bytecode_loadconstant::cache_index()
+  return has_cache_index() ? get_constant_raw_index() : -1;
+}
+
 // ------------------------------------------------------------------
 // ciBytecodeStream::get_constant
 //
 // If this bytecode is one of the ldc variants, get the referenced
 // constant.
 ciConstant ciBytecodeStream::get_constant() {
+  int pool_index = get_constant_raw_index();
+  int cache_index = -1;
+  if (has_cache_index()) {
+    cache_index = pool_index;
+    pool_index = -1;
+  }
   VM_ENTRY_MARK;
   constantPoolHandle cpool(_method->get_methodOop()->constants());
-  return CURRENT_ENV->get_constant_by_index(cpool, get_constant_index(), _holder);
+  return CURRENT_ENV->get_constant_by_index(cpool, pool_index, cache_index, _holder);
 }
 
 // ------------------------------------------------------------------
-bool ciBytecodeStream::is_unresolved_string() const {
-  return CURRENT_ENV->is_unresolved_string(_holder, get_constant_index());
-}
-
-// ------------------------------------------------------------------
-bool ciBytecodeStream::is_unresolved_klass() const {
-  return CURRENT_ENV->is_unresolved_klass(_holder, get_klass_index());
+// ciBytecodeStream::get_constant_pool_tag
+//
+// Return the constant pool tag at the given index, without resolving
+// the entry.
+constantTag ciBytecodeStream::get_constant_pool_tag(int index) const {
+  VM_ENTRY_MARK;
+  return _method->get_methodOop()->constants()->tag_at(index);
 }
 
 // ------------------------------------------------------------------
@@ -378,13 +406,16 @@
 
 // ------------------------------------------------------------------
 // ciBytecodeStream::get_cpcache
-ciCPCache* ciBytecodeStream::get_cpcache() {
-  VM_ENTRY_MARK;
-  // Get the constant pool.
-  constantPoolOop      cpool   = _holder->get_instanceKlass()->constants();
-  constantPoolCacheOop cpcache = cpool->cache();
+ciCPCache* ciBytecodeStream::get_cpcache() const {
+  if (_cpcache == NULL) {
+    VM_ENTRY_MARK;
+    // Get the constant pool.
+    constantPoolOop      cpool   = _holder->get_instanceKlass()->constants();
+    constantPoolCacheOop cpcache = cpool->cache();
 
-  return CURRENT_ENV->get_object(cpcache)->as_cpcache();
+    *(ciCPCache**)&_cpcache = CURRENT_ENV->get_object(cpcache)->as_cpcache();
+  }
+  return _cpcache;
 }
 
 // ------------------------------------------------------------------
--- a/hotspot/src/share/vm/ci/ciStreams.hpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/ci/ciStreams.hpp	Fri Jul 02 01:36:15 2010 -0700
@@ -46,6 +46,7 @@
 
   ciMethod* _method;           // the method
   ciInstanceKlass* _holder;
+  ciCPCache* _cpcache;
   address _bc_start;            // Start of current bytecode for table
   address _was_wide;            // Address past last wide bytecode
   jint* _table_base;            // Aligned start of last table or switch
@@ -58,7 +59,9 @@
 
   void reset( address base, unsigned int size ) {
     _bc_start =_was_wide = 0;
-    _start = _pc = base; _end = base + size; }
+    _start = _pc = base; _end = base + size;
+    _cpcache = NULL;
+  }
 
   void assert_wide(bool require_wide) const {
     if (require_wide)
@@ -136,15 +139,20 @@
   bool is_wide() const { return ( _pc == _was_wide ); }
 
   // Does this instruction contain an index which refers into the CP cache?
-  bool uses_cp_cache() const { return Bytecodes::uses_cp_cache(cur_bc_raw()); }
+  bool has_cache_index() const { return Bytecodes::uses_cp_cache(cur_bc_raw()); }
 
   int get_index_u1() const {
     return bytecode()->get_index_u1(cur_bc_raw());
   }
 
+  int get_index_u1_cpcache() const {
+    return bytecode()->get_index_u1_cpcache(cur_bc_raw());
+  }
+
   // Get a byte index following this bytecode.
   // If prefixed with a wide bytecode, get a wide index.
   int get_index() const {
+    assert(!has_cache_index(), "else use cpcache variant");
     return (_pc == _was_wide)   // was widened?
       ? get_index_u2(true)      // yes, return wide index
       : get_index_u1();         // no, return narrow index
@@ -207,7 +215,9 @@
     return cur_bci() + get_int_table(index); }
 
   // --- Constant pool access ---
-  int get_constant_index() const;
+  int get_constant_raw_index() const;
+  int get_constant_pool_index() const;
+  int get_constant_cache_index() const;
   int get_field_index();
   int get_method_index();
 
@@ -217,12 +227,17 @@
   int get_klass_index() const;
 
   // If this bytecode is one of the ldc variants, get the referenced
-  // constant
+  // constant.  Do not attempt to resolve it, since that would require
+  // execution of Java code.  If it is not resolved, return an unloaded
+  // object (ciConstant.as_object()->is_loaded() == false).
   ciConstant get_constant();
-  // True if the ldc variant points to an unresolved string
-  bool is_unresolved_string() const;
-  // True if the ldc variant points to an unresolved klass
-  bool is_unresolved_klass() const;
+  constantTag get_constant_pool_tag(int index) const;
+
+  // True if the klass-using bytecode points to an unresolved klass
+  bool is_unresolved_klass() const {
+    constantTag tag = get_constant_pool_tag(get_klass_index());
+    return tag.is_unresolved_klass();
+  }
 
   // If this bytecode is one of get_field, get_static, put_field,
   // or put_static, get the referenced field.
@@ -238,7 +253,7 @@
   int       get_method_holder_index();
   int       get_method_signature_index();
 
-  ciCPCache*  get_cpcache();
+  ciCPCache*  get_cpcache() const;
   ciCallSite* get_call_site();
 };
 
--- a/hotspot/src/share/vm/ci/ciTypeFlow.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/ci/ciTypeFlow.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -712,10 +712,8 @@
     ciObject* obj = con.as_object();
     if (obj->is_null_object()) {
       push_null();
-    } else if (obj->is_klass()) {
-      // The type of ldc <class> is java.lang.Class
-      push_object(outer()->env()->Class_klass());
     } else {
+      assert(!obj->is_klass(), "must be java_mirror of klass");
       push_object(obj->klass());
     }
   } else {
--- a/hotspot/src/share/vm/classfile/classFileParser.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/classfile/classFileParser.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -117,6 +117,29 @@
           cp->string_index_at_put(index, string_index);
         }
         break;
+      case JVM_CONSTANT_MethodHandle :
+      case JVM_CONSTANT_MethodType :
+        if (!EnableMethodHandles ||
+            _major_version < Verifier::INVOKEDYNAMIC_MAJOR_VERSION) {
+          classfile_parse_error(
+            (!EnableInvokeDynamic ?
+             "This JVM does not support constant tag %u in class file %s" :
+             "Class file version does not support constant tag %u in class file %s"),
+            tag, CHECK);
+        }
+        if (tag == JVM_CONSTANT_MethodHandle) {
+          cfs->guarantee_more(4, CHECK);  // ref_kind, method_index, tag/access_flags
+          u1 ref_kind = cfs->get_u1_fast();
+          u2 method_index = cfs->get_u2_fast();
+          cp->method_handle_index_at_put(index, ref_kind, method_index);
+        } else if (tag == JVM_CONSTANT_MethodType) {
+          cfs->guarantee_more(3, CHECK);  // signature_index, tag/access_flags
+          u2 signature_index = cfs->get_u2_fast();
+          cp->method_type_index_at_put(index, signature_index);
+        } else {
+          ShouldNotReachHere();
+        }
+        break;
       case JVM_CONSTANT_Integer :
         {
           cfs->guarantee_more(5, CHECK);  // bytes, tag/access_flags
@@ -337,6 +360,60 @@
           cp->unresolved_string_at_put(index, sym);
         }
         break;
+      case JVM_CONSTANT_MethodHandle :
+        {
+          int ref_index = cp->method_handle_index_at(index);
+          check_property(
+            valid_cp_range(ref_index, length) &&
+                EnableMethodHandles,
+              "Invalid constant pool index %u in class file %s",
+              ref_index, CHECK_(nullHandle));
+          constantTag tag = cp->tag_at(ref_index);
+          int ref_kind  = cp->method_handle_ref_kind_at(index);
+          switch (ref_kind) {
+          case JVM_REF_getField:
+          case JVM_REF_getStatic:
+          case JVM_REF_putField:
+          case JVM_REF_putStatic:
+            check_property(
+              tag.is_field(),
+              "Invalid constant pool index %u in class file %s (not a field)",
+              ref_index, CHECK_(nullHandle));
+            break;
+          case JVM_REF_invokeVirtual:
+          case JVM_REF_invokeStatic:
+          case JVM_REF_invokeSpecial:
+          case JVM_REF_newInvokeSpecial:
+            check_property(
+              tag.is_method(),
+              "Invalid constant pool index %u in class file %s (not a method)",
+              ref_index, CHECK_(nullHandle));
+            break;
+          case JVM_REF_invokeInterface:
+            check_property(
+              tag.is_interface_method(),
+              "Invalid constant pool index %u in class file %s (not an interface method)",
+              ref_index, CHECK_(nullHandle));
+            break;
+          default:
+            classfile_parse_error(
+              "Bad method handle kind at constant pool index %u in class file %s",
+              index, CHECK_(nullHandle));
+          }
+          // Keep the ref_index unchanged.  It will be indirected at link-time.
+        }
+        break;
+      case JVM_CONSTANT_MethodType :
+        {
+          int ref_index = cp->method_type_index_at(index);
+          check_property(
+            valid_cp_range(ref_index, length) &&
+                cp->tag_at(ref_index).is_utf8() &&
+                EnableMethodHandles,
+              "Invalid constant pool index %u in class file %s",
+              ref_index, CHECK_(nullHandle));
+        }
+        break;
       default:
         fatal(err_msg("bad constant pool tag value %u",
                       cp->tag_at(index).value()));
@@ -452,6 +529,43 @@
         }
         break;
       }
+      case JVM_CONSTANT_MethodHandle: {
+        int ref_index = cp->method_handle_index_at(index);
+        int ref_kind  = cp->method_handle_ref_kind_at(index);
+        switch (ref_kind) {
+        case JVM_REF_invokeVirtual:
+        case JVM_REF_invokeStatic:
+        case JVM_REF_invokeSpecial:
+        case JVM_REF_newInvokeSpecial:
+          {
+            int name_and_type_ref_index = cp->name_and_type_ref_index_at(ref_index);
+            int name_ref_index = cp->name_ref_index_at(name_and_type_ref_index);
+            symbolHandle name(THREAD, cp->symbol_at(name_ref_index));
+            if (ref_kind == JVM_REF_newInvokeSpecial) {
+              if (name() != vmSymbols::object_initializer_name()) {
+                classfile_parse_error(
+                  "Bad constructor name at constant pool index %u in class file %s",
+                  name_ref_index, CHECK_(nullHandle));
+              }
+            } else {
+              if (name() == vmSymbols::object_initializer_name()) {
+                classfile_parse_error(
+                  "Bad method name at constant pool index %u in class file %s",
+                  name_ref_index, CHECK_(nullHandle));
+              }
+            }
+          }
+          break;
+          // Other ref_kinds are already fully checked in previous pass.
+        }
+        break;
+      }
+      case JVM_CONSTANT_MethodType: {
+        symbolHandle no_name = vmSymbolHandles::type_name(); // place holder
+        symbolHandle signature(THREAD, cp->method_type_signature_at(index));
+        verify_legal_method_signature(no_name, signature, CHECK_(nullHandle));
+        break;
+      }
     }  // end of switch
   }  // end of for
 
@@ -467,7 +581,7 @@
   case JVM_CONSTANT_UnresolvedClass :
     // Patching a class means pre-resolving it.
     // The name in the constant pool is ignored.
-    if (patch->klass() == SystemDictionary::Class_klass()) { // %%% java_lang_Class::is_instance
+    if (java_lang_Class::is_instance(patch())) {
       guarantee_property(!java_lang_Class::is_primitive(patch()),
                          "Illegal class patch at %d in class file %s",
                          index, CHECK);
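The parser reads the two new constant forms with the layouts defined by the class-file format: CONSTANT_MethodHandle_info is a u1 reference_kind followed by a u2 reference_index, and CONSTANT_MethodType_info is a single u2 descriptor_index. A minimal Java reader for just those two entry kinds (illustrative only; class and method names are mine):

    import java.io.DataInputStream;
    import java.io.IOException;

    final class CpEntryReader {
        static final int CONSTANT_MethodHandle = 15;
        static final int CONSTANT_MethodType   = 16;

        static void readOne(DataInputStream in, int tag) throws IOException {
            switch (tag) {
                case CONSTANT_MethodHandle:
                    int refKind  = in.readUnsignedByte();   // JVM_REF_getField .. JVM_REF_invokeInterface (1..9)
                    int refIndex = in.readUnsignedShort();  // CP index of the referenced field/method
                    System.out.println("MethodHandle kind=" + refKind + " ref=#" + refIndex);
                    break;
                case CONSTANT_MethodType:
                    int descriptorIndex = in.readUnsignedShort(); // CP index of a Utf8 method descriptor
                    System.out.println("MethodType descriptor=#" + descriptorIndex);
                    break;
                default:
                    throw new IOException("unexpected tag " + tag);
            }
        }
    }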
--- a/hotspot/src/share/vm/classfile/systemDictionary.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/classfile/systemDictionary.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -2454,6 +2454,48 @@
   return Handle(THREAD, (oop) result.get_jobject());
 }
 
+// Ask Java code to find or construct a method handle constant.
+Handle SystemDictionary::link_method_handle_constant(KlassHandle caller,
+                                                     int ref_kind, //e.g., JVM_REF_invokeVirtual
+                                                     KlassHandle callee,
+                                                     symbolHandle name_sym,
+                                                     symbolHandle signature,
+                                                     TRAPS) {
+  Handle empty;
+  Handle name = java_lang_String::create_from_symbol(name_sym(), CHECK_(empty));
+  Handle type;
+  if (signature->utf8_length() > 0 && signature->byte_at(0) == '(') {
+    bool ignore_is_on_bcp = false;
+    type = find_method_handle_type(signature, caller, ignore_is_on_bcp, CHECK_(empty));
+  } else {
+    SignatureStream ss(signature(), false);
+    if (!ss.is_done()) {
+      oop mirror = ss.as_java_mirror(caller->class_loader(), caller->protection_domain(),
+                                     SignatureStream::NCDFError, CHECK_(empty));
+      type = Handle(THREAD, mirror);
+      ss.next();
+      if (!ss.is_done())  type = Handle();  // error!
+    }
+  }
+  if (type.is_null()) {
+    THROW_MSG_(vmSymbols::java_lang_LinkageError(), "bad signature", empty);
+  }
+
+  // call sun.dyn.MethodHandleNatives::linkMethodHandleConstant(Class caller, int refKind, Class callee, String name, Object type) -> MethodHandle
+  JavaCallArguments args;
+  args.push_oop(caller->java_mirror());  // the referring class
+  args.push_int(ref_kind);
+  args.push_oop(callee->java_mirror());  // the target class
+  args.push_oop(name());
+  args.push_oop(type());
+  JavaValue result(T_OBJECT);
+  JavaCalls::call_static(&result,
+                         SystemDictionary::MethodHandleNatives_klass(),
+                         vmSymbols::linkMethodHandleConstant_name(),
+                         vmSymbols::linkMethodHandleConstant_signature(),
+                         &args, CHECK_(empty));
+  return Handle(THREAD, (oop) result.get_jobject());
+}
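link_method_handle_constant() dispatches on the first character of the signature: a descriptor starting with '(' is treated as a full method type, anything else as a single field type whose Class mirror is resolved via SignatureStream. A trivial Java sketch of that dispatch (invented names):

    final class DescriptorKindSketch {
        static boolean isMethodDescriptor(String descriptor) {
            return !descriptor.isEmpty() && descriptor.charAt(0) == '(';
        }
        public static void main(String[] args) {
            System.out.println(isMethodDescriptor("(I)Ljava/lang/String;")); // true  -> MethodType path
            System.out.println(isMethodDescriptor("Ljava/lang/String;"));    // false -> Class mirror path
        }
    }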
 
 // Ask Java code to find or construct a java.dyn.CallSite for the given
 // name and signature, as interpreted relative to the given class loader.
--- a/hotspot/src/share/vm/classfile/systemDictionary.hpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/classfile/systemDictionary.hpp	Fri Jul 02 01:36:15 2010 -0700
@@ -473,6 +473,13 @@
                                            KlassHandle accessing_klass,
                                            bool& return_bcp_flag,
                                            TRAPS);
+  // ask Java to compute a java.dyn.MethodHandle object for a given CP entry
+  static Handle    link_method_handle_constant(KlassHandle caller,
+                                               int ref_kind, //e.g., JVM_REF_invokeVirtual
+                                               KlassHandle callee,
+                                               symbolHandle name,
+                                               symbolHandle signature,
+                                               TRAPS);
   // ask Java to create a dynamic call site, while linking an invokedynamic op
   static Handle    make_dynamic_call_site(Handle bootstrap_method,
                                           // Callee information:
--- a/hotspot/src/share/vm/classfile/verifier.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/classfile/verifier.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -1598,7 +1598,10 @@
   if (opcode == Bytecodes::_ldc || opcode == Bytecodes::_ldc_w) {
     if (!tag.is_unresolved_string() && !tag.is_unresolved_klass()) {
       types = (1 << JVM_CONSTANT_Integer) | (1 << JVM_CONSTANT_Float)
-            | (1 << JVM_CONSTANT_String)  | (1 << JVM_CONSTANT_Class);
+            | (1 << JVM_CONSTANT_String)  | (1 << JVM_CONSTANT_Class)
+            | (1 << JVM_CONSTANT_MethodHandle) | (1 << JVM_CONSTANT_MethodType);
+      // Note:  The class file parser already verified the legality of
+      // MethodHandle and MethodType constants.
       verify_cp_type(index, cp, types, CHECK_VERIFY(this));
     }
   } else {
@@ -1632,6 +1635,14 @@
     current_frame->push_stack_2(
       VerificationType::long_type(),
       VerificationType::long2_type(), CHECK_VERIFY(this));
+  } else if (tag.is_method_handle()) {
+    current_frame->push_stack(
+      VerificationType::reference_type(
+        vmSymbols::java_dyn_MethodHandle()), CHECK_VERIFY(this));
+  } else if (tag.is_method_type()) {
+    current_frame->push_stack(
+      VerificationType::reference_type(
+        vmSymbols::java_dyn_MethodType()), CHECK_VERIFY(this));
   } else {
     verify_error(bci, "Invalid index in ldc");
     return;
@@ -1920,9 +1931,12 @@
   // Get referenced class type
   VerificationType ref_class_type;
   if (opcode == Bytecodes::_invokedynamic) {
-    if (!EnableInvokeDynamic) {
+    if (!EnableInvokeDynamic ||
+        _klass->major_version() < Verifier::INVOKEDYNAMIC_MAJOR_VERSION) {
       class_format_error(
-        "invokedynamic instructions not enabled on this JVM",
+        (!EnableInvokeDynamic ?
+         "invokedynamic instructions not enabled in this JVM" :
+         "invokedynamic instructions not supported by this class file version"),
         _klass->external_name());
       return;
     }
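The widened ldc whitelist is a simple tag bitmask: each permitted constant-pool tag contributes one bit, and a candidate tag is accepted iff its bit is set. A small Java sketch using the standard tag values (Integer=3, Float=4, Class=7, String=8, MethodHandle=15, MethodType=16):

    final class LdcTagWhitelist {
        static final int TYPES = (1 << 3) | (1 << 4) | (1 << 8) | (1 << 7)
                               | (1 << 15) | (1 << 16);

        static boolean allows(int tag) {
            return (TYPES & (1 << tag)) != 0;
        }

        public static void main(String[] args) {
            System.out.println(allows(15)); // true:  CONSTANT_MethodHandle now verifies
            System.out.println(allows(5));  // false: CONSTANT_Long still requires ldc2_w
        }
    }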
--- a/hotspot/src/share/vm/classfile/verifier.hpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/classfile/verifier.hpp	Fri Jul 02 01:36:15 2010 -0700
@@ -25,7 +25,10 @@
 // The verifier class
 class Verifier : AllStatic {
  public:
-  enum { STACKMAP_ATTRIBUTE_MAJOR_VERSION = 50 };
+  enum {
+    STACKMAP_ATTRIBUTE_MAJOR_VERSION    = 50,
+    INVOKEDYNAMIC_MAJOR_VERSION         = 51
+  };
   typedef enum { ThrowException, NoException } Mode;
 
   /**
--- a/hotspot/src/share/vm/classfile/vmSymbols.hpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/classfile/vmSymbols.hpp	Fri Jul 02 01:36:15 2010 -0700
@@ -246,6 +246,8 @@
   /* internal up-calls made only by the JVM, via class sun.dyn.MethodHandleNatives: */            \
   template(findMethodHandleType_name,                 "findMethodHandleType")                     \
   template(findMethodHandleType_signature, "(Ljava/lang/Class;[Ljava/lang/Class;)Ljava/dyn/MethodType;") \
+  template(linkMethodHandleConstant_name,             "linkMethodHandleConstant")                 \
+  template(linkMethodHandleConstant_signature, "(Ljava/lang/Class;ILjava/lang/Class;Ljava/lang/String;Ljava/lang/Object;)Ljava/dyn/MethodHandle;") \
   template(makeDynamicCallSite_name,                  "makeDynamicCallSite")                      \
   template(makeDynamicCallSite_signature, "(Ljava/dyn/MethodHandle;Ljava/lang/String;Ljava/dyn/MethodType;Ljava/lang/Object;Lsun/dyn/MemberName;I)Ljava/dyn/CallSite;") \
   NOT_LP64(  do_alias(machine_word_signature,         int_signature)  )                           \
--- a/hotspot/src/share/vm/code/nmethod.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/code/nmethod.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -584,6 +584,7 @@
     _oops_do_mark_link       = NULL;
     _method                  = method;
     _entry_bci               = InvocationEntryBci;
+    _jmethod_id              = NULL;
     _osr_link                = NULL;
     _scavenge_root_link      = NULL;
     _scavenge_root_state     = 0;
@@ -677,6 +678,7 @@
     _oops_do_mark_link       = NULL;
     _method                  = method;
     _entry_bci               = InvocationEntryBci;
+    _jmethod_id              = NULL;
     _osr_link                = NULL;
     _scavenge_root_link      = NULL;
     _scavenge_root_state     = 0;
@@ -784,6 +786,7 @@
     NOT_PRODUCT(_has_debug_info = false);
     _oops_do_mark_link       = NULL;
     _method                  = method;
+    _jmethod_id              = NULL;
     _compile_id              = compile_id;
     _comp_level              = comp_level;
     _entry_bci               = entry_bci;
@@ -1488,11 +1491,25 @@
       moop->signature()->utf8_length(),
       code_begin(), code_size());
 
+  if (JvmtiExport::should_post_compiled_method_load() ||
+      JvmtiExport::should_post_compiled_method_unload()) {
+    get_and_cache_jmethod_id();
+  }
+
   if (JvmtiExport::should_post_compiled_method_load()) {
     JvmtiExport::post_compiled_method_load(this);
   }
 }
 
+jmethodID nmethod::get_and_cache_jmethod_id() {
+  if (_jmethod_id == NULL) {
+    // Cache the jmethod_id since it can no longer be looked up once the
+    // method itself has been marked for unloading.
+    _jmethod_id = method()->jmethod_id();
+  }
+  return _jmethod_id;
+}
+
 void nmethod::post_compiled_method_unload() {
   if (unload_reported()) {
     // During unloading we transition to unloaded and then to zombie
@@ -1504,12 +1521,17 @@
   DTRACE_METHOD_UNLOAD_PROBE(method());
 
   // If a JVMTI agent has enabled the CompiledMethodUnload event then
-  // post the event. Sometime later this nmethod will be made a zombie by
-  // the sweeper but the methodOop will not be valid at that point.
-  if (JvmtiExport::should_post_compiled_method_unload()) {
+  // post the event. Sometime later this nmethod will be made a zombie
+  // by the sweeper but the methodOop will not be valid at that point.
+  // If the _jmethod_id is null then no load event was ever requested
+  // so don't bother posting the unload.  The main reason for this is
+  // that the jmethodID is a weak reference to the methodOop so if
+  // it's being unloaded there's no way to look it up since the weak
+  // ref will have been cleared.
+  if (_jmethod_id != NULL && JvmtiExport::should_post_compiled_method_unload()) {
     assert(!unload_reported(), "already unloaded");
     HandleMark hm;
-    JvmtiExport::post_compiled_method_unload(method()->jmethod_id(), code_begin());
+    JvmtiExport::post_compiled_method_unload(_jmethod_id, code_begin());
   }
 
   // The JVMTI CompiledMethodUnload event can be enabled or disabled at
@@ -2659,13 +2681,10 @@
         case Bytecodes::_getstatic:
         case Bytecodes::_putstatic:
           {
-            methodHandle sdm = sd->method();
-            Bytecode_field* field = Bytecode_field_at(sdm(), sdm->bcp_from(sd->bci()));
-            constantPoolOop sdmc = sdm->constants();
-            symbolOop name = sdmc->name_ref_at(field->index());
+            Bytecode_field* field = Bytecode_field_at(sd->method(), sd->bci());
             st->print(" ");
-            if (name != NULL)
-              name->print_symbol_on(st);
+            if (field->name() != NULL)
+              field->name()->print_symbol_on(st);
             else
               st->print("<UNKNOWN>");
           }
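
The caching pattern behind the nmethod change above, reduced to a standalone sketch: grab the weak method id while the method is still alive, and post the unload event only if a load event ever cached one. CompiledMethodEvents and method_id_t are invented stand-ins for nmethod and jmethodID, so this illustrates the idea, not the VM code.

#include <cstdio>

typedef const void* method_id_t;            // stands in for jmethodID

class CompiledMethodEvents {
  method_id_t _cached_id;
 public:
  CompiledMethodEvents() : _cached_id(nullptr) {}

  // Called while the method is still alive and its id can still be looked up.
  void post_load(method_id_t live_id) {
    if (_cached_id == nullptr) _cached_id = live_id;   // cache for later unload
    std::printf("load posted (id cached: %d)\n", _cached_id != nullptr);
  }

  // Called when the method may already be unloadable: rely only on the cached
  // id, and skip the event entirely if no load event ever cached one.
  void post_unload() {
    if (_cached_id == nullptr) return;
    std::printf("unload posted with cached id\n");
  }
};

int main() {
  CompiledMethodEvents e;
  int dummy = 0;
  e.post_load(&dummy);
  e.post_unload();
  return 0;
}
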
--- a/hotspot/src/share/vm/code/nmethod.hpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/code/nmethod.hpp	Fri Jul 02 01:36:15 2010 -0700
@@ -135,6 +135,7 @@
 
   methodOop _method;
   int       _entry_bci;        // != InvocationEntryBci if this nmethod is an on-stack replacement method
+  jmethodID _jmethod_id;       // Cache of method()->jmethod_id()
 
   // To support simple linked-list chaining of nmethods:
   nmethod*  _osr_link;         // from instanceKlass::osr_nmethods_head
@@ -599,6 +600,7 @@
 
   // jvmti support:
   void post_compiled_method_load_event();
+  jmethodID get_and_cache_jmethod_id();
 
   // verify operations
   void verify();
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -3972,6 +3972,10 @@
 
   void work(int i) {
     if (i >= _n_workers) return;  // no work needed this round
+
+    double start_time_ms = os::elapsedTime() * 1000.0;
+    _g1h->g1_policy()->record_gc_worker_start_time(i, start_time_ms);
+
     ResourceMark rm;
     HandleMark   hm;
 
@@ -4019,7 +4023,7 @@
       double elapsed_ms = (os::elapsedTime()-start)*1000.0;
       double term_ms = pss.term_time()*1000.0;
       _g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms);
-      _g1h->g1_policy()->record_termination_time(i, term_ms);
+      _g1h->g1_policy()->record_termination(i, term_ms, pss.term_attempts());
     }
     _g1h->g1_policy()->record_thread_age_table(pss.age_table());
     _g1h->update_surviving_young_words(pss.surviving_young_words()+1);
@@ -4043,7 +4047,8 @@
       double term         = pss.term_time();
       gclog_or_tty->print("  Elapsed: %7.2f ms.\n"
                           "    Strong roots: %7.2f ms (%6.2f%%)\n"
-                          "    Termination:  %7.2f ms (%6.2f%%) (in %d entries)\n",
+                          "    Termination:  %7.2f ms (%6.2f%%) "
+                                                 "(in "SIZE_FORMAT" entries)\n",
                           elapsed * 1000.0,
                           strong_roots * 1000.0, (strong_roots*100.0/elapsed),
                           term * 1000.0, (term*100.0/elapsed),
@@ -4059,6 +4064,8 @@
 
     assert(pss.refs_to_scan() == 0, "Task queue should be empty");
     assert(pss.overflowed_refs_to_scan() == 0, "Overflow queue should be empty");
+    double end_time_ms = os::elapsedTime() * 1000.0;
+    _g1h->g1_policy()->record_gc_worker_end_time(i, end_time_ms);
   }
 };
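
A toy version of the per-worker bookkeeping these new calls feed, with invented array and method names; it only shows what gets recorded (start time, termination time and attempts, end time), not G1's actual policy object. Attempts are stored as doubles to mirror the per-thread arrays in the policy.

#include <cstddef>
#include <cstdio>

struct WorkerPhaseTimes {
  static const int kWorkers = 4;
  double start_ms[kWorkers];
  double termination_ms[kWorkers];
  double termination_attempts[kWorkers];   // kept as double, like the policy arrays
  double end_ms[kWorkers];

  void record_start(int i, double ms)      { start_ms[i] = ms; }
  void record_termination(int i, double ms, size_t attempts) {
    termination_ms[i]       = ms;
    termination_attempts[i] = (double) attempts;
  }
  void record_end(int i, double ms)        { end_ms[i] = ms; }
};

int main() {
  WorkerPhaseTimes t;
  t.record_start(0, 100.0);
  t.record_termination(0, 2.5, 17);
  t.record_end(0, 130.0);
  std::printf("worker 0: %.1f ms total\n", t.end_ms[0] - t.start_ms[0]);
  return 0;
}
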
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Fri Jul 02 01:36:15 2010 -0700
@@ -1549,7 +1549,7 @@
   int _hash_seed;
   int _queue_num;
 
-  int _term_attempts;
+  size_t _term_attempts;
 #if G1_DETAILED_STATS
   int _pushes, _pops, _steals, _steal_attempts;
   int _overflow_pushes;
@@ -1727,8 +1727,8 @@
   int* hash_seed() { return &_hash_seed; }
   int  queue_num() { return _queue_num; }
 
-  int term_attempts()   { return _term_attempts; }
-  void note_term_attempt()  { _term_attempts++; }
+  size_t term_attempts()   { return _term_attempts; }
+  void note_term_attempt() { _term_attempts++; }
 
 #if G1_DETAILED_STATS
   int pushes()          { return _pushes; }
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -231,20 +231,21 @@
   _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
   _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;
 
+  _par_last_gc_worker_start_times_ms = new double[_parallel_gc_threads];
   _par_last_ext_root_scan_times_ms = new double[_parallel_gc_threads];
   _par_last_mark_stack_scan_times_ms = new double[_parallel_gc_threads];
 
-  _par_last_update_rs_start_times_ms = new double[_parallel_gc_threads];
   _par_last_update_rs_times_ms = new double[_parallel_gc_threads];
   _par_last_update_rs_processed_buffers = new double[_parallel_gc_threads];
 
-  _par_last_scan_rs_start_times_ms = new double[_parallel_gc_threads];
   _par_last_scan_rs_times_ms = new double[_parallel_gc_threads];
   _par_last_scan_new_refs_times_ms = new double[_parallel_gc_threads];
 
   _par_last_obj_copy_times_ms = new double[_parallel_gc_threads];
 
   _par_last_termination_times_ms = new double[_parallel_gc_threads];
+  _par_last_termination_attempts = new double[_parallel_gc_threads];
+  _par_last_gc_worker_end_times_ms = new double[_parallel_gc_threads];
 
   // start conservatively
   _expensive_region_limit_ms = 0.5 * (double) MaxGCPauseMillis;
@@ -274,10 +275,64 @@
 
   // </NEW PREDICTION>
 
-  double time_slice  = (double) GCPauseIntervalMillis / 1000.0;
+  // Below, we might need to calculate the pause time target based on
+  // the pause interval. When we do so we are going to give G1 maximum
+  // flexibility and allow it to do pauses when it needs to. So, we'll
+  // arrange for the pause interval to be pause time target + 1 to
+  // ensure that a) the pause time target is maximized with respect to
+  // the pause interval and b) we maintain the invariant that pause
+  // time target < pause interval. If the user does not want this
+  // maximum flexibility, they will have to set the pause interval
+  // explicitly.
+
+  // First make sure that, if either parameter is set, its value is
+  // reasonable.
+  if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
+    if (MaxGCPauseMillis < 1) {
+      vm_exit_during_initialization("MaxGCPauseMillis should be "
+                                    "greater than 0");
+    }
+  }
+  if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
+    if (GCPauseIntervalMillis < 1) {
+      vm_exit_during_initialization("GCPauseIntervalMillis should be "
+                                    "greater than 0");
+    }
+  }
+
+  // Then, if the pause time target parameter was not set, set it to
+  // the default value.
+  if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
+    if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
+      // The default pause time target in G1 is 200ms
+      FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
+    } else {
+      // We do not allow the pause interval to be set without the
+      // pause time target
+      vm_exit_during_initialization("GCPauseIntervalMillis cannot be set "
+                                    "without setting MaxGCPauseMillis");
+    }
+  }
+
+  // Then, if the interval parameter was not set, set it according to
+  // the pause time target (this will also deal with the case when the
+  // pause time target is the default value).
+  if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
+    FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
+  }
+
+  // Finally, make sure that the two parameters are consistent.
+  if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
+    char buffer[256];
+    jio_snprintf(buffer, 256,
+                 "MaxGCPauseMillis (%u) should be less than "
+                 "GCPauseIntervalMillis (%u)",
+                 MaxGCPauseMillis, GCPauseIntervalMillis);
+    vm_exit_during_initialization(buffer);
+  }
+
   double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
-  guarantee(max_gc_time < time_slice,
-            "Max GC time should not be greater than the time slice");
+  double time_slice  = (double) GCPauseIntervalMillis / 1000.0;
   _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);
   _sigma = (double) G1ConfidencePercent / 100.0;
 
@@ -782,16 +837,17 @@
   // if they are not set properly
 
   for (int i = 0; i < _parallel_gc_threads; ++i) {
-    _par_last_ext_root_scan_times_ms[i] = -666.0;
-    _par_last_mark_stack_scan_times_ms[i] = -666.0;
-    _par_last_update_rs_start_times_ms[i] = -666.0;
-    _par_last_update_rs_times_ms[i] = -666.0;
-    _par_last_update_rs_processed_buffers[i] = -666.0;
-    _par_last_scan_rs_start_times_ms[i] = -666.0;
-    _par_last_scan_rs_times_ms[i] = -666.0;
-    _par_last_scan_new_refs_times_ms[i] = -666.0;
-    _par_last_obj_copy_times_ms[i] = -666.0;
-    _par_last_termination_times_ms[i] = -666.0;
+    _par_last_gc_worker_start_times_ms[i] = -1234.0;
+    _par_last_ext_root_scan_times_ms[i] = -1234.0;
+    _par_last_mark_stack_scan_times_ms[i] = -1234.0;
+    _par_last_update_rs_times_ms[i] = -1234.0;
+    _par_last_update_rs_processed_buffers[i] = -1234.0;
+    _par_last_scan_rs_times_ms[i] = -1234.0;
+    _par_last_scan_new_refs_times_ms[i] = -1234.0;
+    _par_last_obj_copy_times_ms[i] = -1234.0;
+    _par_last_termination_times_ms[i] = -1234.0;
+    _par_last_termination_attempts[i] = -1234.0;
+    _par_last_gc_worker_end_times_ms[i] = -1234.0;
   }
 #endif
 
@@ -942,9 +998,9 @@
   return sum;
 }
 
-void G1CollectorPolicy::print_par_stats (int level,
-                                         const char* str,
-                                         double* data,
+void G1CollectorPolicy::print_par_stats(int level,
+                                        const char* str,
+                                        double* data,
                                          bool summary) {
   double min = data[0], max = data[0];
   double total = 0.0;
@@ -973,10 +1029,10 @@
   gclog_or_tty->print_cr("]");
 }
 
-void G1CollectorPolicy::print_par_buffers (int level,
-                                         const char* str,
-                                         double* data,
-                                         bool summary) {
+void G1CollectorPolicy::print_par_sizes(int level,
+                                        const char* str,
+                                        double* data,
+                                        bool summary) {
   double min = data[0], max = data[0];
   double total = 0.0;
   int j;
@@ -1321,15 +1377,22 @@
       }
       if (parallel) {
         print_stats(1, "Parallel Time", _cur_collection_par_time_ms);
-        print_par_stats(2, "Update RS (Start)", _par_last_update_rs_start_times_ms, false);
+        print_par_stats(2, "GC Worker Start Time",
+                        _par_last_gc_worker_start_times_ms, false);
         print_par_stats(2, "Update RS", _par_last_update_rs_times_ms);
-        print_par_buffers(3, "Processed Buffers",
-                          _par_last_update_rs_processed_buffers, true);
-        print_par_stats(2, "Ext Root Scanning", _par_last_ext_root_scan_times_ms);
-        print_par_stats(2, "Mark Stack Scanning", _par_last_mark_stack_scan_times_ms);
+        print_par_sizes(3, "Processed Buffers",
+                        _par_last_update_rs_processed_buffers, true);
+        print_par_stats(2, "Ext Root Scanning",
+                        _par_last_ext_root_scan_times_ms);
+        print_par_stats(2, "Mark Stack Scanning",
+                        _par_last_mark_stack_scan_times_ms);
         print_par_stats(2, "Scan RS", _par_last_scan_rs_times_ms);
         print_par_stats(2, "Object Copy", _par_last_obj_copy_times_ms);
         print_par_stats(2, "Termination", _par_last_termination_times_ms);
+        print_par_sizes(3, "Termination Attempts",
+                        _par_last_termination_attempts, true);
+        print_par_stats(2, "GC Worker End Time",
+                        _par_last_gc_worker_end_times_ms, false);
         print_stats(2, "Other", parallel_other_time);
         print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
       } else {
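
The MaxGCPauseMillis / GCPauseIntervalMillis rules added earlier in this file's diff, restated as one standalone function: both flags must be positive, the interval cannot be set without the pause target, an unset target defaults to 200 ms, an unset interval defaults to target + 1, and the target must stay below the interval. validate_pause_flags and its parameters are made up for the sketch; only the numeric rules come from the code above.

#include <cstdio>

static bool validate_pause_flags(bool target_set, bool interval_set,
                                 unsigned& max_pause_ms, unsigned& interval_ms,
                                 const char*& error) {
  error = nullptr;
  if (target_set && max_pause_ms < 1) {
    error = "MaxGCPauseMillis should be greater than 0"; return false;
  }
  if (interval_set && interval_ms < 1) {
    error = "GCPauseIntervalMillis should be greater than 0"; return false;
  }
  if (!target_set) {
    if (interval_set) {
      error = "GCPauseIntervalMillis cannot be set without setting MaxGCPauseMillis";
      return false;
    }
    max_pause_ms = 200;                         // default pause time target
  }
  if (!interval_set) interval_ms = max_pause_ms + 1;  // maximize target w.r.t. interval
  if (max_pause_ms >= interval_ms) {
    error = "MaxGCPauseMillis should be less than GCPauseIntervalMillis"; return false;
  }
  return true;
}

int main() {
  unsigned target = 0, interval = 0;
  const char* err;
  if (validate_pause_flags(false, false, target, interval, err))
    std::printf("target=%u interval=%u\n", target, interval);   // 200 201
  return 0;
}
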
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Fri Jul 02 01:36:15 2010 -0700
@@ -171,16 +171,17 @@
   double*    _cur_aux_times_ms;
   bool*      _cur_aux_times_set;
 
+  double* _par_last_gc_worker_start_times_ms;
   double* _par_last_ext_root_scan_times_ms;
   double* _par_last_mark_stack_scan_times_ms;
-  double* _par_last_update_rs_start_times_ms;
   double* _par_last_update_rs_times_ms;
   double* _par_last_update_rs_processed_buffers;
-  double* _par_last_scan_rs_start_times_ms;
   double* _par_last_scan_rs_times_ms;
   double* _par_last_scan_new_refs_times_ms;
   double* _par_last_obj_copy_times_ms;
   double* _par_last_termination_times_ms;
+  double* _par_last_termination_attempts;
+  double* _par_last_gc_worker_end_times_ms;
 
   // indicates that we are in young GC mode
   bool _in_young_gc_mode;
@@ -559,13 +560,14 @@
   }
 
 protected:
-  void print_stats (int level, const char* str, double value);
-  void print_stats (int level, const char* str, int value);
-  void print_par_stats (int level, const char* str, double* data) {
+  void print_stats(int level, const char* str, double value);
+  void print_stats(int level, const char* str, int value);
+
+  void print_par_stats(int level, const char* str, double* data) {
     print_par_stats(level, str, data, true);
   }
-  void print_par_stats (int level, const char* str, double* data, bool summary);
-  void print_par_buffers (int level, const char* str, double* data, bool summary);
+  void print_par_stats(int level, const char* str, double* data, bool summary);
+  void print_par_sizes(int level, const char* str, double* data, bool summary);
 
   void check_other_times(int level,
                          NumberSeq* other_times_ms,
@@ -891,6 +893,10 @@
   virtual void record_full_collection_start();
   virtual void record_full_collection_end();
 
+  void record_gc_worker_start_time(int worker_i, double ms) {
+    _par_last_gc_worker_start_times_ms[worker_i] = ms;
+  }
+
   void record_ext_root_scan_time(int worker_i, double ms) {
     _par_last_ext_root_scan_times_ms[worker_i] = ms;
   }
@@ -912,10 +918,6 @@
     _all_mod_union_times_ms->add(ms);
   }
 
-  void record_update_rs_start_time(int thread, double ms) {
-    _par_last_update_rs_start_times_ms[thread] = ms;
-  }
-
   void record_update_rs_time(int thread, double ms) {
     _par_last_update_rs_times_ms[thread] = ms;
   }
@@ -925,10 +927,6 @@
     _par_last_update_rs_processed_buffers[thread] = processed_buffers;
   }
 
-  void record_scan_rs_start_time(int thread, double ms) {
-    _par_last_scan_rs_start_times_ms[thread] = ms;
-  }
-
   void record_scan_rs_time(int thread, double ms) {
     _par_last_scan_rs_times_ms[thread] = ms;
   }
@@ -953,16 +951,13 @@
     _par_last_obj_copy_times_ms[thread] += ms;
   }
 
-  void record_obj_copy_time(double ms) {
-    record_obj_copy_time(0, ms);
+  void record_termination(int thread, double ms, size_t attempts) {
+    _par_last_termination_times_ms[thread] = ms;
+    _par_last_termination_attempts[thread] = (double) attempts;
   }
 
-  void record_termination_time(int thread, double ms) {
-    _par_last_termination_times_ms[thread] = ms;
-  }
-
-  void record_termination_time(double ms) {
-    record_termination_time(0, ms);
+  void record_gc_worker_end_time(int worker_i, double ms) {
+    _par_last_gc_worker_end_times_ms[worker_i] = ms;
   }
 
   void record_pause_time_ms(double ms) {
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -303,7 +303,6 @@
   assert( _cards_scanned != NULL, "invariant" );
   _cards_scanned[worker_i] = scanRScl.cards_done();
 
-  _g1p->record_scan_rs_start_time(worker_i, rs_time_start * 1000.0);
   _g1p->record_scan_rs_time(worker_i, scan_rs_time_sec * 1000.0);
 }
 
@@ -311,8 +310,6 @@
   ConcurrentG1Refine* cg1r = _g1->concurrent_g1_refine();
 
   double start = os::elapsedTime();
-  _g1p->record_update_rs_start_time(worker_i, start * 1000.0);
-
   // Apply the appropriate closure to all remaining log entries.
   _g1->iterate_dirty_card_closure(false, worker_i);
   // Now there should be no dirty cards.
@@ -471,7 +468,6 @@
       updateRS(worker_i);
       scanNewRefsRS(oc, worker_i);
     } else {
-      _g1p->record_update_rs_start_time(worker_i, os::elapsedTime() * 1000.0);
       _g1p->record_update_rs_processed_buffers(worker_i, 0.0);
       _g1p->record_update_rs_time(worker_i, 0.0);
       _g1p->record_scan_new_refs_time(worker_i, 0.0);
@@ -479,7 +475,6 @@
     if (G1UseParallelRSetScanning || (worker_i == 0)) {
       scanRS(oc, worker_i);
     } else {
-      _g1p->record_scan_rs_start_time(worker_i, os::elapsedTime() * 1000.0);
       _g1p->record_scan_rs_time(worker_i, 0.0);
     }
   } else {
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -566,14 +566,14 @@
 #endif
 
   // Commit new or uncommit old pages, if necessary.
-  resize_commit_uncommit(changed_region, new_region);
+  if (resize_commit_uncommit(changed_region, new_region)) {
+    // Set the new start of the committed region
+    resize_update_committed_table(changed_region, new_region);
+  }
 
   // Update card table entries
   resize_update_card_table_entries(changed_region, new_region);
 
-  // Set the new start of the committed region
-  resize_update_committed_table(changed_region, new_region);
-
   // Update the covered region
   resize_update_covered_table(changed_region, new_region);
 
@@ -604,8 +604,9 @@
   debug_only(verify_guard();)
 }
 
-void CardTableExtension::resize_commit_uncommit(int changed_region,
+bool CardTableExtension::resize_commit_uncommit(int changed_region,
                                                 MemRegion new_region) {
+  bool result = false;
   // Commit new or uncommit old pages, if necessary.
   MemRegion cur_committed = _committed[changed_region];
   assert(_covered[changed_region].end() == new_region.end(),
@@ -675,20 +676,31 @@
                               "card table expansion");
       }
     }
+    result = true;
   } else if (new_start_aligned > cur_committed.start()) {
     // Shrink the committed region
+#if 0 // uncommitting space is currently unsafe because of the interactions
+      // of growing and shrinking regions.  One region A can uncommit space
+      // that it owns but which is being used by another region B (maybe).
+      // Region B has not committed the space because it was already
+      // committed by region A.
     MemRegion uncommit_region = committed_unique_to_self(changed_region,
       MemRegion(cur_committed.start(), new_start_aligned));
     if (!uncommit_region.is_empty()) {
       if (!os::uncommit_memory((char*)uncommit_region.start(),
                                uncommit_region.byte_size())) {
-        vm_exit_out_of_memory(uncommit_region.byte_size(),
-          "card table contraction");
+        // If the uncommit fails, ignore it.  Let the
+        // committed table resizing proceed even though the committed
+        // table will overstate the committed space.
       }
     }
+#else
+    assert(!result, "Should be false with current workaround");
+#endif
   }
   assert(_committed[changed_region].end() == cur_committed.end(),
     "end should not change");
+  return result;
 }
 
 void CardTableExtension::resize_update_committed_table(int changed_region,
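
A minimal sketch of the new contract in this file: the commit/uncommit step now reports whether it actually changed any committed pages, and the caller updates the committed table only in that case. Function names below are illustrative, not the card table API.

#include <cstdio>

static bool resize_commit_uncommit_sketch(bool needs_expand) {
  bool result = false;
  if (needs_expand) {
    // ... commit the new pages here ...
    result = true;
  } else {
    // Shrinking is skipped for now: one region could uncommit space that a
    // neighbouring region is still using, so report "no change".
  }
  return result;
}

int main() {
  if (resize_commit_uncommit_sketch(true))
    std::printf("update committed table\n");   // only after a real commit/uncommit
  return 0;
}
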
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.hpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.hpp	Fri Jul 02 01:36:15 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,7 +30,9 @@
 class CardTableExtension : public CardTableModRefBS {
  private:
   // Support methods for resizing the card table.
-  void resize_commit_uncommit(int changed_region, MemRegion new_region);
+  // resize_commit_uncommit() returns true if the pages were committed or
+  // uncommitted
+  bool resize_commit_uncommit(int changed_region, MemRegion new_region);
   void resize_update_card_table_entries(int changed_region,
                                         MemRegion new_region);
   void resize_update_committed_table(int changed_region, MemRegion new_region);
--- a/hotspot/src/share/vm/includeDB_core	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/includeDB_core	Fri Jul 02 01:36:15 2010 -0700
@@ -545,6 +545,7 @@
 
 ciCPCache.hpp                           ciClassList.hpp
 ciCPCache.hpp                           ciObject.hpp
+ciCPCache.hpp                           cpCacheOop.hpp
 
 ciEnv.cpp                               allocation.inline.hpp
 ciEnv.cpp                               ciConstant.hpp
@@ -823,6 +824,7 @@
 
 ciStreams.cpp                           ciCallSite.hpp
 ciStreams.cpp                           ciConstant.hpp
+ciStreams.cpp                           ciCPCache.hpp
 ciStreams.cpp                           ciField.hpp
 ciStreams.cpp                           ciStreams.hpp
 ciStreams.cpp                           ciUtilities.hpp
--- a/hotspot/src/share/vm/interpreter/bytecode.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/interpreter/bytecode.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -136,25 +136,24 @@
 // Implementation of Bytecode_invoke
 
 void Bytecode_invoke::verify() const {
-  Bytecodes::Code bc = adjusted_invoke_code();
   assert(is_valid(), "check invoke");
   assert(method()->constants()->cache() != NULL, "do not call this from verifier or rewriter");
 }
 
 
-symbolOop Bytecode_invoke::signature() const {
+symbolOop Bytecode_member_ref::signature() const {
   constantPoolOop constants = method()->constants();
   return constants->signature_ref_at(index());
 }
 
 
-symbolOop Bytecode_invoke::name() const {
+symbolOop Bytecode_member_ref::name() const {
   constantPoolOop constants = method()->constants();
   return constants->name_ref_at(index());
 }
 
 
-BasicType Bytecode_invoke::result_type(Thread *thread) const {
+BasicType Bytecode_member_ref::result_type(Thread *thread) const {
   symbolHandle sh(thread, signature());
   ResultTypeFinder rts(sh);
   rts.iterate();
@@ -167,9 +166,9 @@
   KlassHandle resolved_klass;
   constantPoolHandle constants(THREAD, _method->constants());
 
-  if (adjusted_invoke_code() == Bytecodes::_invokedynamic) {
+  if (java_code() == Bytecodes::_invokedynamic) {
     LinkResolver::resolve_dynamic_method(m, resolved_klass, constants, index(), CHECK_(methodHandle()));
-  } else if (adjusted_invoke_code() != Bytecodes::_invokeinterface) {
+  } else if (java_code() != Bytecodes::_invokeinterface) {
     LinkResolver::resolve_method(m, resolved_klass, constants, index(), CHECK_(methodHandle()));
   } else {
     LinkResolver::resolve_interface_method(m, resolved_klass, constants, index(), CHECK_(methodHandle()));
@@ -178,51 +177,68 @@
 }
 
 
-int Bytecode_invoke::index() const {
+int Bytecode_member_ref::index() const {
   // Note:  Rewriter::rewrite changes the Java_u2 of an invokedynamic to a native_u4,
   // at the same time it allocates per-call-site CP cache entries.
-  Bytecodes::Code stdc = Bytecodes::java_code(code());
-  Bytecode* invoke = Bytecode_at(bcp());
-  if (invoke->has_index_u4(stdc))
-    return invoke->get_index_u4(stdc);
+  Bytecodes::Code rawc = code();
+  Bytecode* invoke = bytecode();
+  if (invoke->has_index_u4(rawc))
+    return invoke->get_index_u4(rawc);
   else
-    return invoke->get_index_u2_cpcache(stdc);
+    return invoke->get_index_u2_cpcache(rawc);
 }
 
+int Bytecode_member_ref::pool_index() const {
+  int index = this->index();
+  DEBUG_ONLY({
+      if (!bytecode()->has_index_u4(code()))
+        index -= constantPoolOopDesc::CPCACHE_INDEX_TAG;
+    });
+  return _method->constants()->cache()->entry_at(index)->constant_pool_index();
+}
 
 // Implementation of Bytecode_field
 
 void Bytecode_field::verify() const {
-  Bytecodes::Code stdc = Bytecodes::java_code(code());
-  assert(stdc == Bytecodes::_putstatic || stdc == Bytecodes::_getstatic ||
-         stdc == Bytecodes::_putfield  || stdc == Bytecodes::_getfield, "check field");
-}
-
-
-bool Bytecode_field::is_static() const {
-  Bytecodes::Code stdc = Bytecodes::java_code(code());
-  return stdc == Bytecodes::_putstatic || stdc == Bytecodes::_getstatic;
+  assert(is_valid(), "check field");
 }
 
 
-int Bytecode_field::index() const {
-  Bytecode* invoke = Bytecode_at(bcp());
-  return invoke->get_index_u2_cpcache(Bytecodes::_getfield);
+// Implementation of Bytecode_loadconstant
+
+int Bytecode_loadconstant::raw_index() const {
+  Bytecode* bcp = bytecode();
+  Bytecodes::Code rawc = bcp->code();
+  assert(rawc != Bytecodes::_wide, "verifier prevents this");
+  if (Bytecodes::java_code(rawc) == Bytecodes::_ldc)
+    return bcp->get_index_u1(rawc);
+  else
+    return bcp->get_index_u2(rawc, false);
 }
 
-
-// Implementation of Bytecodes loac constant
+int Bytecode_loadconstant::pool_index() const {
+  int index = raw_index();
+  if (has_cache_index()) {
+    return _method->constants()->cache()->entry_at(index)->constant_pool_index();
+  }
+  return index;
+}
 
-int Bytecode_loadconstant::index() const {
-  Bytecodes::Code stdc = Bytecodes::java_code(code());
-  if (stdc != Bytecodes::_wide) {
-    if (Bytecodes::java_code(stdc) == Bytecodes::_ldc)
-      return get_index_u1(stdc);
-    else
-      return get_index_u2(stdc, false);
+BasicType Bytecode_loadconstant::result_type() const {
+  int index = pool_index();
+  constantTag tag = _method->constants()->tag_at(index);
+  return tag.basic_type();
+}
+
+oop Bytecode_loadconstant::resolve_constant(TRAPS) const {
+  assert(_method.not_null(), "must supply method to resolve constant");
+  int index = raw_index();
+  constantPoolOop constants = _method->constants();
+  if (has_cache_index()) {
+    return constants->resolve_cached_constant_at(index, THREAD);
+  } else {
+    return constants->resolve_constant_at(index, THREAD);
   }
-  stdc = Bytecodes::code_at(addr_at(1));
-  return get_index_u2(stdc, true);
 }
 
 //------------------------------------------------------------------------------
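
A toy model of the index plumbing used by Bytecode_loadconstant above: a rewritten load carries a CP-cache index whose entry remembers the original constant-pool index, while an ordinary ldc operand is already a pool index. The data layout below is invented for the sketch.

#include <cstdio>
#include <vector>

struct CacheEntry { int constant_pool_index; };

struct LoadConstant {
  bool has_cache_index;                  // true for the rewritten fast_aldc forms
  int  raw_index;                        // operand as it appears in the bytecode
  int pool_index(const std::vector<CacheEntry>& cache) const {
    return has_cache_index ? cache[raw_index].constant_pool_index : raw_index;
  }
};

int main() {
  std::vector<CacheEntry> cache = { {42} };
  LoadConstant plain = { false, 7 };     // plain ldc: operand is the pool index
  LoadConstant fast  = { true,  0 };     // fast_aldc: operand indexes the CP cache
  std::printf("%d %d\n", plain.pool_index(cache), fast.pool_index(cache));  // 7 42
  return 0;
}
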
--- a/hotspot/src/share/vm/interpreter/bytecode.hpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/interpreter/bytecode.hpp	Fri Jul 02 01:36:15 2010 -0700
@@ -76,9 +76,13 @@
           return Bytes::get_native_u2(p);
     else  return Bytes::get_Java_u2(p);
   }
+  int get_index_u1_cpcache(Bytecodes::Code bc) const {
+    assert_same_format_as(bc); assert_index_size(1, bc);
+    return *(jubyte*)addr_at(1) + constantPoolOopDesc::CPCACHE_INDEX_TAG;
+  }
   int get_index_u2_cpcache(Bytecodes::Code bc) const {
     assert_same_format_as(bc); assert_index_size(2, bc); assert_native_index(bc);
-    return Bytes::get_native_u2(addr_at(1)) DEBUG_ONLY(+ constantPoolOopDesc::CPCACHE_INDEX_TAG);
+    return Bytes::get_native_u2(addr_at(1)) + constantPoolOopDesc::CPCACHE_INDEX_TAG;
   }
   int get_index_u4(Bytecodes::Code bc) const {
     assert_same_format_as(bc); assert_index_size(4, bc);
@@ -152,7 +156,7 @@
 
 inline Bytecode_lookupswitch* Bytecode_lookupswitch_at(address bcp) {
   Bytecode_lookupswitch* b = (Bytecode_lookupswitch*)bcp;
-  debug_only(b->verify());
+  DEBUG_ONLY(b->verify());
   return b;
 }
 
@@ -174,44 +178,56 @@
 
 inline Bytecode_tableswitch* Bytecode_tableswitch_at(address bcp) {
   Bytecode_tableswitch* b = (Bytecode_tableswitch*)bcp;
-  debug_only(b->verify());
+  DEBUG_ONLY(b->verify());
   return b;
 }
 
 
-// Abstraction for invoke_{virtual, static, interface, special}
+// Common code for decoding invokes and field references.
 
-class Bytecode_invoke: public ResourceObj {
+class Bytecode_member_ref: public ResourceObj {
  protected:
   methodHandle _method;                          // method containing the bytecode
   int          _bci;                             // position of the bytecode
 
-  Bytecode_invoke(methodHandle method, int bci)  : _method(method), _bci(bci) {}
+  Bytecode_member_ref(methodHandle method, int bci)  : _method(method), _bci(bci) {}
+
+ public:
+  // Attributes
+  methodHandle method() const                    { return _method; }
+  int          bci() const                       { return _bci; }
+  address      bcp() const                       { return _method->bcp_from(bci()); }
+  Bytecode*    bytecode() const                  { return Bytecode_at(bcp()); }
+
+  int          index() const;                    // cache index (loaded from instruction)
+  int          pool_index() const;               // constant pool index
+  symbolOop    name() const;                     // returns the name of the method or field
+  symbolOop    signature() const;                // returns the signature of the method or field
+
+  BasicType    result_type(Thread* thread) const; // returns the result type of the getfield or invoke
+
+  Bytecodes::Code code() const                   { return Bytecodes::code_at(bcp(), _method()); }
+  Bytecodes::Code java_code() const              { return Bytecodes::java_code(code()); }
+};
+
+// Abstraction for invoke_{virtual, static, interface, special}
+
+class Bytecode_invoke: public Bytecode_member_ref {
+ protected:
+  Bytecode_invoke(methodHandle method, int bci)  : Bytecode_member_ref(method, bci) {}
 
  public:
   void verify() const;
 
   // Attributes
-  methodHandle method() const                    { return _method; }
-  int          bci() const                       { return _bci; }
-  address      bcp() const                       { return _method->bcp_from(bci()); }
-
-  int          index() const;                    // the constant pool index for the invoke
-  symbolOop    name() const;                     // returns the name of the invoked method
-  symbolOop    signature() const;                // returns the signature of the invoked method
-  BasicType    result_type(Thread *thread) const; // returns the result type of the invoke
-
-  Bytecodes::Code code() const                   { return Bytecodes::code_at(bcp(), _method()); }
-  Bytecodes::Code adjusted_invoke_code() const   { return Bytecodes::java_code(code()); }
-
   methodHandle static_target(TRAPS);             // "specified" method   (from constant pool)
 
   // Testers
-  bool is_invokeinterface() const                { return adjusted_invoke_code() == Bytecodes::_invokeinterface; }
-  bool is_invokevirtual() const                  { return adjusted_invoke_code() == Bytecodes::_invokevirtual; }
-  bool is_invokestatic() const                   { return adjusted_invoke_code() == Bytecodes::_invokestatic; }
-  bool is_invokespecial() const                  { return adjusted_invoke_code() == Bytecodes::_invokespecial; }
-  bool is_invokedynamic() const                  { return adjusted_invoke_code() == Bytecodes::_invokedynamic; }
+  bool is_invokeinterface() const                { return java_code() == Bytecodes::_invokeinterface; }
+  bool is_invokevirtual() const                  { return java_code() == Bytecodes::_invokevirtual; }
+  bool is_invokestatic() const                   { return java_code() == Bytecodes::_invokestatic; }
+  bool is_invokespecial() const                  { return java_code() == Bytecodes::_invokespecial; }
+  bool is_invokedynamic() const                  { return java_code() == Bytecodes::_invokedynamic; }
 
   bool has_receiver() const                      { return !is_invokestatic() && !is_invokedynamic(); }
 
@@ -230,7 +246,7 @@
 
 inline Bytecode_invoke* Bytecode_invoke_at(methodHandle method, int bci) {
   Bytecode_invoke* b = new Bytecode_invoke(method, bci);
-  debug_only(b->verify());
+  DEBUG_ONLY(b->verify());
   return b;
 }
 
@@ -240,21 +256,34 @@
 }
 
 
-// Abstraction for all field accesses (put/get field/static_
-class Bytecode_field: public Bytecode {
-public:
+// Abstraction for all field accesses (put/get field/static)
+class Bytecode_field: public Bytecode_member_ref {
+ protected:
+  Bytecode_field(methodHandle method, int bci)  : Bytecode_member_ref(method, bci) {}
+
+ public:
+  // Testers
+  bool is_getfield() const                       { return java_code() == Bytecodes::_getfield; }
+  bool is_putfield() const                       { return java_code() == Bytecodes::_putfield; }
+  bool is_getstatic() const                      { return java_code() == Bytecodes::_getstatic; }
+  bool is_putstatic() const                      { return java_code() == Bytecodes::_putstatic; }
+
+  bool is_getter() const                         { return is_getfield()  || is_getstatic(); }
+  bool is_static() const                         { return is_getstatic() || is_putstatic(); }
+
+  bool is_valid() const                          { return is_getfield()   ||
+                                                          is_putfield()   ||
+                                                          is_getstatic()  ||
+                                                          is_putstatic(); }
   void verify() const;
 
-  int  index() const;
-  bool is_static() const;
-
   // Creation
-  inline friend Bytecode_field* Bytecode_field_at(const methodOop method, address bcp);
+  inline friend Bytecode_field* Bytecode_field_at(methodHandle method, int bci);
 };
 
-inline Bytecode_field* Bytecode_field_at(const methodOop method, address bcp) {
-  Bytecode_field* b = (Bytecode_field*)bcp;
-  debug_only(b->verify());
+inline Bytecode_field* Bytecode_field_at(methodHandle method, int bci) {
+  Bytecode_field* b = new Bytecode_field(method, bci);
+  DEBUG_ONLY(b->verify());
   return b;
 }
 
@@ -274,7 +303,7 @@
 
 inline Bytecode_checkcast* Bytecode_checkcast_at(address bcp) {
   Bytecode_checkcast* b = (Bytecode_checkcast*)bcp;
-  debug_only(b->verify());
+  DEBUG_ONLY(b->verify());
   return b;
 }
 
@@ -294,7 +323,7 @@
 
 inline Bytecode_instanceof* Bytecode_instanceof_at(address bcp) {
   Bytecode_instanceof* b = (Bytecode_instanceof*)bcp;
-  debug_only(b->verify());
+  DEBUG_ONLY(b->verify());
   return b;
 }
 
@@ -312,7 +341,7 @@
 
 inline Bytecode_new* Bytecode_new_at(address bcp) {
   Bytecode_new* b = (Bytecode_new*)bcp;
-  debug_only(b->verify());
+  DEBUG_ONLY(b->verify());
   return b;
 }
 
@@ -330,7 +359,7 @@
 
 inline Bytecode_multianewarray* Bytecode_multianewarray_at(address bcp) {
   Bytecode_multianewarray* b = (Bytecode_multianewarray*)bcp;
-  debug_only(b->verify());
+  DEBUG_ONLY(b->verify());
   return b;
 }
 
@@ -348,29 +377,57 @@
 
 inline Bytecode_anewarray* Bytecode_anewarray_at(address bcp) {
   Bytecode_anewarray* b = (Bytecode_anewarray*)bcp;
-  debug_only(b->verify());
+  DEBUG_ONLY(b->verify());
   return b;
 }
 
 
 // Abstraction for ldc, ldc_w and ldc2_w
 
-class Bytecode_loadconstant: public Bytecode {
+class Bytecode_loadconstant: public ResourceObj {
+ private:
+  int          _bci;
+  methodHandle _method;
+
+  Bytecodes::Code code() const                   { return bytecode()->code(); }
+
+  int raw_index() const;
+
+  Bytecode_loadconstant(methodHandle method, int bci) : _method(method), _bci(bci) {}
+
  public:
+  // Attributes
+  methodHandle method() const                    { return _method; }
+  int          bci() const                       { return _bci; }
+  address      bcp() const                       { return _method->bcp_from(bci()); }
+  Bytecode*    bytecode() const                  { return Bytecode_at(bcp()); }
+
   void verify() const {
+    assert(_method.not_null(), "must supply method");
     Bytecodes::Code stdc = Bytecodes::java_code(code());
     assert(stdc == Bytecodes::_ldc ||
            stdc == Bytecodes::_ldc_w ||
            stdc == Bytecodes::_ldc2_w, "load constant");
   }
 
-  int index() const;
+  // Only non-standard bytecodes (fast_aldc) have CP cache indexes.
+  bool has_cache_index() const { return code() >= Bytecodes::number_of_java_codes; }
 
-  inline friend Bytecode_loadconstant* Bytecode_loadconstant_at(const methodOop method, address bcp);
+  int pool_index() const;               // index into constant pool
+  int cache_index() const {             // index into CP cache (or -1 if none)
+    return has_cache_index() ? raw_index() : -1;
+  }
+
+  BasicType result_type() const;        // returns the result type of the ldc
+
+  oop resolve_constant(TRAPS) const;
+
+  // Creation
+  inline friend Bytecode_loadconstant* Bytecode_loadconstant_at(methodHandle method, int bci);
 };
 
-inline Bytecode_loadconstant* Bytecode_loadconstant_at(const methodOop method, address bcp) {
-  Bytecode_loadconstant* b = (Bytecode_loadconstant*)bcp;
-  debug_only(b->verify());
+inline Bytecode_loadconstant* Bytecode_loadconstant_at(methodHandle method, int bci) {
+  Bytecode_loadconstant* b = new Bytecode_loadconstant(method, bci);
+  DEBUG_ONLY(b->verify());
   return b;
 }
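
The index-tagging idea behind CPCACHE_INDEX_TAG, shown with an arbitrary tag value: biasing every CP-cache index by a large constant keeps it from ever being confused with a plain constant-pool index. Only the bias-and-check pattern is meant to match; the constant and representation here are not HotSpot's.

#include <cassert>
#include <cstdio>

static const int CPCACHE_INDEX_TAG = 0x10000;   // illustrative bias only

static int  tag_cache_index(int raw)   { return raw + CPCACHE_INDEX_TAG; }
static bool is_cache_index(int idx)    { return idx >= CPCACHE_INDEX_TAG; }
static int  untag_cache_index(int idx) { assert(is_cache_index(idx));
                                         return idx - CPCACHE_INDEX_TAG; }

int main() {
  int tagged = tag_cache_index(5);
  std::printf("%d %d\n", is_cache_index(tagged), untag_cache_index(tagged));  // 1 5
  return 0;
}
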
--- a/hotspot/src/share/vm/interpreter/bytecodeTracer.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/interpreter/bytecodeTracer.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -49,6 +49,7 @@
 
   int       get_index_u1()           { return *(address)_next_pc++; }
   int       get_index_u2()           { int i=Bytes::get_Java_u2(_next_pc); _next_pc+=2; return i; }
+  int       get_index_u1_cpcache()   { return get_index_u1() + constantPoolOopDesc::CPCACHE_INDEX_TAG; }
   int       get_index_u2_cpcache()   { int i=Bytes::get_native_u2(_next_pc); _next_pc+=2; return i + constantPoolOopDesc::CPCACHE_INDEX_TAG; }
   int       get_index_u4()           { int i=Bytes::get_native_u4(_next_pc); _next_pc+=4; return i; }
   int       get_index_special()      { return (is_wide()) ? get_index_u2() : get_index_u1(); }
@@ -60,6 +61,7 @@
   bool      check_index(int i, int& cp_index, outputStream* st = tty);
   void      print_constant(int i, outputStream* st = tty);
   void      print_field_or_method(int i, outputStream* st = tty);
+  void      print_field_or_method(int orig_i, int i, outputStream* st = tty);
   void      print_attributes(int bci, outputStream* st = tty);
   void      bytecode_epilog(int bci, outputStream* st = tty);
 
@@ -177,18 +179,29 @@
   _closure->trace(method, bcp, st);
 }
 
+void print_symbol(symbolOop sym, outputStream* st) {
+  char buf[40];
+  int len = sym->utf8_length();
+  if (len >= (int)sizeof(buf)) {
+    st->print_cr(" %s...[%d]", sym->as_C_string(buf, sizeof(buf)), len);
+  } else {
+    st->print(" ");
+    sym->print_on(st); st->cr();
+  }
+}
+
 void print_oop(oop value, outputStream* st) {
   if (value == NULL) {
     st->print_cr(" NULL");
-  } else {
+  } else if (java_lang_String::is_instance(value)) {
     EXCEPTION_MARK;
     Handle h_value (THREAD, value);
     symbolHandle sym = java_lang_String::as_symbol(h_value, CATCH);
-    if (sym->utf8_length() > 32) {
-      st->print_cr(" ....");
-    } else {
-      sym->print_on(st); st->cr();
-    }
+    print_symbol(sym(), st);
+  } else if (value->is_symbol()) {
+    print_symbol(symbolOop(value), st);
+  } else {
+    st->print_cr(" " PTR_FORMAT, (intptr_t) value);
   }
 }
 
@@ -279,16 +292,27 @@
   } else if (tag.is_double()) {
     st->print_cr(" %f", constants->double_at(i));
   } else if (tag.is_string()) {
-    oop string = constants->resolved_string_at(i);
+    oop string = constants->pseudo_string_at(i);
     print_oop(string, st);
   } else if (tag.is_unresolved_string()) {
-    st->print_cr(" <unresolved string at %d>", i);
+    const char* string = constants->string_at_noresolve(i);
+    st->print_cr(" %s", string);
   } else if (tag.is_klass()) {
     st->print_cr(" %s", constants->resolved_klass_at(i)->klass_part()->external_name());
   } else if (tag.is_unresolved_klass()) {
     st->print_cr(" <unresolved klass at %d>", i);
   } else if (tag.is_object()) {
-    st->print_cr(" " PTR_FORMAT, constants->object_at(i));
+    st->print(" <Object>");
+    print_oop(constants->object_at(i), st);
+  } else if (tag.is_method_type()) {
+    int i2 = constants->method_type_index_at(i);
+    st->print(" <MethodType> %d", i2);
+    print_oop(constants->symbol_at(i2), st);
+  } else if (tag.is_method_handle()) {
+    int kind = constants->method_handle_ref_kind_at(i);
+    int i2 = constants->method_handle_index_at(i);
+    st->print(" <MethodHandle of kind %d index at %d>", kind, i2);
+    print_field_or_method(-i, i2, st);
   } else {
     st->print_cr(" bad tag=%d at %d", tag.value(), i);
   }
@@ -297,7 +321,10 @@
 void BytecodePrinter::print_field_or_method(int i, outputStream* st) {
   int orig_i = i;
   if (!check_index(orig_i, i, st))  return;
+  print_field_or_method(orig_i, i, st);
+}
 
+void BytecodePrinter::print_field_or_method(int orig_i, int i, outputStream* st) {
   constantPoolOop constants = method()->constants();
   constantTag tag = constants->tag_at(i);
 
@@ -314,9 +341,11 @@
     return;
   }
 
+  symbolOop klass = constants->klass_name_at(constants->uncached_klass_ref_index_at(i));
   symbolOop name = constants->uncached_name_ref_at(i);
   symbolOop signature = constants->uncached_signature_ref_at(i);
-  st->print_cr(" %d <%s> <%s> ", i, name->as_C_string(), signature->as_C_string());
+  const char* sep = (tag.is_field() ? "/" : "");
+  st->print_cr(" %d <%s.%s%s%s> ", i, klass->as_C_string(), name->as_C_string(), sep, signature->as_C_string());
 }
 
 
@@ -340,12 +369,20 @@
       st->print_cr(" " INT32_FORMAT, get_short());
       break;
     case Bytecodes::_ldc:
-      print_constant(get_index_u1(), st);
+      if (Bytecodes::uses_cp_cache(raw_code())) {
+        print_constant(get_index_u1_cpcache(), st);
+      } else {
+        print_constant(get_index_u1(), st);
+      }
       break;
 
     case Bytecodes::_ldc_w:
     case Bytecodes::_ldc2_w:
-      print_constant(get_index_u2(), st);
+      if (Bytecodes::uses_cp_cache(raw_code())) {
+        print_constant(get_index_u2_cpcache(), st);
+      } else {
+        print_constant(get_index_u2(), st);
+      }
       break;
 
     case Bytecodes::_iload:
--- a/hotspot/src/share/vm/interpreter/bytecodes.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/interpreter/bytecodes.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -489,6 +489,9 @@
 
   def(_return_register_finalizer , "return_register_finalizer" , "b"    , NULL    , T_VOID   ,  0, true, _return);
 
+  def(_fast_aldc           , "fast_aldc"           , "bj"   , NULL    , T_OBJECT,   1, true,  _ldc   );
+  def(_fast_aldc_w         , "fast_aldc_w"         , "bJJ"  , NULL    , T_OBJECT,   1, true,  _ldc_w );
+
   def(_shouldnotreachhere  , "_shouldnotreachhere" , "b"    , NULL    , T_VOID   ,  0, false);
 
   // platform specific JVM bytecodes
--- a/hotspot/src/share/vm/interpreter/bytecodes.hpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/interpreter/bytecodes.hpp	Fri Jul 02 01:36:15 2010 -0700
@@ -270,6 +270,10 @@
     _fast_linearswitch    ,
     _fast_binaryswitch    ,
 
+    // special handling of oop constants:
+    _fast_aldc            ,
+    _fast_aldc_w          ,
+
     _return_register_finalizer    ,
 
     _shouldnotreachhere,      // For debugging
--- a/hotspot/src/share/vm/interpreter/interpreter.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/interpreter/interpreter.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -267,20 +267,6 @@
 }
 #endif // PRODUCT
 
-static BasicType constant_pool_type(methodOop method, int index) {
-  constantTag tag = method->constants()->tag_at(index);
-       if (tag.is_int              ()) return T_INT;
-  else if (tag.is_float            ()) return T_FLOAT;
-  else if (tag.is_long             ()) return T_LONG;
-  else if (tag.is_double           ()) return T_DOUBLE;
-  else if (tag.is_string           ()) return T_OBJECT;
-  else if (tag.is_unresolved_string()) return T_OBJECT;
-  else if (tag.is_klass            ()) return T_OBJECT;
-  else if (tag.is_unresolved_klass ()) return T_OBJECT;
-  ShouldNotReachHere();
-  return T_ILLEGAL;
-}
-
 
 //------------------------------------------------------------------------------------------------------------------------
 // Deoptimization support
@@ -330,13 +316,15 @@
     }
 
     case Bytecodes::_ldc   :
-      type = constant_pool_type( method, *(bcp+1) );
-      break;
-
     case Bytecodes::_ldc_w : // fall through
     case Bytecodes::_ldc2_w:
-      type = constant_pool_type( method, Bytes::get_Java_u2(bcp+1) );
-      break;
+      {
+        Thread *thread = Thread::current();
+        ResourceMark rm(thread);
+        methodHandle mh(thread, method);
+        type = Bytecode_loadconstant_at(mh, bci)->result_type();
+        break;
+      }
 
     default:
       type = Bytecodes::result_type(code);
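
Roughly what replaced the deleted constant_pool_type() helper: the ldc result type is now derived from the tag of the referenced constant, with MethodHandle and MethodType joining the object-typed tags. Tag values follow the class file format; the enums are abbreviated for the sketch and are not the VM's BasicType or constantTag.

#include <cstdio>

enum BasicType { T_INT, T_FLOAT, T_LONG, T_DOUBLE, T_OBJECT, T_ILLEGAL };
enum CTag { TAG_INT = 3, TAG_FLOAT = 4, TAG_LONG = 5, TAG_DOUBLE = 6,
            TAG_CLASS = 7, TAG_STRING = 8,
            TAG_METHOD_HANDLE = 15, TAG_METHOD_TYPE = 16 };

static BasicType ldc_result_type(CTag tag) {
  switch (tag) {
    case TAG_INT:    return T_INT;
    case TAG_FLOAT:  return T_FLOAT;
    case TAG_LONG:   return T_LONG;
    case TAG_DOUBLE: return T_DOUBLE;
    case TAG_CLASS: case TAG_STRING:
    case TAG_METHOD_HANDLE: case TAG_METHOD_TYPE:
      return T_OBJECT;                   // MethodHandle/MethodType load as objects
    default:         return T_ILLEGAL;
  }
}

int main() {
  std::printf("%d\n", ldc_result_type(TAG_METHOD_TYPE) == T_OBJECT);  // 1
  return 0;
}
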
--- a/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -83,6 +83,18 @@
   }
 IRT_END
 
+IRT_ENTRY(void, InterpreterRuntime::resolve_ldc(JavaThread* thread, Bytecodes::Code bytecode)) {
+  assert(bytecode == Bytecodes::_fast_aldc ||
+         bytecode == Bytecodes::_fast_aldc_w, "wrong bc");
+  ResourceMark rm(thread);
+  methodHandle m (thread, method(thread));
+  Bytecode_loadconstant* ldc = Bytecode_loadconstant_at(m, bci(thread));
+  oop result = ldc->resolve_constant(THREAD);
+  DEBUG_ONLY(ConstantPoolCacheEntry* cpce = m->constants()->cache()->entry_at(ldc->cache_index()));
+  assert(result == cpce->f1(), "expected result for assembly code");
+}
+IRT_END
+
 
 //------------------------------------------------------------------------------------------------------------------------
 // Allocation
@@ -328,7 +340,7 @@
   typeArrayHandle    h_extable  (thread, h_method->exception_table());
   bool               should_repeat;
   int                handler_bci;
-  int                current_bci = bcp(thread) - h_method->code_base();
+  int                current_bci = bci(thread);
 
   // Need to do this check first since when _do_not_unlock_if_synchronized
   // is set, we don't want to trigger any classloading which may make calls
@@ -615,8 +627,7 @@
   if (bytecode == Bytecodes::_invokevirtual || bytecode == Bytecodes::_invokeinterface) {
     ResourceMark rm(thread);
     methodHandle m (thread, method(thread));
-    int bci = m->bci_from(bcp(thread));
-    Bytecode_invoke* call = Bytecode_invoke_at(m, bci);
+    Bytecode_invoke* call = Bytecode_invoke_at(m, bci(thread));
     symbolHandle signature (thread, call->signature());
     receiver = Handle(thread,
                   thread->last_frame().interpreter_callee_receiver(signature));
@@ -1257,7 +1268,7 @@
   Bytecode_invoke* invoke = Bytecode_invoke_at(mh, bci);
   ArgumentSizeComputer asc(invoke->signature());
   int size_of_arguments = (asc.size() + (invoke->has_receiver() ? 1 : 0)); // receiver
-  Copy::conjoint_bytes(src_address, dest_address,
+  Copy::conjoint_jbytes(src_address, dest_address,
                        size_of_arguments * Interpreter::stackElementSize);
 IRT_END
 #endif
--- a/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp	Fri Jul 02 01:36:15 2010 -0700
@@ -34,6 +34,7 @@
   static frame     last_frame(JavaThread *thread)    { return thread->last_frame(); }
   static methodOop method(JavaThread *thread)        { return last_frame(thread).interpreter_frame_method(); }
   static address   bcp(JavaThread *thread)           { return last_frame(thread).interpreter_frame_bcp(); }
+  static int       bci(JavaThread *thread)           { return last_frame(thread).interpreter_frame_bci(); }
   static void      set_bcp_and_mdp(address bcp, JavaThread*thread);
   static Bytecodes::Code code(JavaThread *thread)    {
     // pass method to avoid calling unsafe bcp_to_method (partial fix 4926272)
@@ -59,6 +60,7 @@
  public:
   // Constants
   static void    ldc           (JavaThread* thread, bool wide);
+  static void    resolve_ldc   (JavaThread* thread, Bytecodes::Code bytecode);
 
   // Allocation
   static void    _new          (JavaThread* thread, constantPoolOopDesc* pool, int index);
--- a/hotspot/src/share/vm/interpreter/rewriter.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/interpreter/rewriter.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -38,6 +38,8 @@
       case JVM_CONSTANT_InterfaceMethodref:
       case JVM_CONSTANT_Fieldref          : // fall through
       case JVM_CONSTANT_Methodref         : // fall through
+      case JVM_CONSTANT_MethodHandle      : // fall through
+      case JVM_CONSTANT_MethodType        : // fall through
         add_cp_cache_entry(i);
         break;
     }
@@ -131,6 +133,27 @@
 }
 
 
+// Rewrite some ldc bytecodes to _fast_aldc
+void Rewriter::maybe_rewrite_ldc(address bcp, int offset, bool is_wide) {
+  assert((*bcp) == (is_wide ? Bytecodes::_ldc_w : Bytecodes::_ldc), "");
+  address p = bcp + offset;
+  int cp_index = is_wide ? Bytes::get_Java_u2(p) : (u1)(*p);
+  constantTag tag = _pool->tag_at(cp_index).value();
+  if (tag.is_method_handle() || tag.is_method_type()) {
+    int cache_index = cp_entry_to_cp_cache(cp_index);
+    if (is_wide) {
+      (*bcp) = Bytecodes::_fast_aldc_w;
+      assert(cache_index == (u2)cache_index, "");
+      Bytes::put_native_u2(p, cache_index);
+    } else {
+      (*bcp) = Bytecodes::_fast_aldc;
+      assert(cache_index == (u1)cache_index, "");
+      (*p) = (u1)cache_index;
+    }
+  }
+}
+
+
 // Rewrites a method given the index_map information
 void Rewriter::scan_method(methodOop method) {
 
@@ -198,6 +221,12 @@
         case Bytecodes::_invokedynamic:
           rewrite_invokedynamic(bcp, prefix_length+1);
           break;
+        case Bytecodes::_ldc:
+          maybe_rewrite_ldc(bcp, prefix_length+1, false);
+          break;
+        case Bytecodes::_ldc_w:
+          maybe_rewrite_ldc(bcp, prefix_length+1, true);
+          break;
         case Bytecodes::_jsr            : // fall through
         case Bytecodes::_jsr_w          : nof_jsrs++;                   break;
         case Bytecodes::_monitorenter   : // fall through
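
The rewriting rule from maybe_rewrite_ldc above, reduced to a runnable sketch over a fake code array: an ldc whose constant is a MethodHandle or MethodType is retagged to a fast opcode and its operand becomes the pre-allocated CP-cache index. The fast opcode value is invented; the constant tags use the standard JVM_CONSTANT numbering.

#include <cstdint>
#include <cstdio>
#include <map>

enum Op   : uint8_t { OP_LDC = 0x12, OP_FAST_ALDC = 0xE5 };   // fast opcode is a stand-in
enum CTag : uint8_t { TAG_STRING = 8, TAG_METHOD_HANDLE = 15, TAG_METHOD_TYPE = 16 };

static void maybe_rewrite_ldc(uint8_t* bcp, const CTag* tags,
                              std::map<int,int>& cp_to_cache) {
  if (bcp[0] != OP_LDC) return;
  int cp_index = bcp[1];                     // narrow (u1) operand of plain ldc
  CTag tag = tags[cp_index];
  if (tag == TAG_METHOD_HANDLE || tag == TAG_METHOD_TYPE) {
    int cache_index = cp_to_cache[cp_index]; // cache entry allocated earlier, one per entry
    bcp[0] = OP_FAST_ALDC;                   // retag the bytecode
    bcp[1] = (uint8_t) cache_index;          // operand is now a CP-cache index
  }
}

int main() {
  CTag tags[4] = { TAG_STRING, TAG_STRING, TAG_METHOD_HANDLE, TAG_STRING };
  std::map<int,int> cp_to_cache = { {2, 0} };
  uint8_t code[2] = { OP_LDC, 2 };
  maybe_rewrite_ldc(code, tags, cp_to_cache);
  std::printf("%02x %d\n", code[0], code[1]);   // e5 0
  return 0;
}
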
--- a/hotspot/src/share/vm/interpreter/rewriter.hpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/interpreter/rewriter.hpp	Fri Jul 02 01:36:15 2010 -0700
@@ -66,6 +66,7 @@
   void rewrite_Object_init(methodHandle m, TRAPS);
   void rewrite_member_reference(address bcp, int offset);
   void rewrite_invokedynamic(address bcp, int offset);
+  void maybe_rewrite_ldc(address bcp, int offset, bool is_wide);
 
  public:
   // Driver routine:
--- a/hotspot/src/share/vm/interpreter/templateTable.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/interpreter/templateTable.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -507,6 +507,9 @@
   def(Bytecodes::_fast_linearswitch   , ubcp|disp|____|____, itos, vtos, fast_linearswitch   ,  _           );
   def(Bytecodes::_fast_binaryswitch   , ubcp|disp|____|____, itos, vtos, fast_binaryswitch   ,  _           );
 
+  def(Bytecodes::_fast_aldc           , ubcp|____|clvm|____, vtos, atos, fast_aldc           ,  false       );
+  def(Bytecodes::_fast_aldc_w         , ubcp|____|clvm|____, vtos, atos, fast_aldc           ,  true        );
+
   def(Bytecodes::_return_register_finalizer , ____|disp|clvm|____, vtos, vtos, _return       ,  vtos        );
 
   def(Bytecodes::_shouldnotreachhere   , ____|____|____|____, vtos, vtos, shouldnotreachhere ,  _           );
--- a/hotspot/src/share/vm/interpreter/templateTable.hpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/interpreter/templateTable.hpp	Fri Jul 02 01:36:15 2010 -0700
@@ -123,6 +123,7 @@
   static void sipush();
   static void ldc(bool wide);
   static void ldc2_w();
+  static void fast_aldc(bool wide);
 
   static void locals_index(Register reg, int offset = 1);
   static void iload();
--- a/hotspot/src/share/vm/memory/cardTableModRefBS.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/memory/cardTableModRefBS.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -284,12 +284,19 @@
         committed_unique_to_self(ind, MemRegion(new_end_aligned,
                                                 cur_committed.end()));
       if (!uncommit_region.is_empty()) {
-        if (!os::uncommit_memory((char*)uncommit_region.start(),
-                                 uncommit_region.byte_size())) {
-          assert(false, "Card table contraction failed");
-          // The call failed so don't change the end of the
-          // committed region.  This is better than taking the
-          // VM down.
+        // It is not safe to uncommit cards if the boundary between
+        // the generations is moving.  A shrink can uncommit cards
+        // owned by generation A but being used by generation B.
+        if (!UseAdaptiveGCBoundary) {
+          if (!os::uncommit_memory((char*)uncommit_region.start(),
+                                   uncommit_region.byte_size())) {
+            assert(false, "Card table contraction failed");
+            // The call failed so don't change the end of the
+            // committed region.  This is better than taking the
+            // VM down.
+            new_end_aligned = _committed[ind].end();
+          }
+        } else {
           new_end_aligned = _committed[ind].end();
         }
       }
@@ -297,6 +304,19 @@
     // In any case, we can reset the end of the current committed entry.
     _committed[ind].set_end(new_end_aligned);
 
+#ifdef ASSERT
+    // Check that the last card in the new region is committed according
+    // to the tables.
+    bool covered = false;
+    for (int cr = 0; cr < _cur_covered_regions; cr++) {
+      if (_committed[cr].contains(new_end - 1)) {
+        covered = true;
+        break;
+      }
+    }
+    assert(covered, "Card for end of new region not committed");
+#endif
+
     // The default of 0 is not necessarily clean cards.
     jbyte* entry;
     if (old_region.last() < _whole_heap.start()) {
@@ -354,6 +374,9 @@
                   addr_for((jbyte*) _committed[ind].start()),
                   addr_for((jbyte*) _committed[ind].last()));
   }
+  // Touch the last card of the covered region to show that it
+  // is committed (or to fail fast with a SEGV if it is not).
+  debug_only(*byte_for(_covered[ind].last());)
   debug_only(verify_guard();)
 }
 
--- a/hotspot/src/share/vm/memory/genCollectedHeap.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -179,9 +179,14 @@
     }
     n_covered_regions += _gen_specs[i]->n_covered_regions();
   }
-  assert(total_reserved % pageSize == 0, "Gen size");
+  assert(total_reserved % pageSize == 0,
+         err_msg("Gen size; total_reserved=" SIZE_FORMAT ", pageSize="
+                 SIZE_FORMAT, total_reserved, pageSize));
   total_reserved += perm_gen_spec->max_size();
-  assert(total_reserved % pageSize == 0, "Perm Gen size");
+  assert(total_reserved % pageSize == 0,
+         err_msg("Perm size; total_reserved=" SIZE_FORMAT ", pageSize="
+                 SIZE_FORMAT ", perm gen max=" SIZE_FORMAT, total_reserved,
+                 pageSize, perm_gen_spec->max_size()));
 
   if (total_reserved < perm_gen_spec->max_size()) {
     vm_exit_during_initialization(overflow_msg);
--- a/hotspot/src/share/vm/oops/constantPoolKlass.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/oops/constantPoolKlass.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -372,6 +372,13 @@
         entry->print_value_on(st);
         }
         break;
+      case JVM_CONSTANT_MethodHandle :
+        st->print("ref_kind=%d", cp->method_handle_ref_kind_at(index));
+        st->print(" ref_index=%d", cp->method_handle_index_at(index));
+        break;
+      case JVM_CONSTANT_MethodType :
+        st->print("signature_index=%d", cp->method_type_index_at(index));
+        break;
       default:
         ShouldNotReachHere();
         break;
@@ -437,6 +444,7 @@
           // can be non-perm, can be non-instance (array)
         }
       }
+      // FIXME: verify JSR 292 tags JVM_CONSTANT_MethodHandle, etc.
       base++;
     }
     guarantee(cp->tags()->is_perm(),         "should be in permspace");
--- a/hotspot/src/share/vm/oops/constantPoolOop.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/oops/constantPoolOop.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -358,6 +358,11 @@
   return klass_at_noresolve(ref_index);
 }
 
+symbolOop constantPoolOopDesc::uncached_klass_ref_at_noresolve(int which) {
+  jint ref_index = uncached_klass_ref_index_at(which);
+  return klass_at_noresolve(ref_index);
+}
+
 char* constantPoolOopDesc::string_at_noresolve(int which) {
   // Test entry type in case string is resolved while in here.
   oop entry = *(obj_at_addr(which));
@@ -384,6 +389,119 @@
   }
 }
 
+oop constantPoolOopDesc::resolve_constant_at_impl(constantPoolHandle this_oop, int index, int cache_index, TRAPS) {
+  oop result_oop = NULL;
+  if (cache_index >= 0) {
+    assert(index < 0, "only one kind of index at a time");
+    ConstantPoolCacheEntry* cpc_entry = this_oop->cache()->entry_at(cache_index);
+    result_oop = cpc_entry->f1();
+    if (result_oop != NULL) {
+      return result_oop;  // that was easy...
+    }
+    index = cpc_entry->constant_pool_index();
+  }
+
+  int tag_value = this_oop->tag_at(index).value();
+  switch (tag_value) {
+
+  case JVM_CONSTANT_UnresolvedClass:
+  case JVM_CONSTANT_UnresolvedClassInError:
+  case JVM_CONSTANT_Class:
+    {
+      klassOop resolved = klass_at_impl(this_oop, index, CHECK_NULL);
+      // ldc wants the java mirror.
+      result_oop = resolved->klass_part()->java_mirror();
+      break;
+    }
+
+  case JVM_CONSTANT_String:
+  case JVM_CONSTANT_UnresolvedString:
+    if (this_oop->is_pseudo_string_at(index)) {
+      result_oop = this_oop->pseudo_string_at(index);
+      break;
+    }
+    result_oop = string_at_impl(this_oop, index, CHECK_NULL);
+    break;
+
+  case JVM_CONSTANT_Object:
+    result_oop = this_oop->object_at(index);
+    break;
+
+  case JVM_CONSTANT_MethodHandle:
+    {
+      int ref_kind                 = this_oop->method_handle_ref_kind_at(index);
+      int callee_index             = this_oop->method_handle_klass_index_at(index);
+      symbolHandle name(THREAD,      this_oop->method_handle_name_ref_at(index));
+      symbolHandle signature(THREAD, this_oop->method_handle_signature_ref_at(index));
+      if (PrintMiscellaneous)
+        tty->print_cr("resolve JVM_CONSTANT_MethodHandle:%d [%d/%d/%d] %s.%s",
+                      ref_kind, index, this_oop->method_handle_index_at(index),
+                      callee_index, name->as_C_string(), signature->as_C_string());
+      KlassHandle callee;
+      { klassOop k = klass_at_impl(this_oop, callee_index, CHECK_NULL);
+        callee = KlassHandle(THREAD, k);
+      }
+      KlassHandle klass(THREAD, this_oop->pool_holder());
+      Handle value = SystemDictionary::link_method_handle_constant(klass, ref_kind,
+                                                                   callee, name, signature,
+                                                                   CHECK_NULL);
+      result_oop = value();
+      // FIXME: Uniquify errors, using SystemDictionary::find_resolution_error.
+      break;
+    }
+
+  case JVM_CONSTANT_MethodType:
+    {
+      symbolHandle signature(THREAD, this_oop->method_type_signature_at(index));
+      if (PrintMiscellaneous)
+        tty->print_cr("resolve JVM_CONSTANT_MethodType [%d/%d] %s",
+                      index, this_oop->method_type_index_at(index),
+                      signature->as_C_string());
+      KlassHandle klass(THREAD, this_oop->pool_holder());
+      bool ignore_is_on_bcp = false;
+      Handle value = SystemDictionary::find_method_handle_type(signature,
+                                                               klass,
+                                                               ignore_is_on_bcp,
+                                                               CHECK_NULL);
+      result_oop = value();
+      // FIXME: Uniquify errors, using SystemDictionary::find_resolution_error.
+      break;
+    }
+
+    /* maybe some day
+  case JVM_CONSTANT_Integer:
+  case JVM_CONSTANT_Float:
+  case JVM_CONSTANT_Long:
+  case JVM_CONSTANT_Double:
+    result_oop = java_lang_boxing_object::create(...);
+    break;
+    */
+
+  default:
+    DEBUG_ONLY( tty->print_cr("*** %p: tag at CP[%d/%d] = %d",
+                              this_oop(), index, cache_index, tag_value) );
+    assert(false, "unexpected constant tag");
+    break;
+  }
+
+  if (cache_index >= 0) {
+    // Cache the oop here also.
+    Handle result(THREAD, result_oop);
+    result_oop = NULL;  // safety
+    ObjectLocker ol(this_oop, THREAD);
+    ConstantPoolCacheEntry* cpc_entry = this_oop->cache()->entry_at(cache_index);
+    oop result_oop2 = cpc_entry->f1();
+    if (result_oop2 != NULL) {
+      // Race condition:  May already be filled in while we were trying to lock.
+      return result_oop2;
+    }
+    cpc_entry->set_f1(result());
+    return result();
+  } else {
+    return result_oop;
+  }
+}
+
 oop constantPoolOopDesc::string_at_impl(constantPoolHandle this_oop, int which, TRAPS) {
   oop entry = *(this_oop->obj_at_addr(which));
   if (entry->is_symbol()) {
@@ -690,6 +808,28 @@
     }
   } break;
 
+  case JVM_CONSTANT_MethodType:
+  {
+    int k1 = method_type_index_at(index1);
+    int k2 = cp2->method_type_index_at(index2);
+    if (k1 == k2) {
+      return true;
+    }
+  } break;
+
+  case JVM_CONSTANT_MethodHandle:
+  {
+    int k1 = method_handle_ref_kind_at(index1);
+    int k2 = cp2->method_handle_ref_kind_at(index2);
+    if (k1 == k2) {
+      int i1 = method_handle_index_at(index1);
+      int i2 = cp2->method_handle_index_at(index2);
+      if (i1 == i2) {
+        return true;
+      }
+    }
+  } break;
+
   case JVM_CONSTANT_UnresolvedString:
   {
     symbolOop s1 = unresolved_string_at(index1);
@@ -863,6 +1003,19 @@
     to_cp->symbol_at_put(to_i, s);
   } break;
 
+  case JVM_CONSTANT_MethodType:
+  {
+    jint k = method_type_index_at(from_i);
+    to_cp->method_type_index_at_put(to_i, k);
+  } break;
+
+  case JVM_CONSTANT_MethodHandle:
+  {
+    int k1 = method_handle_ref_kind_at(from_i);
+    int k2 = method_handle_index_at(from_i);
+    to_cp->method_handle_index_at_put(to_i, k1, k2);
+  } break;
+
   // Invalid is used as the tag for the second constant pool entry
   // occupied by JVM_CONSTANT_Double or JVM_CONSTANT_Long. It should
   // not be seen by itself.
@@ -1066,8 +1219,12 @@
     case JVM_CONSTANT_UnresolvedClassInError:
     case JVM_CONSTANT_StringIndex:
     case JVM_CONSTANT_UnresolvedString:
+    case JVM_CONSTANT_MethodType:
       return 3;
 
+    case JVM_CONSTANT_MethodHandle:
+      return 4;  // tag, ref_kind, ref_index
+
     case JVM_CONSTANT_Integer:
     case JVM_CONSTANT_Float:
     case JVM_CONSTANT_Fieldref:
@@ -1271,6 +1428,22 @@
         DBG(printf("JVM_CONSTANT_StringIndex: %hd", idx1));
         break;
       }
+      case JVM_CONSTANT_MethodHandle: {
+        *bytes = JVM_CONSTANT_MethodHandle;
+        int kind = method_handle_ref_kind_at(idx);
+        idx1 = method_handle_index_at(idx);
+        *(bytes+1) = (unsigned char) kind;
+        Bytes::put_Java_u2((address) (bytes+2), idx1);
+        DBG(printf("JVM_CONSTANT_MethodHandle: %d %hd", kind, idx1));
+        break;
+      }
+      case JVM_CONSTANT_MethodType: {
+        *bytes = JVM_CONSTANT_MethodType;
+        idx1 = method_type_index_at(idx);
+        Bytes::put_Java_u2((address) (bytes+1), idx1);
+        DBG(printf("JVM_CONSTANT_MethodType: %hd", idx1));
+        break;
+      }
     }
     DBG(printf("\n"));
     bytes += ent_size;
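
resolve_constant_at_impl above resolves a JSR 292 constant on first use and then parks the result in the f1 slot of the constant-pool-cache entry, re-checking under ObjectLocker so that a racing thread's value wins. A compressed sketch of that check / resolve / re-check-under-lock shape, using std::mutex, std::atomic, and a string in place of the VM's lock and oop (names and types are illustrative only, not HotSpot API):

#include <atomic>
#include <cstdio>
#include <mutex>
#include <string>

// Stand-ins for ConstantPoolCacheEntry::f1() and the ObjectLocker on the pool.
struct CacheEntry { std::atomic<const std::string*> f1{nullptr}; };
static std::mutex pool_lock;

// Expensive resolution step (think: linking a MethodHandle/MethodType constant).
static const std::string* resolve_slowly(int cp_index) {
  return new std::string("constant#" + std::to_string(cp_index));
}

static const std::string* resolve_cached_constant(CacheEntry* e, int cp_index) {
  if (const std::string* hit = e->f1.load(std::memory_order_acquire))
    return hit;                                    // fast path: already resolved
  const std::string* result = resolve_slowly(cp_index);
  std::lock_guard<std::mutex> guard(pool_lock);
  if (const std::string* raced = e->f1.load(std::memory_order_relaxed)) {
    delete result;                                 // another thread won the race; keep its value
    return raced;
  }
  e->f1.store(result, std::memory_order_release);
  return result;
}

int main() {
  CacheEntry e;
  std::printf("%s\n", resolve_cached_constant(&e, 42)->c_str());
  std::printf("%s\n", resolve_cached_constant(&e, 42)->c_str());  // second call hits the cache
  return 0;
}
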
--- a/hotspot/src/share/vm/oops/constantPoolOop.hpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/oops/constantPoolOop.hpp	Fri Jul 02 01:36:15 2010 -0700
@@ -146,6 +146,16 @@
     oop_store_without_check(obj_at_addr(which), oop(s));
   }
 
+  void method_handle_index_at_put(int which, int ref_kind, int ref_index) {
+    tag_at_put(which, JVM_CONSTANT_MethodHandle);
+    *int_at_addr(which) = ((jint) ref_index<<16) | ref_kind;
+  }
+
+  void method_type_index_at_put(int which, int ref_index) {
+    tag_at_put(which, JVM_CONSTANT_MethodType);
+    *int_at_addr(which) = ref_index;
+  }
+
   // Temporary until actual use
   void unresolved_string_at_put(int which, symbolOop s) {
     *obj_at_addr(which) = NULL;
@@ -357,6 +367,36 @@
     return *int_at_addr(which);
   }
 
+  int method_handle_ref_kind_at(int which) {
+    assert(tag_at(which).is_method_handle(), "Corrupted constant pool");
+    return extract_low_short_from_int(*int_at_addr(which));  // mask out unwanted ref_index bits
+  }
+  int method_handle_index_at(int which) {
+    assert(tag_at(which).is_method_handle(), "Corrupted constant pool");
+    return extract_high_short_from_int(*int_at_addr(which));  // shift out unwanted ref_kind bits
+  }
+  int method_type_index_at(int which) {
+    assert(tag_at(which).is_method_type(), "Corrupted constant pool");
+    return *int_at_addr(which);
+  }
+  // Derived queries:
+  symbolOop method_handle_name_ref_at(int which) {
+    int member = method_handle_index_at(which);
+    return impl_name_ref_at(member, true);
+  }
+  symbolOop method_handle_signature_ref_at(int which) {
+    int member = method_handle_index_at(which);
+    return impl_signature_ref_at(member, true);
+  }
+  int method_handle_klass_index_at(int which) {
+    int member = method_handle_index_at(which);
+    return impl_klass_ref_index_at(member, true);
+  }
+  symbolOop method_type_signature_at(int which) {
+    int sym = method_type_index_at(which);
+    return symbol_at(sym);
+  }
+
   // The following methods (name/signature/klass_ref_at, klass_ref_at_noresolve,
   // name_and_type_ref_index_at) all expect to be passed indices obtained
   // directly from the bytecode, and extracted according to java byte order.
@@ -388,6 +428,17 @@
     resolve_string_constants_impl(h_this, CHECK);
   }
 
+  // Resolve late-bound constants.
+  oop resolve_constant_at(int index, TRAPS) {
+    constantPoolHandle h_this(THREAD, this);
+    return resolve_constant_at_impl(h_this, index, -1, THREAD);
+  }
+
+  oop resolve_cached_constant_at(int cache_index, TRAPS) {
+    constantPoolHandle h_this(THREAD, this);
+    return resolve_constant_at_impl(h_this, -1, cache_index, THREAD);
+  }
+
   // Klass name matches name at offset
   bool klass_name_at_matches(instanceKlassHandle k, int which);
 
@@ -420,6 +471,7 @@
   // Routines currently used for annotations (only called by jvm.cpp) but which might be used in the
   // future by other Java code. These take constant pool indices rather than possibly-byte-swapped
   // constant pool cache indices as do the peer methods above.
+  symbolOop uncached_klass_ref_at_noresolve(int which);
   symbolOop uncached_name_ref_at(int which)                 { return impl_name_ref_at(which, true); }
   symbolOop uncached_signature_ref_at(int which)            { return impl_signature_ref_at(which, true); }
   int       uncached_klass_ref_index_at(int which)          { return impl_klass_ref_index_at(which, true); }
@@ -436,6 +488,8 @@
 
 #ifdef ASSERT
   enum { CPCACHE_INDEX_TAG = 0x10000 };  // helps keep CP cache indices distinct from CP indices
+#else
+  enum { CPCACHE_INDEX_TAG = 0 };        // in product mode, this zero value is a no-op
 #endif //ASSERT
 
  private:
@@ -469,6 +523,8 @@
   // Resolve string constants (to prevent allocation during compilation)
   static void resolve_string_constants_impl(constantPoolHandle this_oop, TRAPS);
 
+  static oop resolve_constant_at_impl(constantPoolHandle this_oop, int index, int cache_index, TRAPS);
+
  public:
   // Merging constantPoolOop support:
   bool compare_entry_to(int index1, constantPoolHandle cp2, int index2, TRAPS);
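
The accessors just added keep a CONSTANT_MethodHandle entry in one 32-bit slot: the member-reference index in the high 16 bits and the reference kind in the low 16 bits, which is exactly what method_handle_index_at_put writes (ref_index<<16 | ref_kind) and what the extract_low/high_short_from_int calls read back. A standalone sketch of that packing, with made-up helper names:

#include <cassert>
#include <cstdint>
#include <cstdio>

// Pack a CONSTANT_MethodHandle slot the way method_handle_index_at_put does:
// member-reference index in the high 16 bits, reference kind in the low 16 bits.
static uint32_t pack_method_handle(unsigned ref_kind, unsigned ref_index) {
  assert(ref_kind >= 1 && ref_kind <= 9);        // JVM_REF_getField .. JVM_REF_invokeInterface
  assert(ref_index <= 0xffff);                   // must fit in a u2
  return (ref_index << 16) | ref_kind;
}

static unsigned unpack_ref_kind(uint32_t slot)  { return slot & 0xffff; }  // low short
static unsigned unpack_ref_index(uint32_t slot) { return slot >> 16; }     // high short

int main() {
  uint32_t slot = pack_method_handle(/*JVM_REF_invokeStatic*/ 6, /*ref_index*/ 31);
  std::printf("ref_kind=%u ref_index=%u\n", unpack_ref_kind(slot), unpack_ref_index(slot));
  return 0;
}
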
--- a/hotspot/src/share/vm/oops/cpCacheOop.hpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/oops/cpCacheOop.hpp	Fri Jul 02 01:36:15 2010 -0700
@@ -110,6 +110,7 @@
 class ConstantPoolCacheEntry VALUE_OBJ_CLASS_SPEC {
   friend class VMStructs;
   friend class constantPoolCacheKlass;
+  friend class constantPoolOopDesc;  //resolve_constant_at_impl => set_f1
 
  private:
   volatile intx     _indices;  // constant pool index & rewrite bytecodes
--- a/hotspot/src/share/vm/opto/cfgnode.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/opto/cfgnode.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -472,9 +472,7 @@
             assert( n->req() == 2 &&  n->in(1) != NULL, "Only one data input expected" );
             // Break dead loop data path.
             // Eagerly replace phis with top to avoid phis copies generation.
-            igvn->add_users_to_worklist(n);
-            igvn->hash_delete(n); // Yank from hash before hacking edges
-            igvn->subsume_node(n, top);
+            igvn->replace_node(n, top);
             if( max != outcnt() ) {
               progress = true;
               j = refresh_out_pos(j);
@@ -518,18 +516,17 @@
         igvn->hash_delete(n); // Remove from worklist before modifying edges
         if( n->is_Phi() ) {   // Collapse all Phis
           // Eagerly replace phis to avoid copies generation.
-          igvn->add_users_to_worklist(n);
-          igvn->hash_delete(n); // Yank from hash before hacking edges
+          Node* in;
           if( cnt == 0 ) {
             assert( n->req() == 1, "No data inputs expected" );
-            igvn->subsume_node(n, parent_ctrl); // replaced by top
+            in = parent_ctrl; // replaced by top
           } else {
             assert( n->req() == 2 &&  n->in(1) != NULL, "Only one data input expected" );
-            Node* in1 = n->in(1);               // replaced by unique input
-            if( n->as_Phi()->is_unsafe_data_reference(in1) )
-              in1 = phase->C->top();            // replaced by top
-            igvn->subsume_node(n, in1);
+            in = n->in(1);               // replaced by unique input
+            if( n->as_Phi()->is_unsafe_data_reference(in) )
+              in = phase->C->top();      // replaced by top
           }
+          igvn->replace_node(n, in);
         }
         else if( n->is_Region() ) { // Update all incoming edges
           assert( !igvn->eqv(n, this), "Must be removed from DefUse edges");
@@ -2127,7 +2124,7 @@
     // if it's not there, there's nothing to do.
     Node* fallthru = proj_out(0);
     if (fallthru != NULL) {
-      phase->is_IterGVN()->subsume_node(fallthru, in(0));
+      phase->is_IterGVN()->replace_node(fallthru, in(0));
     }
     return phase->C->top();
   }
--- a/hotspot/src/share/vm/opto/compile.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/opto/compile.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -2000,6 +2000,17 @@
     }
   }
 
+#ifdef ASSERT
+  if( n->is_Mem() ) {
+    Compile* C = Compile::current();
+    int alias_idx = C->get_alias_index(n->as_Mem()->adr_type());
+    assert( n->in(0) != NULL || alias_idx != Compile::AliasIdxRaw ||
+            // oop will be recorded in oop map if load crosses safepoint
+            n->is_Load() && (n->as_Load()->bottom_type()->isa_oopptr() ||
+                             LoadNode::is_immutable_value(n->in(MemNode::Address))),
+            "raw memory operations should have control edge");
+  }
+#endif
   // Count FPU ops and common calls, implements item (3)
   switch( nop ) {
   // Count all float operations that may use FPU
--- a/hotspot/src/share/vm/opto/graphKit.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/opto/graphKit.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -1789,9 +1789,10 @@
 
 void GraphKit::increment_counter(Node* counter_addr) {
   int adr_type = Compile::AliasIdxRaw;
-  Node* cnt  = make_load(NULL, counter_addr, TypeInt::INT, T_INT, adr_type);
+  Node* ctrl = control();
+  Node* cnt  = make_load(ctrl, counter_addr, TypeInt::INT, T_INT, adr_type);
   Node* incr = _gvn.transform(new (C, 3) AddINode(cnt, _gvn.intcon(1)));
-  store_to_memory( NULL, counter_addr, incr, T_INT, adr_type );
+  store_to_memory( ctrl, counter_addr, incr, T_INT, adr_type );
 }
 
 
@@ -2771,11 +2772,7 @@
     // Update the counter for this lock.  Don't bother using an atomic
     // operation since we don't require absolute accuracy.
     lock->create_lock_counter(map()->jvms());
-    int adr_type = Compile::AliasIdxRaw;
-    Node* counter_addr = makecon(TypeRawPtr::make(lock->counter()->addr()));
-    Node* cnt  = make_load(NULL, counter_addr, TypeInt::INT, T_INT, adr_type);
-    Node* incr = _gvn.transform(new (C, 3) AddINode(cnt, _gvn.intcon(1)));
-    store_to_memory(control(), counter_addr, incr, T_INT, adr_type);
+    increment_counter(lock->counter()->addr());
   }
 #endif
 
--- a/hotspot/src/share/vm/opto/ifnode.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/opto/ifnode.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -1081,11 +1081,9 @@
 
   igvn->register_new_node_with_optimizer(new_if_f);
   igvn->register_new_node_with_optimizer(new_if_t);
-  igvn->hash_delete(old_if_f);
-  igvn->hash_delete(old_if_t);
   // Flip test, so flip trailing control
-  igvn->subsume_node(old_if_f, new_if_t);
-  igvn->subsume_node(old_if_t, new_if_f);
+  igvn->replace_node(old_if_f, new_if_t);
+  igvn->replace_node(old_if_t, new_if_f);
 
   // Progress
   return iff;
--- a/hotspot/src/share/vm/opto/library_call.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/opto/library_call.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -3512,8 +3512,7 @@
 
   // Get the header out of the object, use LoadMarkNode when available
   Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
-  Node* header = make_load(NULL, header_addr, TypeRawPtr::BOTTOM, T_ADDRESS);
-  header = _gvn.transform( new (C, 2) CastP2XNode(NULL, header) );
+  Node* header = make_load(control(), header_addr, TypeX_X, TypeX_X->basic_type());
 
   // Test the header to see if it is unlocked.
   Node *lock_mask      = _gvn.MakeConX(markOopDesc::biased_lock_mask_in_place);
@@ -5202,7 +5201,7 @@
   // super_check_offset, for the desired klass.
   int sco_offset = Klass::super_check_offset_offset_in_bytes() + sizeof(oopDesc);
   Node* p3 = basic_plus_adr(dest_elem_klass, sco_offset);
-  Node* n3 = new(C, 3) LoadINode(NULL, immutable_memory(), p3, TypeRawPtr::BOTTOM);
+  Node* n3 = new(C, 3) LoadINode(NULL, memory(p3), p3, _gvn.type(p3)->is_ptr());
   Node* check_offset = _gvn.transform(n3);
   Node* check_value  = dest_elem_klass;
 
--- a/hotspot/src/share/vm/opto/loopTransform.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/opto/loopTransform.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -194,8 +194,7 @@
     addx = new (phase->C, 3) AddINode(x, inv);
   }
   phase->register_new_node(addx, phase->get_ctrl(x));
-  phase->_igvn.hash_delete(n1);
-  phase->_igvn.subsume_node(n1, addx);
+  phase->_igvn.replace_node(n1, addx);
   return addx;
 }
 
@@ -1586,8 +1585,7 @@
   Node *phi = cl->phi();
   Node *final = new (phase->C, 3) SubINode( cl->limit(), cl->stride() );
   phase->register_new_node(final,cl->in(LoopNode::EntryControl));
-  phase->_igvn.hash_delete(phi);
-  phase->_igvn.subsume_node(phi,final);
+  phase->_igvn.replace_node(phi,final);
   phase->C->set_major_progress();
   return true;
 }
--- a/hotspot/src/share/vm/opto/loopnode.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/opto/loopnode.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -400,7 +400,7 @@
     nphi->set_req(LoopNode::LoopBackControl, phi->in(LoopNode::LoopBackControl));
     nphi = _igvn.register_new_node_with_optimizer(nphi);
     set_ctrl(nphi, get_ctrl(phi));
-    _igvn.subsume_node(phi, nphi);
+    _igvn.replace_node(phi, nphi);
     phi = nphi->as_Phi();
   }
   cmp = cmp->clone();
@@ -760,7 +760,7 @@
       // which in turn prevents removing an empty loop.
       Node *id_old_phi = old_phi->Identity( &igvn );
       if( id_old_phi != old_phi ) { // Found a simple identity?
-        // Note that I cannot call 'subsume_node' here, because
+        // Note that I cannot call 'replace_node' here, because
         // that will yank the edge from old_phi to the Region and
         // I'm mid-iteration over the Region's uses.
         for (DUIterator_Last imin, i = old_phi->last_outs(imin); i >= imin; ) {
@@ -1065,11 +1065,9 @@
     l = igvn.register_new_node_with_optimizer(l, _head);
     phase->set_created_loop_node();
     // Go ahead and replace _head
-    phase->_igvn.subsume_node( _head, l );
+    phase->_igvn.replace_node( _head, l );
     _head = l;
     phase->set_loop(_head, this);
-    for (DUIterator_Fast imax, i = l->fast_outs(imax); i < imax; i++)
-      phase->_igvn.add_users_to_worklist(l->fast_out(i));
   }
 
   // Now recursively beautify nested loops
@@ -1329,8 +1327,7 @@
         Node* add  = new (C, 3) AddINode(ratio_idx, diff);
         phase->_igvn.register_new_node_with_optimizer(add);
         phase->set_ctrl(add, cl);
-        phase->_igvn.hash_delete( phi2 );
-        phase->_igvn.subsume_node( phi2, add );
+        phase->_igvn.replace_node( phi2, add );
         // Sometimes an induction variable is unused
         if (add->outcnt() == 0) {
           phase->_igvn.remove_dead_node(add);
--- a/hotspot/src/share/vm/opto/loopnode.hpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/opto/loopnode.hpp	Fri Jul 02 01:36:15 2010 -0700
@@ -626,8 +626,7 @@
     _nodes.map( old_node->_idx, (Node*)((intptr_t)new_node + 1) );
   }
   void lazy_replace( Node *old_node, Node *new_node ) {
-    _igvn.hash_delete(old_node);
-    _igvn.subsume_node( old_node, new_node );
+    _igvn.replace_node( old_node, new_node );
     lazy_update( old_node, new_node );
   }
   void lazy_replace_proj( Node *old_node, Node *new_node ) {
--- a/hotspot/src/share/vm/opto/loopopts.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/opto/loopopts.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -354,8 +354,7 @@
     register_new_node( var_scale, n_ctrl );
     Node *var_add = new (C, 3) AddINode( var_scale, inv_scale );
     register_new_node( var_add, n_ctrl );
-    _igvn.hash_delete( n );
-    _igvn.subsume_node( n, var_add );
+    _igvn.replace_node( n, var_add );
     return var_add;
   }
 
@@ -390,8 +389,7 @@
           register_new_node( add1, n_loop->_head->in(LoopNode::EntryControl) );
           Node *add2 = new (C, 4) AddPNode( n->in(1), add1, n->in(2)->in(3) );
           register_new_node( add2, n_ctrl );
-          _igvn.hash_delete( n );
-          _igvn.subsume_node( n, add2 );
+          _igvn.replace_node( n, add2 );
           return add2;
         }
       }
@@ -412,8 +410,7 @@
           register_new_node( add1, n_loop->_head->in(LoopNode::EntryControl) );
           Node *add2 = new (C, 4) AddPNode( n->in(1), add1, V );
           register_new_node( add2, n_ctrl );
-          _igvn.hash_delete( n );
-          _igvn.subsume_node( n, add2 );
+          _igvn.replace_node( n, add2 );
           return add2;
         }
       }
@@ -555,8 +552,7 @@
     }
     Node *cmov = CMoveNode::make( C, cmov_ctrl, iff->in(1), phi->in(1+flip), phi->in(2-flip), _igvn.type(phi) );
     register_new_node( cmov, cmov_ctrl );
-    _igvn.hash_delete(phi);
-    _igvn.subsume_node( phi, cmov );
+    _igvn.replace_node( phi, cmov );
 #ifndef PRODUCT
     if( VerifyLoopOptimizations ) verify();
 #endif
@@ -642,8 +638,7 @@
 
   // Found a Phi to split thru!
   // Replace 'n' with the new phi
-  _igvn.hash_delete(n);
-  _igvn.subsume_node( n, phi );
+  _igvn.replace_node( n, phi );
   // Moved a load around the loop, 'en-registering' something.
   if( n_blk->Opcode() == Op_Loop && n->is_Load() &&
       !phi->in(LoopNode::LoopBackControl)->is_Load() )
@@ -789,13 +784,11 @@
 
     // Found a Phi to split thru!
     // Replace 'n' with the new phi
-    _igvn.hash_delete(n);
-    _igvn.subsume_node( n, phi );
+    _igvn.replace_node( n, phi );
 
     // Now split the bool up thru the phi
     Node *bolphi = split_thru_phi( bol, n_ctrl, -1 );
-    _igvn.hash_delete(bol);
-    _igvn.subsume_node( bol, bolphi );
+    _igvn.replace_node( bol, bolphi );
     assert( iff->in(1) == bolphi, "" );
     if( bolphi->Value(&_igvn)->singleton() )
       return;
@@ -803,8 +796,7 @@
     // Conditional-move?  Must split up now
     if( !iff->is_If() ) {
       Node *cmovphi = split_thru_phi( iff, n_ctrl, -1 );
-      _igvn.hash_delete(iff);
-      _igvn.subsume_node( iff, cmovphi );
+      _igvn.replace_node( iff, cmovphi );
       return;
     }
 
@@ -950,9 +942,7 @@
   if( n_op == Op_Opaque2 &&
       n->in(1) != NULL &&
       get_loop(get_ctrl(n)) == get_loop(get_ctrl(n->in(1))) ) {
-    _igvn.add_users_to_worklist(n);
-    _igvn.hash_delete(n);
-    _igvn.subsume_node( n, n->in(1) );
+    _igvn.replace_node( n, n->in(1) );
   }
 }
 
@@ -1425,7 +1415,7 @@
           // IGVN does CSE).
           Node *hit = _igvn.hash_find_insert(use);
           if( hit )             // Go ahead and re-hash for hits.
-            _igvn.subsume_node( use, hit );
+            _igvn.replace_node( use, hit );
         }
 
         // If 'use' was in the loop-exit block, it now needs to be sunk
--- a/hotspot/src/share/vm/opto/macro.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/opto/macro.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -135,8 +135,7 @@
   if (parm1 != NULL)  call->init_req(TypeFunc::Parms+1, parm1);
   copy_call_debug_info(oldcall, call);
   call->set_cnt(PROB_UNLIKELY_MAG(4));  // Same effect as RC_UNCOMMON.
-  _igvn.hash_delete(oldcall);
-  _igvn.subsume_node(oldcall, call);
+  _igvn.replace_node(oldcall, call);
   transform_later(call);
 
   return call;
@@ -523,8 +522,7 @@
         // Kill all new Phis
         while(value_phis.is_nonempty()) {
           Node* n = value_phis.node();
-          _igvn.hash_delete(n);
-          _igvn.subsume_node(n, C->top());
+          _igvn.replace_node(n, C->top());
           value_phis.pop();
         }
       }
@@ -1311,8 +1309,7 @@
   if (!always_slow) {
     call->set_cnt(PROB_UNLIKELY_MAG(4));  // Same effect as RC_UNCOMMON.
   }
-  _igvn.hash_delete(alloc);
-  _igvn.subsume_node(alloc, call);
+  _igvn.replace_node(alloc, call);
   transform_later(call);
 
   // Identify the output projections from the allocate node and
@@ -1431,7 +1428,7 @@
   Node* mark_node = NULL;
   // For now only enable fast locking for non-array types
   if (UseBiasedLocking && (length == NULL)) {
-    mark_node = make_load(NULL, rawmem, klass_node, Klass::prototype_header_offset_in_bytes() + sizeof(oopDesc), TypeRawPtr::BOTTOM, T_ADDRESS);
+    mark_node = make_load(control, rawmem, klass_node, Klass::prototype_header_offset_in_bytes() + sizeof(oopDesc), TypeRawPtr::BOTTOM, T_ADDRESS);
   } else {
     mark_node = makecon(TypeRawPtr::make((address)markOopDesc::prototype()));
   }
--- a/hotspot/src/share/vm/opto/memnode.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/opto/memnode.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -815,6 +815,16 @@
 }
 #endif
 
+#ifdef ASSERT
+//----------------------------is_immutable_value-------------------------------
+// Helper function to allow a raw load without control edge for some cases
+bool LoadNode::is_immutable_value(Node* adr) {
+  return (adr->is_AddP() && adr->in(AddPNode::Base)->is_top() &&
+          adr->in(AddPNode::Address)->Opcode() == Op_ThreadLocal &&
+          (adr->in(AddPNode::Offset)->find_intptr_t_con(-1) ==
+           in_bytes(JavaThread::osthread_offset())));
+}
+#endif
 
 //----------------------------LoadNode::make-----------------------------------
 // Polymorphic factory method:
@@ -828,6 +838,11 @@
   assert(!(adr_type->isa_aryptr() &&
            adr_type->offset() == arrayOopDesc::length_offset_in_bytes()),
          "use LoadRangeNode instead");
+  // Check control edge of raw loads
+  assert( ctl != NULL || C->get_alias_index(adr_type) != Compile::AliasIdxRaw ||
+          // oop will be recorded in oop map if load crosses safepoint
+          rt->isa_oopptr() || is_immutable_value(adr),
+          "raw memory operations should have control edge");
   switch (bt) {
   case T_BOOLEAN: return new (C, 3) LoadUBNode(ctl, mem, adr, adr_type, rt->is_int()    );
   case T_BYTE:    return new (C, 3) LoadBNode (ctl, mem, adr, adr_type, rt->is_int()    );
@@ -2064,6 +2079,8 @@
 // Polymorphic factory method:
 StoreNode* StoreNode::make( PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, BasicType bt ) {
   Compile* C = gvn.C;
+  assert( C->get_alias_index(adr_type) != Compile::AliasIdxRaw ||
+          ctl != NULL, "raw memory operations should have control edge");
 
   switch (bt) {
   case T_BOOLEAN:
--- a/hotspot/src/share/vm/opto/memnode.hpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/opto/memnode.hpp	Fri Jul 02 01:36:15 2010 -0700
@@ -189,6 +189,10 @@
 #ifndef PRODUCT
   virtual void dump_spec(outputStream *st) const;
 #endif
+#ifdef ASSERT
+  // Helper function to allow a raw load without control edge for some cases
+  static bool is_immutable_value(Node* adr);
+#endif
 protected:
   const Type* load_array_final_field(const TypeKlassPtr *tkls,
                                      ciKlass* klass) const;
--- a/hotspot/src/share/vm/opto/parse1.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/opto/parse1.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -88,15 +88,16 @@
                                      Node *local_addrs_base) {
   Node *mem = memory(Compile::AliasIdxRaw);
   Node *adr = basic_plus_adr( local_addrs_base, local_addrs, -index*wordSize );
+  Node *ctl = control();
 
   // Very similar to LoadNode::make, except we handle un-aligned longs and
   // doubles on Sparc.  Intel can handle them just fine directly.
   Node *l;
   switch( bt ) {                // Signature is flattened
-  case T_INT:     l = new (C, 3) LoadINode( 0, mem, adr, TypeRawPtr::BOTTOM ); break;
-  case T_FLOAT:   l = new (C, 3) LoadFNode( 0, mem, adr, TypeRawPtr::BOTTOM ); break;
-  case T_ADDRESS: l = new (C, 3) LoadPNode( 0, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM  ); break;
-  case T_OBJECT:  l = new (C, 3) LoadPNode( 0, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM ); break;
+  case T_INT:     l = new (C, 3) LoadINode( ctl, mem, adr, TypeRawPtr::BOTTOM ); break;
+  case T_FLOAT:   l = new (C, 3) LoadFNode( ctl, mem, adr, TypeRawPtr::BOTTOM ); break;
+  case T_ADDRESS: l = new (C, 3) LoadPNode( ctl, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM  ); break;
+  case T_OBJECT:  l = new (C, 3) LoadPNode( ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM ); break;
   case T_LONG:
   case T_DOUBLE: {
     // Since arguments are in reverse order, the argument address 'adr'
@@ -104,12 +105,12 @@
     adr = basic_plus_adr( local_addrs_base, local_addrs, -(index+1)*wordSize );
     if( Matcher::misaligned_doubles_ok ) {
       l = (bt == T_DOUBLE)
-        ? (Node*)new (C, 3) LoadDNode( 0, mem, adr, TypeRawPtr::BOTTOM )
-        : (Node*)new (C, 3) LoadLNode( 0, mem, adr, TypeRawPtr::BOTTOM );
+        ? (Node*)new (C, 3) LoadDNode( ctl, mem, adr, TypeRawPtr::BOTTOM )
+        : (Node*)new (C, 3) LoadLNode( ctl, mem, adr, TypeRawPtr::BOTTOM );
     } else {
       l = (bt == T_DOUBLE)
-        ? (Node*)new (C, 3) LoadD_unalignedNode( 0, mem, adr, TypeRawPtr::BOTTOM )
-        : (Node*)new (C, 3) LoadL_unalignedNode( 0, mem, adr, TypeRawPtr::BOTTOM );
+        ? (Node*)new (C, 3) LoadD_unalignedNode( ctl, mem, adr, TypeRawPtr::BOTTOM )
+        : (Node*)new (C, 3) LoadL_unalignedNode( ctl, mem, adr, TypeRawPtr::BOTTOM );
     }
     break;
   }
--- a/hotspot/src/share/vm/opto/parse2.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/opto/parse2.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -1324,33 +1324,21 @@
   case Bytecodes::_ldc_w:
   case Bytecodes::_ldc2_w:
     // If the constant is unresolved, run this BC once in the interpreter.
-    if (iter().is_unresolved_string()) {
-      uncommon_trap(Deoptimization::make_trap_request
-                    (Deoptimization::Reason_unloaded,
-                     Deoptimization::Action_reinterpret,
-                     iter().get_constant_index()),
-                    NULL, "unresolved_string");
-      break;
-    } else {
+    {
       ciConstant constant = iter().get_constant();
-      if (constant.basic_type() == T_OBJECT) {
-        ciObject* c = constant.as_object();
-        if (c->is_klass()) {
-          // The constant returned for a klass is the ciKlass for the
-          // entry.  We want the java_mirror so get it.
-          ciKlass* klass = c->as_klass();
-          if (klass->is_loaded()) {
-            constant = ciConstant(T_OBJECT, klass->java_mirror());
-          } else {
-            uncommon_trap(Deoptimization::make_trap_request
-                          (Deoptimization::Reason_unloaded,
-                           Deoptimization::Action_reinterpret,
-                           iter().get_constant_index()),
-                          NULL, "unresolved_klass");
-            break;
-          }
-        }
+      if (constant.basic_type() == T_OBJECT &&
+          !constant.as_object()->is_loaded()) {
+        int index = iter().get_constant_pool_index();
+        constantTag tag = iter().get_constant_pool_tag(index);
+        uncommon_trap(Deoptimization::make_trap_request
+                      (Deoptimization::Reason_unloaded,
+                       Deoptimization::Action_reinterpret,
+                       index),
+                      NULL, tag.internal_name());
+        break;
       }
+      assert(constant.basic_type() != T_OBJECT || !constant.as_object()->is_klass(),
+             "must be java_mirror of klass");
       bool pushed = push_constant(constant, true);
       guarantee(pushed, "must be possible to push this constant");
     }
--- a/hotspot/src/share/vm/opto/phaseX.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/opto/phaseX.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -1447,16 +1447,12 @@
           Node* m = n->out(i);
           if( m->is_Phi() ) {
             assert(type(m) == Type::TOP, "Unreachable region should not have live phis.");
-            add_users_to_worklist(m);
-            hash_delete(m); // Yank from hash before hacking edges
-            subsume_node(m, nn);
+            replace_node(m, nn);
             --i; // deleted this phi; rescan starting with next position
           }
         }
       }
-      add_users_to_worklist(n); // Users of about-to-be-constant 'n'
-      hash_delete(n);           // Removed 'n' from table before subsuming it
-      subsume_node(n,nn);       // Update DefUse edges for new constant
+      replace_node(n,nn);       // Update DefUse edges for new constant
     }
     return nn;
   }
--- a/hotspot/src/share/vm/opto/phaseX.hpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/opto/phaseX.hpp	Fri Jul 02 01:36:15 2010 -0700
@@ -393,6 +393,10 @@
 
   // Idealize old Node 'n' with respect to its inputs and its value
   virtual Node *transform_old( Node *a_node );
+
+  // Subsume users of node 'old' into node 'nn'
+  void subsume_node( Node *old, Node *nn );
+
 protected:
 
   // Idealize new Node 'n' with respect to its inputs and its value
@@ -439,10 +443,6 @@
     remove_globally_dead_node(dead);
   }
 
-  // Subsume users of node 'old' into node 'nn'
-  // If no Def-Use info existed for 'nn' it will after call.
-  void subsume_node( Node *old, Node *nn );
-
   // Add users of 'n' to worklist
   void add_users_to_worklist0( Node *n );
   void add_users_to_worklist ( Node *n );
@@ -450,7 +450,7 @@
   // Replace old node with new one.
   void replace_node( Node *old, Node *nn ) {
     add_users_to_worklist(old);
-    hash_delete(old);
+    hash_delete(old); // Yank from hash before hacking edges
     subsume_node(old, nn);
   }
 
--- a/hotspot/src/share/vm/opto/split_if.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/opto/split_if.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -217,8 +217,7 @@
   register_new_node(phi, blk1);
 
   // Remove cloned-up value from optimizer; use phi instead
-  _igvn.hash_delete(n);
-  _igvn.subsume_node( n, phi );
+  _igvn.replace_node( n, phi );
 
   // (There used to be a self-recursive call to split_up() here,
   // but it is not needed.  All necessary forward walking is done
@@ -352,8 +351,7 @@
   }
 
   if (use_blk == NULL) {        // He's dead, Jim
-    _igvn.hash_delete(use);
-    _igvn.subsume_node(use, C->top());
+    _igvn.replace_node(use, C->top());
   }
 
   return use_blk;
--- a/hotspot/src/share/vm/opto/superword.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/opto/superword.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -1172,8 +1172,7 @@
       _phase->set_ctrl(vn, _phase->get_ctrl(p->at(0)));
       for (uint j = 0; j < p->size(); j++) {
         Node* pm = p->at(j);
-        _igvn.hash_delete(pm);
-        _igvn.subsume_node(pm, vn);
+        _igvn.replace_node(pm, vn);
       }
       _igvn._worklist.push(vn);
     }
--- a/hotspot/src/share/vm/opto/type.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/opto/type.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -182,6 +182,8 @@
   return t->hash();
 }
 
+#define SMALLINT ((juint)3)  // a value too insignificant to consider widening
+
 //--------------------------Initialize_shared----------------------------------
 void Type::Initialize_shared(Compile* current) {
   // This method does not need to be locked because the first system
@@ -240,6 +242,7 @@
   assert( TypeInt::CC_GT == TypeInt::ONE,     "types must match for CmpL to work" );
   assert( TypeInt::CC_EQ == TypeInt::ZERO,    "types must match for CmpL to work" );
   assert( TypeInt::CC_GE == TypeInt::BOOL,    "types must match for CmpL to work" );
+  assert( (juint)(TypeInt::CC->_hi - TypeInt::CC->_lo) <= SMALLINT, "CC is truly small");
 
   TypeLong::MINUS_1 = TypeLong::make(-1);        // -1
   TypeLong::ZERO    = TypeLong::make( 0);        //  0
@@ -1054,16 +1057,21 @@
   return (TypeInt*)(new TypeInt(lo,lo,WidenMin))->hashcons();
 }
 
-#define SMALLINT ((juint)3)  // a value too insignificant to consider widening
-
-const TypeInt *TypeInt::make( jint lo, jint hi, int w ) {
+static int normalize_int_widen( jint lo, jint hi, int w ) {
   // Certain normalizations keep us sane when comparing types.
   // The 'SMALLINT' covers constants and also CC and its relatives.
-  assert(CC == NULL || (juint)(CC->_hi - CC->_lo) <= SMALLINT, "CC is truly small");
   if (lo <= hi) {
-    if ((juint)(hi - lo) <= SMALLINT)   w = Type::WidenMin;
-    if ((juint)(hi - lo) >= max_juint)  w = Type::WidenMax; // plain int
+    if ((juint)(hi - lo) <= SMALLINT)  w = Type::WidenMin;
+    if ((juint)(hi - lo) >= max_juint) w = Type::WidenMax; // TypeInt::INT
+  } else {
+    if ((juint)(lo - hi) <= SMALLINT)  w = Type::WidenMin;
+    if ((juint)(lo - hi) >= max_juint) w = Type::WidenMin; // dual TypeInt::INT
   }
+  return w;
+}
+
+const TypeInt *TypeInt::make( jint lo, jint hi, int w ) {
+  w = normalize_int_widen(lo, hi, w);
   return (TypeInt*)(new TypeInt(lo,hi,w))->hashcons();
 }
 
@@ -1103,14 +1111,14 @@
 
   // Expand covered set
   const TypeInt *r = t->is_int();
-  // (Avoid TypeInt::make, to avoid the argument normalizations it enforces.)
-  return (new TypeInt( MIN2(_lo,r->_lo), MAX2(_hi,r->_hi), MAX2(_widen,r->_widen) ))->hashcons();
+  return make( MIN2(_lo,r->_lo), MAX2(_hi,r->_hi), MAX2(_widen,r->_widen) );
 }
 
 //------------------------------xdual------------------------------------------
 // Dual: reverse hi & lo; flip widen
 const Type *TypeInt::xdual() const {
-  return new TypeInt(_hi,_lo,WidenMax-_widen);
+  int w = normalize_int_widen(_hi,_lo, WidenMax-_widen);
+  return new TypeInt(_hi,_lo,w);
 }
 
 //------------------------------widen------------------------------------------
@@ -1202,7 +1210,7 @@
 //-----------------------------filter------------------------------------------
 const Type *TypeInt::filter( const Type *kills ) const {
   const TypeInt* ft = join(kills)->isa_int();
-  if (ft == NULL || ft->_lo > ft->_hi)
+  if (ft == NULL || ft->empty())
     return Type::TOP;           // Canonical empty value
   if (ft->_widen < this->_widen) {
     // Do not allow the value of kill->_widen to affect the outcome.
@@ -1304,13 +1312,21 @@
   return (TypeLong*)(new TypeLong(lo,lo,WidenMin))->hashcons();
 }
 
-const TypeLong *TypeLong::make( jlong lo, jlong hi, int w ) {
+static int normalize_long_widen( jlong lo, jlong hi, int w ) {
   // Certain normalizations keep us sane when comparing types.
-  // The '1' covers constants.
+  // The 'SMALLINT' covers constants.
   if (lo <= hi) {
-    if ((julong)(hi - lo) <= SMALLINT)    w = Type::WidenMin;
-    if ((julong)(hi - lo) >= max_julong)  w = Type::WidenMax; // plain long
+    if ((julong)(hi - lo) <= SMALLINT)   w = Type::WidenMin;
+    if ((julong)(hi - lo) >= max_julong) w = Type::WidenMax; // TypeLong::LONG
+  } else {
+    if ((julong)(lo - hi) <= SMALLINT)   w = Type::WidenMin;
+    if ((julong)(lo - hi) >= max_julong) w = Type::WidenMin; // dual TypeLong::LONG
   }
+  return w;
+}
+
+const TypeLong *TypeLong::make( jlong lo, jlong hi, int w ) {
+  w = normalize_long_widen(lo, hi, w);
   return (TypeLong*)(new TypeLong(lo,hi,w))->hashcons();
 }
 
@@ -1351,14 +1367,14 @@
 
   // Expand covered set
   const TypeLong *r = t->is_long(); // Turn into a TypeLong
-  // (Avoid TypeLong::make, to avoid the argument normalizations it enforces.)
-  return (new TypeLong( MIN2(_lo,r->_lo), MAX2(_hi,r->_hi), MAX2(_widen,r->_widen) ))->hashcons();
+  return make( MIN2(_lo,r->_lo), MAX2(_hi,r->_hi), MAX2(_widen,r->_widen) );
 }
 
 //------------------------------xdual------------------------------------------
 // Dual: reverse hi & lo; flip widen
 const Type *TypeLong::xdual() const {
-  return new TypeLong(_hi,_lo,WidenMax-_widen);
+  int w = normalize_long_widen(_hi,_lo, WidenMax-_widen);
+  return new TypeLong(_hi,_lo,w);
 }
 
 //------------------------------widen------------------------------------------
@@ -1453,7 +1469,7 @@
 //-----------------------------filter------------------------------------------
 const Type *TypeLong::filter( const Type *kills ) const {
   const TypeLong* ft = join(kills)->isa_long();
-  if (ft == NULL || ft->_lo > ft->_hi)
+  if (ft == NULL || ft->empty())
     return Type::TOP;           // Canonical empty value
   if (ft->_widen < this->_widen) {
     // Do not allow the value of kill->_widen to affect the outcome.
--- a/hotspot/src/share/vm/prims/jvm.h	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/prims/jvm.h	Fri Jul 02 01:36:15 2010 -0700
@@ -1044,7 +1044,22 @@
     JVM_CONSTANT_Fieldref,
     JVM_CONSTANT_Methodref,
     JVM_CONSTANT_InterfaceMethodref,
-    JVM_CONSTANT_NameAndType
+    JVM_CONSTANT_NameAndType,
+    JVM_CONSTANT_MethodHandle           = 15,  // JSR 292
+    JVM_CONSTANT_MethodType             = 16   // JSR 292
+};
+
+/* JVM_CONSTANT_MethodHandle subtypes */
+enum {
+    JVM_REF_getField                = 1,
+    JVM_REF_getStatic               = 2,
+    JVM_REF_putField                = 3,
+    JVM_REF_putStatic               = 4,
+    JVM_REF_invokeVirtual           = 5,
+    JVM_REF_invokeStatic            = 6,
+    JVM_REF_invokeSpecial           = 7,
+    JVM_REF_newInvokeSpecial        = 8,
+    JVM_REF_invokeInterface         = 9
 };
 
 /* Used in the newarray instruction. */
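
In the class-file format these two new tags have fixed-size bodies: tag 15 (MethodHandle) is followed by a one-byte reference kind and a two-byte reference index, and tag 16 (MethodType) by a two-byte descriptor index, matching the 4- and 3-byte entry sizes and the byte layout emitted in constantPoolOop.cpp above. A small reader sketch over a raw byte array (big-endian u2, as in the class-file format):

#include <cstdint>
#include <cstdio>

enum { CONSTANT_MethodHandle = 15, CONSTANT_MethodType = 16 };  // same values as above

static unsigned read_u2(const uint8_t* p) {   // class-file multi-byte values are big-endian
  return (unsigned)(p[0] << 8 | p[1]);
}

// Decode one constant-pool entry at 'p' and return its size in bytes
// (0 for tags this sketch does not handle).
static int decode_entry(const uint8_t* p) {
  switch (p[0]) {
  case CONSTANT_MethodHandle:
    std::printf("MethodHandle ref_kind=%u ref_index=%u\n", (unsigned)p[1], read_u2(p + 2));
    return 4;   // tag, ref_kind (u1), ref_index (u2)
  case CONSTANT_MethodType:
    std::printf("MethodType descriptor_index=%u\n", read_u2(p + 1));
    return 3;   // tag, descriptor_index (u2)
  default:
    return 0;
  }
}

int main() {
  // MethodHandle{invokeStatic, #31} followed by MethodType{#12}
  const uint8_t entries[] = { 15, 6, 0, 31, 16, 0, 12 };
  for (unsigned off = 0; off < sizeof(entries); ) {
    int n = decode_entry(entries + off);
    if (n == 0) break;
    off += (unsigned)n;
  }
  return 0;
}
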
--- a/hotspot/src/share/vm/prims/jvmtiCodeBlobEvents.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/prims/jvmtiCodeBlobEvents.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -217,21 +217,21 @@
 
 class nmethodDesc: public CHeapObj {
  private:
-  methodHandle _method;
+  jmethodID _jmethod_id;
   address _code_begin;
   address _code_end;
   jvmtiAddrLocationMap* _map;
   jint _map_length;
  public:
-  nmethodDesc(methodHandle method, address code_begin, address code_end,
+  nmethodDesc(jmethodID jmethod_id, address code_begin, address code_end,
               jvmtiAddrLocationMap* map, jint map_length) {
-    _method = method;
+    _jmethod_id = jmethod_id;
     _code_begin = code_begin;
     _code_end = code_end;
     _map = map;
     _map_length = map_length;
   }
-  methodHandle method() const           { return _method; }
+  jmethodID jmethod_id() const          { return _jmethod_id; }
   address code_begin() const            { return _code_begin; }
   address code_end() const              { return _code_end; }
   jvmtiAddrLocationMap* map() const     { return _map; }
@@ -323,8 +323,7 @@
   JvmtiCodeBlobEvents::build_jvmti_addr_location_map(nm, &map, &map_length);
 
   // record the nmethod details
-  methodHandle mh(nm->method());
-  nmethodDesc* snm = new nmethodDesc(mh,
+  nmethodDesc* snm = new nmethodDesc(nm->get_and_cache_jmethod_id(),
                                      nm->code_begin(),
                                      nm->code_end(),
                                      map,
@@ -367,8 +366,7 @@
   // iterate over the  list and post an event for each nmethod
   nmethodDesc* nm_desc = collector.first();
   while (nm_desc != NULL) {
-    methodOop method = nm_desc->method()();
-    jmethodID mid = method->jmethod_id();
+    jmethodID mid = nm_desc->jmethod_id();
     assert(mid != NULL, "checking");
     JvmtiExport::post_compiled_method_load(env, mid,
                                            (jint)(nm_desc->code_end() - nm_desc->code_begin()),
--- a/hotspot/src/share/vm/prims/methodComparator.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/prims/methodComparator.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -163,10 +163,10 @@
 
   case Bytecodes::_ldc   : // fall through
   case Bytecodes::_ldc_w : {
-    Bytecode_loadconstant* ldc_old = Bytecode_loadconstant_at(_s_old->method()(), _s_old->bcp());
-    Bytecode_loadconstant* ldc_new = Bytecode_loadconstant_at(_s_new->method()(), _s_new->bcp());
-    int cpi_old = ldc_old->index();
-    int cpi_new = ldc_new->index();
+    Bytecode_loadconstant* ldc_old = Bytecode_loadconstant_at(_s_old->method(), _s_old->bci());
+    Bytecode_loadconstant* ldc_new = Bytecode_loadconstant_at(_s_new->method(), _s_new->bci());
+    int cpi_old = ldc_old->pool_index();
+    int cpi_new = ldc_new->pool_index();
     constantTag tag_old = _old_cp->tag_at(cpi_old);
     constantTag tag_new = _new_cp->tag_at(cpi_new);
     if (tag_old.is_int() || tag_old.is_float()) {
@@ -187,12 +187,30 @@
       if (strcmp(_old_cp->string_at_noresolve(cpi_old),
                  _new_cp->string_at_noresolve(cpi_new)) != 0)
         return false;
-    } else { // tag_old should be klass - 4881222
+    } else if (tag_old.is_klass() || tag_old.is_unresolved_klass()) {
+      // tag_old should be klass - 4881222
       if (! (tag_new.is_unresolved_klass() || tag_new.is_klass()))
         return false;
       if (_old_cp->klass_at_noresolve(cpi_old) !=
           _new_cp->klass_at_noresolve(cpi_new))
         return false;
+    } else if (tag_old.is_method_type() && tag_new.is_method_type()) {
+      int mti_old = _old_cp->method_type_index_at(cpi_old);
+      int mti_new = _new_cp->method_type_index_at(cpi_new);
+      if ((_old_cp->symbol_at(mti_old) != _new_cp->symbol_at(mti_new)))
+        return false;
+    } else if (tag_old.is_method_handle() && tag_new.is_method_handle()) {
+      if (_old_cp->method_handle_ref_kind_at(cpi_old) !=
+          _new_cp->method_handle_ref_kind_at(cpi_new))
+        return false;
+      int mhi_old = _old_cp->method_handle_index_at(cpi_old);
+      int mhi_new = _new_cp->method_handle_index_at(cpi_new);
+      if ((_old_cp->uncached_klass_ref_at_noresolve(mhi_old) != _new_cp->uncached_klass_ref_at_noresolve(mhi_new)) ||
+          (_old_cp->uncached_name_ref_at(mhi_old) != _new_cp->uncached_name_ref_at(mhi_new)) ||
+          (_old_cp->uncached_signature_ref_at(mhi_old) != _new_cp->uncached_signature_ref_at(mhi_new)))
+        return false;
+    } else {
+      return false;  // unknown tag
     }
     break;
   }
--- a/hotspot/src/share/vm/runtime/arguments.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/runtime/arguments.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -1376,11 +1376,6 @@
   }
   no_shared_spaces();
 
-  // Set the maximum pause time goal to be a reasonable default.
-  if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
-    FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
-  }
-
   if (FLAG_IS_DEFAULT(MarkStackSize)) {
     FLAG_SET_DEFAULT(MarkStackSize, 128 * TASKQUEUE_SIZE);
   }
@@ -1513,6 +1508,9 @@
   if (AggressiveOpts && FLAG_IS_DEFAULT(BiasedLockingStartupDelay)) {
     FLAG_SET_DEFAULT(BiasedLockingStartupDelay, 500);
   }
+  if (AggressiveOpts && FLAG_IS_DEFAULT(OptimizeStringConcat)) {
+    FLAG_SET_DEFAULT(OptimizeStringConcat, true);
+  }
 #endif
 
   if (AggressiveOpts) {
@@ -1697,20 +1695,21 @@
 
   status = status && verify_percentage(GCHeapFreeLimit, "GCHeapFreeLimit");
 
-  // Check user specified sharing option conflict with Parallel GC
-  bool cannot_share = ((UseConcMarkSweepGC || CMSIncrementalMode) || UseG1GC || UseParNewGC ||
-                       UseParallelGC || UseParallelOldGC ||
-                       SOLARIS_ONLY(UseISM) NOT_SOLARIS(UseLargePages));
-
+  // Check whether user-specified sharing option conflicts with GC or page size.
+  // Both sharing and large pages are enabled by default on some platforms;
+  // large pages override sharing only if explicitly set on the command line.
+  const bool cannot_share = UseConcMarkSweepGC || CMSIncrementalMode ||
+          UseG1GC || UseParNewGC || UseParallelGC || UseParallelOldGC ||
+          UseLargePages && FLAG_IS_CMDLINE(UseLargePages);
   if (cannot_share) {
     // Either force sharing on by forcing the other options off, or
     // force sharing off.
     if (DumpSharedSpaces || ForceSharedSpaces) {
       jio_fprintf(defaultStream::error_stream(),
-                  "Reverting to Serial GC because of %s\n",
-                  ForceSharedSpaces ? " -Xshare:on" : "-Xshare:dump");
+                  "Using Serial GC and default page size because of %s\n",
+                  ForceSharedSpaces ? "-Xshare:on" : "-Xshare:dump");
       force_serial_gc();
-      FLAG_SET_DEFAULT(SOLARIS_ONLY(UseISM) NOT_SOLARIS(UseLargePages), false);
+      FLAG_SET_DEFAULT(UseLargePages, false);
     } else {
       if (UseSharedSpaces && Verbose) {
         jio_fprintf(defaultStream::error_stream(),
@@ -1719,6 +1718,8 @@
       }
       no_shared_spaces();
     }
+  } else if (UseLargePages && (UseSharedSpaces || DumpSharedSpaces)) {
+    FLAG_SET_DEFAULT(UseLargePages, false);
   }
 
   status = status && check_gc_consistency();
--- a/hotspot/src/share/vm/runtime/globals.hpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Fri Jul 02 01:36:15 2010 -0700
@@ -1975,7 +1975,7 @@
           "Adaptive size policy maximum GC pause time goal in msec, "       \
           "or (G1 Only) the max. GC time per MMU time slice")               \
                                                                             \
-  product(intx, GCPauseIntervalMillis, 500,                                 \
+  product(uintx, GCPauseIntervalMillis, 0,                                  \
           "Time slice for MMU specification")                               \
                                                                             \
   product(uintx, MaxGCMinorPauseMillis, max_uintx,                          \
--- a/hotspot/src/share/vm/runtime/jniHandles.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/runtime/jniHandles.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -66,6 +66,7 @@
 
 
 jobject JNIHandles::make_global(Handle obj) {
+  assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC");
   jobject res = NULL;
   if (!obj.is_null()) {
     // ignore null handles
@@ -81,6 +82,7 @@
 
 
 jobject JNIHandles::make_weak_global(Handle obj) {
+  assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC");
   jobject res = NULL;
   if (!obj.is_null()) {
     // ignore null handles
--- a/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -779,7 +779,7 @@
 
   // Find bytecode
   Bytecode_invoke* bytecode = Bytecode_invoke_at(caller, bci);
-  bc = bytecode->adjusted_invoke_code();
+  bc = bytecode->java_code();
   int bytecode_index = bytecode->index();
 
   // Find receiver for non-static call
--- a/hotspot/src/share/vm/runtime/stubRoutines.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/runtime/stubRoutines.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -135,28 +135,32 @@
 static void test_arraycopy_func(address func, int alignment) {
   int v = 0xcc;
   int v2 = 0x11;
-  jlong lbuffer[2];
-  jlong lbuffer2[2];
-  address buffer  = (address) lbuffer;
-  address buffer2 = (address) lbuffer2;
+  jlong lbuffer[8];
+  jlong lbuffer2[8];
+  address fbuffer  = (address) lbuffer;
+  address fbuffer2 = (address) lbuffer2;
   unsigned int i;
   for (i = 0; i < sizeof(lbuffer); i++) {
-    buffer[i] = v; buffer2[i] = v2;
+    fbuffer[i] = v; fbuffer2[i] = v2;
   }
+  // C++ does not guarantee that a jlong[] array is 8-byte aligned.
+  // Use the middle of the array so we can verify that the memory around the
+  // aligned buffer is not modified.
+  address buffer  = (address) round_to((intptr_t)&lbuffer[4], BytesPerLong);
+  address buffer2 = (address) round_to((intptr_t)&lbuffer2[4], BytesPerLong);
   // do an aligned copy
   ((arraycopy_fn)func)(buffer, buffer2, 0);
   for (i = 0; i < sizeof(lbuffer); i++) {
-    assert(buffer[i] == v && buffer2[i] == v2, "shouldn't have copied anything");
+    assert(fbuffer[i] == v && fbuffer2[i] == v2, "shouldn't have copied anything");
   }
   // adjust destination alignment
   ((arraycopy_fn)func)(buffer, buffer2 + alignment, 0);
   for (i = 0; i < sizeof(lbuffer); i++) {
-    assert(buffer[i] == v && buffer2[i] == v2, "shouldn't have copied anything");
+    assert(fbuffer[i] == v && fbuffer2[i] == v2, "shouldn't have copied anything");
   }
   // adjust source alignment
   ((arraycopy_fn)func)(buffer + alignment, buffer2, 0);
   for (i = 0; i < sizeof(lbuffer); i++) {
-    assert(buffer[i] == v && buffer2[i] == v2, "shouldn't have copied anything");
+    assert(fbuffer[i] == v && fbuffer2[i] == v2, "shouldn't have copied anything");
   }
 }
 #endif
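
The buffer trick above is easier to see outside diff form. The following standalone sketch works under the same assumption (the language does not guarantee 8-byte alignment of a jlong[]): pick an 8-byte-aligned address in the middle of a larger buffer, run a zero-length copy, then verify the whole buffer is untouched; plain memmove stands in for the arraycopy stub under test.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>

int main() {
  long long raw[8];                                            // 64 bytes of scratch space
  unsigned char* bytes = reinterpret_cast<unsigned char*>(raw);
  memset(bytes, 0xcc, sizeof(raw));

  // Round the middle of the array up to the next 8-byte boundary.
  uintptr_t mid = reinterpret_cast<uintptr_t>(&raw[4]);
  unsigned char* aligned = reinterpret_cast<unsigned char*>((mid + 7) & ~uintptr_t(7));

  memmove(aligned, aligned + 8, 0);                            // zero-length copy
  for (size_t i = 0; i < sizeof(raw); i++) {
    assert(bytes[i] == 0xcc);                                  // nothing was modified
  }
  return 0;
}
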
@@ -183,7 +187,7 @@
   test_arraycopy_func(arrayof_##type##_arraycopy(),          sizeof(HeapWord)); \
   test_arraycopy_func(arrayof_##type##_disjoint_arraycopy(), sizeof(HeapWord))
 
-  // Make sure all the arraycopy stubs properly handle zeros
+  // Make sure all the arraycopy stubs properly handle zero count
   TEST_ARRAYCOPY(jbyte);
   TEST_ARRAYCOPY(jshort);
   TEST_ARRAYCOPY(jint);
@@ -191,6 +195,25 @@
 
 #undef TEST_ARRAYCOPY
 
+#define TEST_COPYRTN(type) \
+  test_arraycopy_func(CAST_FROM_FN_PTR(address, Copy::conjoint_##type##s_atomic),  sizeof(type)); \
+  test_arraycopy_func(CAST_FROM_FN_PTR(address, Copy::arrayof_conjoint_##type##s), (int)MAX2(sizeof(HeapWord), sizeof(type)))
+
+  // Make sure all the copy runtime routines properly handle zero count
+  TEST_COPYRTN(jbyte);
+  TEST_COPYRTN(jshort);
+  TEST_COPYRTN(jint);
+  TEST_COPYRTN(jlong);
+
+#undef TEST_COPYRTN
+
+  test_arraycopy_func(CAST_FROM_FN_PTR(address, Copy::conjoint_words), sizeof(HeapWord));
+  test_arraycopy_func(CAST_FROM_FN_PTR(address, Copy::disjoint_words), sizeof(HeapWord));
+  test_arraycopy_func(CAST_FROM_FN_PTR(address, Copy::disjoint_words_atomic), sizeof(HeapWord));
+  // Aligned to BytesPerLong
+  test_arraycopy_func(CAST_FROM_FN_PTR(address, Copy::aligned_conjoint_words), sizeof(jlong));
+  test_arraycopy_func(CAST_FROM_FN_PTR(address, Copy::aligned_disjoint_words), sizeof(jlong));
+
 #endif
 }
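
Dropping the count != 0 asserts later in this file relies on every copy routine tolerating a zero count. A conjoint copy written with explicit index loops does so naturally; the sketch below is illustrative only (conjoint_ints is a made-up name, not HotSpot's Copy:: API).

#include <cassert>
#include <cstddef>

static void conjoint_ints(const int* from, int* to, size_t count) {
  if (from > to) {
    for (size_t i = 0; i < count; i++) to[i] = from[i];         // copy forward
  } else if (from < to) {
    for (size_t i = count; i > 0; i--) to[i - 1] = from[i - 1]; // copy backward, overlap-safe
  }
}

int main() {
  int a[4] = {1, 2, 3, 4};
  conjoint_ints(a, a + 1, 0);                                   // zero count: no writes at all
  assert(a[0] == 1 && a[1] == 2);
  return 0;
}
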
 
@@ -221,15 +244,13 @@
 #ifndef PRODUCT
   SharedRuntime::_jbyte_array_copy_ctr++;      // Slow-path byte array copy
 #endif // !PRODUCT
-  assert(count != 0, "count should be non-zero");
-  Copy::conjoint_bytes_atomic(src, dest, count);
+  Copy::conjoint_jbytes_atomic(src, dest, count);
 JRT_END
 
 JRT_LEAF(void, StubRoutines::jshort_copy(jshort* src, jshort* dest, size_t count))
 #ifndef PRODUCT
   SharedRuntime::_jshort_array_copy_ctr++;     // Slow-path short/char array copy
 #endif // !PRODUCT
-  assert(count != 0, "count should be non-zero");
   Copy::conjoint_jshorts_atomic(src, dest, count);
 JRT_END
 
@@ -237,7 +258,6 @@
 #ifndef PRODUCT
   SharedRuntime::_jint_array_copy_ctr++;       // Slow-path int/float array copy
 #endif // !PRODUCT
-  assert(count != 0, "count should be non-zero");
   Copy::conjoint_jints_atomic(src, dest, count);
 JRT_END
 
@@ -245,7 +265,6 @@
 #ifndef PRODUCT
   SharedRuntime::_jlong_array_copy_ctr++;      // Slow-path long/double array copy
 #endif // !PRODUCT
-  assert(count != 0, "count should be non-zero");
   Copy::conjoint_jlongs_atomic(src, dest, count);
 JRT_END
 
@@ -263,15 +282,13 @@
 #ifndef PRODUCT
   SharedRuntime::_jbyte_array_copy_ctr++;      // Slow-path byte array copy
 #endif // !PRODUCT
-  assert(count != 0, "count should be non-zero");
-  Copy::arrayof_conjoint_bytes(src, dest, count);
+  Copy::arrayof_conjoint_jbytes(src, dest, count);
 JRT_END
 
 JRT_LEAF(void, StubRoutines::arrayof_jshort_copy(HeapWord* src, HeapWord* dest, size_t count))
 #ifndef PRODUCT
   SharedRuntime::_jshort_array_copy_ctr++;     // Slow-path short/char array copy
 #endif // !PRODUCT
-  assert(count != 0, "count should be non-zero");
   Copy::arrayof_conjoint_jshorts(src, dest, count);
 JRT_END
 
@@ -279,7 +296,6 @@
 #ifndef PRODUCT
   SharedRuntime::_jint_array_copy_ctr++;       // Slow-path int/float array copy
 #endif // !PRODUCT
-  assert(count != 0, "count should be non-zero");
   Copy::arrayof_conjoint_jints(src, dest, count);
 JRT_END
 
@@ -287,7 +303,6 @@
 #ifndef PRODUCT
   SharedRuntime::_jlong_array_copy_ctr++;       // Slow-path long/double array copy
 #endif // !PRODUCT
-  assert(count != 0, "count should be non-zero");
   Copy::arrayof_conjoint_jlongs(src, dest, count);
 JRT_END
 
--- a/hotspot/src/share/vm/runtime/sweeper.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/runtime/sweeper.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -26,7 +26,7 @@
 # include "incls/_sweeper.cpp.incl"
 
 long      NMethodSweeper::_traversals = 0;   // No. of stack traversals performed
-CodeBlob* NMethodSweeper::_current = NULL;   // Current nmethod
+nmethod*  NMethodSweeper::_current = NULL;   // Current nmethod
 int       NMethodSweeper::_seen = 0 ;        // No. of blobs we have currently processed in current pass of CodeCache
 int       NMethodSweeper::_invocations = 0;  // No. of invocations left until we are completed with this pass
 
@@ -171,20 +171,16 @@
       // Since we will give up the CodeCache_lock, always skip ahead to an nmethod.
       // Other blobs can be deleted by other threads
       // Read next before we potentially delete current
-      CodeBlob* next = CodeCache::next_nmethod(_current);
+      nmethod* next = CodeCache::next_nmethod(_current);
 
       // Now ready to process nmethod and give up CodeCache_lock
       {
         MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-        process_nmethod((nmethod *)_current);
+        process_nmethod(_current);
       }
       _seen++;
       _current = next;
     }
-
-    // Skip forward to the next nmethod (if any). Code blobs other than nmethods
-    // can be freed async to us and make _current invalid while we sleep.
-    _current = CodeCache::next_nmethod(_current);
   }
 
   if (_current == NULL && !_rescan && (_locked_seen || _not_entrant_seen_on_stack)) {
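
The sweep loop above reads the successor before processing the current nmethod, because processing may free it. The same pattern, sketched with std::list standing in for the code cache (illustrative only, not the sweeper's data structure):

#include <cassert>
#include <iterator>
#include <list>

int main() {
  std::list<int> cache = {1, 2, 3, 4};
  for (auto it = cache.begin(); it != cache.end(); ) {
    auto next = std::next(it);   // read next before we potentially delete current
    if (*it % 2 == 0) {
      cache.erase(it);           // "processing" may remove the current element
    }
    it = next;                   // 'next' is still valid after the erase
  }
  assert(cache.size() == 2);     // 1 and 3 survive
  return 0;
}
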
--- a/hotspot/src/share/vm/runtime/sweeper.hpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/runtime/sweeper.hpp	Fri Jul 02 01:36:15 2010 -0700
@@ -29,7 +29,7 @@
 
 class NMethodSweeper : public AllStatic {
   static long      _traversals;   // Stack traversal count
-  static CodeBlob* _current;      // Current nmethod
+  static nmethod*  _current;      // Current nmethod
   static int       _seen;         // No. of nmethods we have currently processed in current pass of CodeCache
   static int       _invocations;  // No. of invocations left until we are completed with this pass
 
--- a/hotspot/src/share/vm/runtime/thread.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/runtime/thread.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -2700,7 +2700,7 @@
   if (in_bytes(size_in_bytes) != 0) {
     _popframe_preserved_args = NEW_C_HEAP_ARRAY(char, in_bytes(size_in_bytes));
     _popframe_preserved_args_size = in_bytes(size_in_bytes);
-    Copy::conjoint_bytes(start, _popframe_preserved_args, _popframe_preserved_args_size);
+    Copy::conjoint_jbytes(start, _popframe_preserved_args, _popframe_preserved_args_size);
   }
 }
 
--- a/hotspot/src/share/vm/runtime/vframeArray.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/runtime/vframeArray.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -355,9 +355,9 @@
       } else {
         base = iframe()->interpreter_frame_expression_stack();
       }
-      Copy::conjoint_bytes(saved_args,
-                           base,
-                           popframe_preserved_args_size_in_bytes);
+      Copy::conjoint_jbytes(saved_args,
+                            base,
+                            popframe_preserved_args_size_in_bytes);
       thread->popframe_free_preserved_args();
     }
   }
--- a/hotspot/src/share/vm/runtime/virtualspace.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/runtime/virtualspace.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -111,6 +111,35 @@
   return result;
 }
 
+// Helper method.
+static bool failed_to_reserve_as_requested(char* base, char* requested_address,
+                                           const size_t size, bool special)
+{
+  if (base == requested_address || requested_address == NULL)
+    return false; // did not fail
+
+  if (base != NULL) {
+    // A different reserve address may be acceptable in other cases,
+    // but for compressed oops the heap should be at the requested address.
+    assert(UseCompressedOops, "currently requested address used only for compressed oops");
+    if (PrintCompressedOopsMode) {
+      tty->cr();
+      tty->print_cr("Reserved memory at not requested address: " PTR_FORMAT " vs " PTR_FORMAT, base, requested_address);
+    }
+    // OS ignored requested address. Try different address.
+    if (special) {
+      if (!os::release_memory_special(base, size)) {
+        fatal("os::release_memory_special failed");
+      }
+    } else {
+      if (!os::release_memory(base, size)) {
+        fatal("os::release_memory failed");
+      }
+    }
+  }
+  return true;
+}
+
 ReservedSpace::ReservedSpace(const size_t prefix_size,
                              const size_t prefix_align,
                              const size_t suffix_size,
@@ -129,6 +158,10 @@
   assert((suffix_align & prefix_align - 1) == 0,
     "suffix_align not divisible by prefix_align");
 
+  // Assert that if noaccess_prefix is used, it is the same as prefix_align.
+  assert(noaccess_prefix == 0 ||
+         noaccess_prefix == prefix_align, "noaccess prefix wrong");
+
   // Add in noaccess_prefix to prefix_size;
   const size_t adjusted_prefix_size = prefix_size + noaccess_prefix;
   const size_t size = adjusted_prefix_size + suffix_size;
@@ -150,15 +183,16 @@
   _noaccess_prefix = 0;
   _executable = false;
 
-  // Assert that if noaccess_prefix is used, it is the same as prefix_align.
-  assert(noaccess_prefix == 0 ||
-         noaccess_prefix == prefix_align, "noaccess prefix wrong");
-
   // Optimistically try to reserve the exact size needed.
   char* addr;
   if (requested_address != 0) {
-    addr = os::attempt_reserve_memory_at(size,
-                                         requested_address-noaccess_prefix);
+    requested_address -= noaccess_prefix; // adjust address
+    assert(requested_address != NULL, "huge noaccess prefix?");
+    addr = os::attempt_reserve_memory_at(size, requested_address);
+    if (failed_to_reserve_as_requested(addr, requested_address, size, false)) {
+      // OS ignored requested address. Try different address.
+      addr = NULL;
+    }
   } else {
     addr = os::reserve_memory(size, NULL, prefix_align);
   }
@@ -222,11 +256,20 @@
   bool special = large && !os::can_commit_large_page_memory();
   char* base = NULL;
 
+  if (requested_address != 0) {
+    requested_address -= noaccess_prefix; // adjust requested address
+    assert(requested_address != NULL, "huge noaccess prefix?");
+  }
+
   if (special) {
 
     base = os::reserve_memory_special(size, requested_address, executable);
 
     if (base != NULL) {
+      if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
+        // OS ignored requested address. Try different address.
+        return;
+      }
       // Check alignment constraints
       if (alignment > 0) {
         assert((uintptr_t) base % alignment == 0,
@@ -235,6 +278,13 @@
       _special = true;
     } else {
       // failed; try to reserve regular memory below
+      if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
+                            !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
+        if (PrintCompressedOopsMode) {
+          tty->cr();
+          tty->print_cr("Reserve regular memory without large pages.");
+        }
+      }
     }
   }
 
@@ -248,8 +298,11 @@
     // important.  If available space is not detected, return NULL.
 
     if (requested_address != 0) {
-      base = os::attempt_reserve_memory_at(size,
-                                           requested_address-noaccess_prefix);
+      base = os::attempt_reserve_memory_at(size, requested_address);
+      if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
+        // OS ignored requested address. Try different address.
+        base = NULL;
+      }
     } else {
       base = os::reserve_memory(size, NULL, alignment);
     }
@@ -365,7 +418,12 @@
 }
 
 void ReservedSpace::protect_noaccess_prefix(const size_t size) {
-  // If there is noaccess prefix, return.
+  assert((_noaccess_prefix != 0) == (UseCompressedOops && _base != NULL &&
+                                     (size_t(_base + _size) > OopEncodingHeapMax) &&
+                                     Universe::narrow_oop_use_implicit_null_checks()),
+         "noaccess_prefix should be used only with non-zero-based compressed oops");
+
+  // If there is no noaccess prefix, return.
   if (_noaccess_prefix == 0) return;
 
   assert(_noaccess_prefix >= (size_t)os::vm_page_size(),
@@ -377,6 +435,10 @@
                           _special)) {
     fatal("cannot protect protection page");
   }
+  if (PrintCompressedOopsMode) {
+    tty->cr();
+    tty->print_cr("Protected page at the reserved heap base: " PTR_FORMAT " / " INTX_FORMAT " bytes", _base, _noaccess_prefix);
+  }
 
   _base += _noaccess_prefix;
   _size -= _noaccess_prefix;
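
A simplified, self-contained model of the no-access prefix bookkeeping above (all addresses and sizes are invented, and narrow_oop_base below only approximates what Universe actually computes): the usable heap starts one prefix above the reserved base, the prefix page is protected, and a null compressed oop decodes into that protected page, which is what makes implicit null checks work.

#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t reserved_base   = 0x100000000;  // hypothetical reservation (64-bit build assumed)
  const uintptr_t noaccess_prefix = 0x200000;     // hypothetical heap alignment
  const uintptr_t reserved_size   = 0x40000000;

  uintptr_t heap_base = reserved_base + noaccess_prefix;  // _base += _noaccess_prefix
  uintptr_t heap_size = reserved_size - noaccess_prefix;  // _size -= _noaccess_prefix

  uintptr_t narrow_oop_base = reserved_base;              // start of the protected page
  uintptr_t decoded_null    = narrow_oop_base + 0;        // base + (narrow_oop << shift)

  assert(decoded_null < heap_base);                       // never aliases a live object
  assert(heap_base - decoded_null <= noaccess_prefix);    // lands inside the prefix
  (void)heap_size;
  return 0;
}
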
--- a/hotspot/src/share/vm/utilities/constantTag.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/utilities/constantTag.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -28,56 +28,85 @@
 #ifndef PRODUCT
 
 void constantTag::print_on(outputStream* st) const {
+  st->print("%s", internal_name());
+}
+
+#endif // PRODUCT
+
+BasicType constantTag::basic_type() const {
   switch (_tag) {
-    case JVM_CONSTANT_Class :
-      st->print("Class");
-      break;
-    case JVM_CONSTANT_Fieldref :
-      st->print("Field");
-      break;
-    case JVM_CONSTANT_Methodref :
-      st->print("Method");
-      break;
-    case JVM_CONSTANT_InterfaceMethodref :
-      st->print("InterfaceMethod");
-      break;
-    case JVM_CONSTANT_String :
-      st->print("String");
-      break;
     case JVM_CONSTANT_Integer :
-      st->print("Integer");
-      break;
+      return T_INT;
     case JVM_CONSTANT_Float :
-      st->print("Float");
-      break;
+      return T_FLOAT;
     case JVM_CONSTANT_Long :
-      st->print("Long");
-      break;
+      return T_LONG;
     case JVM_CONSTANT_Double :
-      st->print("Double");
-      break;
-    case JVM_CONSTANT_NameAndType :
-      st->print("NameAndType");
-      break;
-    case JVM_CONSTANT_Utf8 :
-      st->print("Utf8");
-      break;
+      return T_DOUBLE;
+
+    case JVM_CONSTANT_Class :
+    case JVM_CONSTANT_String :
     case JVM_CONSTANT_UnresolvedClass :
-      st->print("Unresolved class");
-      break;
+    case JVM_CONSTANT_UnresolvedClassInError :
     case JVM_CONSTANT_ClassIndex :
-      st->print("Unresolved class index");
-      break;
     case JVM_CONSTANT_UnresolvedString :
-      st->print("Unresolved string");
-      break;
     case JVM_CONSTANT_StringIndex :
-      st->print("Unresolved string index");
-      break;
+    case JVM_CONSTANT_MethodHandle :
+    case JVM_CONSTANT_MethodType :
+    case JVM_CONSTANT_Object :
+      return T_OBJECT;
     default:
       ShouldNotReachHere();
-      break;
+      return T_ILLEGAL;
   }
 }
 
-#endif // PRODUCT
+
+
+const char* constantTag::internal_name() const {
+  switch (_tag) {
+    case JVM_CONSTANT_Invalid :
+      return "Invalid index";
+    case JVM_CONSTANT_Class :
+      return "Class";
+    case JVM_CONSTANT_Fieldref :
+      return "Field";
+    case JVM_CONSTANT_Methodref :
+      return "Method";
+    case JVM_CONSTANT_InterfaceMethodref :
+      return "InterfaceMethod";
+    case JVM_CONSTANT_String :
+      return "String";
+    case JVM_CONSTANT_Integer :
+      return "Integer";
+    case JVM_CONSTANT_Float :
+      return "Float";
+    case JVM_CONSTANT_Long :
+      return "Long";
+    case JVM_CONSTANT_Double :
+      return "Double";
+    case JVM_CONSTANT_NameAndType :
+      return "NameAndType";
+    case JVM_CONSTANT_MethodHandle :
+      return "MethodHandle";
+    case JVM_CONSTANT_MethodType :
+      return "MethodType";
+    case JVM_CONSTANT_Object :
+      return "Object";
+    case JVM_CONSTANT_Utf8 :
+      return "Utf8";
+    case JVM_CONSTANT_UnresolvedClass :
+      return "Unresolved Class";
+    case JVM_CONSTANT_UnresolvedClassInError :
+      return "Unresolved Class Error";
+    case JVM_CONSTANT_ClassIndex :
+      return "Unresolved Class Index";
+    case JVM_CONSTANT_UnresolvedString :
+      return "Unresolved String";
+    case JVM_CONSTANT_StringIndex :
+      return "Unresolved String Index";
+    default:
+      ShouldNotReachHere();
+      return "Illegal";
+  }
+}
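
A hedged sketch of why the new basic_type() mapping matters for ldc: category-2 constants (long, double) occupy two operand-stack slots, everything else one. The enum and helper below are local to this sketch, not HotSpot's BasicType.

#include <cassert>

enum SketchBasicType { T_INT, T_FLOAT, T_LONG, T_DOUBLE, T_OBJECT };

static int stack_slots(SketchBasicType t) {
  return (t == T_LONG || t == T_DOUBLE) ? 2 : 1;   // category-2 values take two slots
}

int main() {
  assert(stack_slots(T_DOUBLE) == 2);   // loaded with ldc2_w
  assert(stack_slots(T_OBJECT) == 1);   // String/Class/MethodHandle/MethodType via ldc
  return 0;
}
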
--- a/hotspot/src/share/vm/utilities/constantTag.hpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/utilities/constantTag.hpp	Fri Jul 02 01:36:15 2010 -0700
@@ -78,13 +78,24 @@
   bool is_field_or_method() const   { return is_field() || is_method() || is_interface_method(); }
   bool is_symbol() const            { return is_utf8(); }
 
+  bool is_method_type() const              { return _tag == JVM_CONSTANT_MethodType; }
+  bool is_method_handle() const            { return _tag == JVM_CONSTANT_MethodHandle; }
+
+  constantTag() {
+    _tag = JVM_CONSTANT_Invalid;
+  }
   constantTag(jbyte tag) {
     assert((tag >= 0 && tag <= JVM_CONSTANT_NameAndType) ||
+           (tag >= JVM_CONSTANT_MethodHandle && tag <= JVM_CONSTANT_MethodType) ||
            (tag >= JVM_CONSTANT_InternalMin && tag <= JVM_CONSTANT_InternalMax), "Invalid constant tag");
     _tag = tag;
   }
 
   jbyte value()                      { return _tag; }
 
+  BasicType basic_type() const;        // if used with ldc, what kind of value gets pushed?
+
+  const char* internal_name() const;  // for error reporting
+
   void print_on(outputStream* st) const PRODUCT_RETURN;
 };
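
The widened constructor assert admits the two JSR 292 tags that fall outside the original range. Below is a standalone sketch of the spec-defined part of that range check, using class-file tag numbers from the JVM specification (the enum is local to the sketch and omits HotSpot's internal tags):

#include <cassert>

enum {
  CONSTANT_NameAndType  = 12,
  CONSTANT_MethodHandle = 15,
  CONSTANT_MethodType   = 16
};

static bool tag_is_valid(int tag) {
  return (tag >= 0 && tag <= CONSTANT_NameAndType) ||
         (tag >= CONSTANT_MethodHandle && tag <= CONSTANT_MethodType);
}

int main() {
  assert(tag_is_valid(CONSTANT_MethodHandle));
  assert(!tag_is_valid(13));   // 13 and 14 are not defined class-file tags
  return 0;
}
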
--- a/hotspot/src/share/vm/utilities/copy.cpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/utilities/copy.cpp	Fri Jul 02 01:36:15 2010 -0700
@@ -48,7 +48,7 @@
     Copy::conjoint_jshorts_atomic((jshort*) src, (jshort*) dst, size / sizeof(jshort));
   } else {
     // Not aligned, so no need to be atomic.
-    Copy::conjoint_bytes((void*) src, (void*) dst, size);
+    Copy::conjoint_jbytes((void*) src, (void*) dst, size);
   }
 }
 
--- a/hotspot/src/share/vm/utilities/copy.hpp	Wed Jul 05 17:16:58 2017 +0200
+++ b/hotspot/src/share/vm/utilities/copy.hpp	Fri Jul 02 01:36:15 2010 -0700
@@ -73,6 +73,9 @@
   // whole alignment units.  E.g., if BytesPerLong is 2x word alignment, an odd
   // count may copy an extra word.  In the arrayof case, we are allowed to copy
   // only the number of copy units specified.
+  //
+  // All callees check count for 0 themselves, so callers no longer need to
+  // assert that count is non-zero.
+  //
 
   // HeapWords
 
@@ -99,7 +102,6 @@
   // Object-aligned words,  conjoint, not atomic on each word
   static void aligned_conjoint_words(HeapWord* from, HeapWord* to, size_t count) {
     assert_params_aligned(from, to);
-    assert_non_zero(count);
     pd_aligned_conjoint_words(from, to, count);
   }
 
@@ -107,49 +109,42 @@
   static void aligned_disjoint_words(HeapWord* from, HeapWord* to, size_t count) {
     assert_params_aligned(from, to);
     assert_disjoint(from, to, count);
-    assert_non_zero(count);
     pd_aligned_disjoint_words(from, to, count);
   }
 
   // bytes, jshorts, jints, jlongs, oops
 
   // bytes,                 conjoint, not atomic on each byte (not that it matters)
-  static void conjoint_bytes(void* from, void* to, size_t count) {
-    assert_non_zero(count);
+  static void conjoint_jbytes(void* from, void* to, size_t count) {
     pd_conjoint_bytes(from, to, count);
   }
 
   // bytes,                 conjoint, atomic on each byte (not that it matters)
-  static void conjoint_bytes_atomic(void* from, void* to, size_t count) {
-    assert_non_zero(count);
+  static void conjoint_jbytes_atomic(void* from, void* to, size_t count) {
     pd_conjoint_bytes(from, to, count);
   }
 
   // jshorts,               conjoint, atomic on each jshort
   static void conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) {
     assert_params_ok(from, to, LogBytesPerShort);
-    assert_non_zero(count);
     pd_conjoint_jshorts_atomic(from, to, count);
   }
 
   // jints,                 conjoint, atomic on each jint
   static void conjoint_jints_atomic(jint* from, jint* to, size_t count) {
     assert_params_ok(from, to, LogBytesPerInt);
-    assert_non_zero(count);
     pd_conjoint_jints_atomic(from, to, count);
   }
 
   // jlongs,                conjoint, atomic on each jlong
   static void conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) {
     assert_params_ok(from, to, LogBytesPerLong);
-    assert_non_zero(count);
     pd_conjoint_jlongs_atomic(from, to, count);
   }
 
   // oops,                  conjoint, atomic on each oop
   static void conjoint_oops_atomic(oop* from, oop* to, size_t count) {
     assert_params_ok(from, to, LogBytesPerHeapOop);
-    assert_non_zero(count);
     pd_conjoint_oops_atomic(from, to, count);
   }
 
@@ -157,7 +152,6 @@
   static void conjoint_oops_atomic(narrowOop* from, narrowOop* to, size_t count) {
     assert(sizeof(narrowOop) == sizeof(jint), "this cast is wrong");
     assert_params_ok(from, to, LogBytesPerInt);
-    assert_non_zero(count);
     pd_conjoint_jints_atomic((jint*)from, (jint*)to, count);
   }
 
@@ -168,36 +162,31 @@
   static void conjoint_memory_atomic(void* from, void* to, size_t size);
 
   // bytes,                 conjoint array, atomic on each byte (not that it matters)
-  static void arrayof_conjoint_bytes(HeapWord* from, HeapWord* to, size_t count) {
-    assert_non_zero(count);
+  static void arrayof_conjoint_jbytes(HeapWord* from, HeapWord* to, size_t count) {
     pd_arrayof_conjoint_bytes(from, to, count);
   }
 
   // jshorts,               conjoint array, atomic on each jshort
   static void arrayof_conjoint_jshorts(HeapWord* from, HeapWord* to, size_t count) {
     assert_params_ok(from, to, LogBytesPerShort);
-    assert_non_zero(count);
     pd_arrayof_conjoint_jshorts(from, to, count);
   }
 
   // jints,                 conjoint array, atomic on each jint
   static void arrayof_conjoint_jints(HeapWord* from, HeapWord* to, size_t count) {
     assert_params_ok(from, to, LogBytesPerInt);
-    assert_non_zero(count);
     pd_arrayof_conjoint_jints(from, to, count);
   }
 
   // jlongs,                conjoint array, atomic on each jlong
   static void arrayof_conjoint_jlongs(HeapWord* from, HeapWord* to, size_t count) {
     assert_params_ok(from, to, LogBytesPerLong);
-    assert_non_zero(count);
     pd_arrayof_conjoint_jlongs(from, to, count);
   }
 
   // oops,                  conjoint array, atomic on each oop
   static void arrayof_conjoint_oops(HeapWord* from, HeapWord* to, size_t count) {
     assert_params_ok(from, to, LogBytesPerHeapOop);
-    assert_non_zero(count);
     pd_arrayof_conjoint_oops(from, to, count);
   }
 
@@ -319,14 +308,6 @@
 #endif
   }
 
-  static void assert_non_zero(size_t count) {
-#ifdef ASSERT
-    if (count == 0) {
-      basic_fatal("count must be non-zero");
-    }
-#endif
-  }
-
   static void assert_byte_count_ok(size_t byte_count, size_t unit_size) {
 #ifdef ASSERT
     if ((size_t)round_to(byte_count, unit_size) != byte_count) {