Merge
author: duke
Wed, 05 Jul 2017 18:13:47 +0200
changeset 12973 557e12ef205a
parent 12972 96e306b8f5e5
parent 12971 ee8f99ad5223
child 12974 7e981cb0ad6a
--- a/.hgtags-top-repo	Thu Jun 14 13:14:50 2012 -0700
+++ b/.hgtags-top-repo	Wed Jul 05 18:13:47 2017 +0200
@@ -164,3 +164,4 @@
 a2b2d435f1d275fa8010774c653197c64e326d3a jdk8-b40
 1a8c7c530f8a9b7f5bdb9b0693b2f5435ca5205e jdk8-b41
 1ce5dc16416611c58b7480ca67a2eee5153498a6 jdk8-b42
+661c9aae602bbd9766d12590800c90f1edd1d8dd jdk8-b43
--- a/corba/.hgtags	Thu Jun 14 13:14:50 2012 -0700
+++ b/corba/.hgtags	Wed Jul 05 18:13:47 2017 +0200
@@ -164,3 +164,4 @@
 56d030e5035fdee5bba6cf318a06287fda5d67ec jdk8-b40
 113f0d5f0a08aa0947b3edf783b603e7f042748a jdk8-b41
 79cc42c9c71bbd6630ede681642e98f5e4a841fa jdk8-b42
+cd879aff5d3cc1f58829aab3116880aa19525b78 jdk8-b43
--- a/hotspot/.hgtags	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/.hgtags	Wed Jul 05 18:13:47 2017 +0200
@@ -253,3 +253,5 @@
 37add4fa0296705f67481e1fd50e2900cd25e39b jdk8-b41
 bd568544be7fcd12a9327e6c448592198d57b043 hs24-b13
 55954061c6e8750ea39a63523fd65d580db6eeb1 jdk8-b42
+e77b8e0ed1f84e3e268239e276c7ab64fa573baa jdk8-b43
+5ba29a1db46ecb80a321ca873adb56a3fe6ad320 hs24-b14
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ConstMethod.java	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ConstMethod.java	Wed Jul 05 18:13:47 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -50,8 +50,7 @@
 
   private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
     Type type                  = db.lookupType("constMethodOopDesc");
-    // Backpointer to non-const methodOop
-    method                     = new OopField(type.getOopField("_method"), 0);
+    constants                  = new OopField(type.getOopField("_constants"), 0);
     // The exception handler table. 4-tuples of ints [start_pc, end_pc,
     // handler_pc, catch_type index] For methods with no exceptions the
     // table is pointing to Universe::the_empty_int_array
@@ -69,6 +68,7 @@
     nameIndex                  = new CIntField(type.getCIntegerField("_name_index"), 0);
     signatureIndex             = new CIntField(type.getCIntegerField("_signature_index"), 0);
     genericSignatureIndex      = new CIntField(type.getCIntegerField("_generic_signature_index"),0);
+    idnum                      = new CIntField(type.getCIntegerField("_method_idnum"), 0);
 
     // start of byte code
     bytecodeOffset = type.getSize();
@@ -85,7 +85,7 @@
   }
 
   // Fields
-  private static OopField  method;
+  private static OopField  constants;
   private static OopField  exceptionTable;
   private static CIntField constMethodSize;
   private static ByteField flags;
@@ -93,6 +93,7 @@
   private static CIntField nameIndex;
   private static CIntField signatureIndex;
   private static CIntField genericSignatureIndex;
+  private static CIntField idnum;
 
   // start of bytecode
   private static long bytecodeOffset;
@@ -100,9 +101,15 @@
   private static long checkedExceptionElementSize;
   private static long localVariableTableElementSize;
 
+  public Method getMethod() {
+    InstanceKlass ik = (InstanceKlass)getConstants().getPoolHolder();
+    ObjArray methods = ik.getMethods();
+    return (Method)methods.getObjAt(getIdNum());
+  }
+
   // Accessors for declared fields
-  public Method getMethod() {
-    return (Method) method.getValue(this);
+  public ConstantPool getConstants() {
+    return (ConstantPool) constants.getValue(this);
   }
 
   public TypeArray getExceptionTable() {
@@ -133,6 +140,10 @@
     return genericSignatureIndex.getValue(this);
   }
 
+  public long getIdNum() {
+    return idnum.getValue(this);
+  }
+
   public Symbol getName() {
     return getMethod().getName();
   }
@@ -223,7 +234,7 @@
   public void iterateFields(OopVisitor visitor, boolean doVMFields) {
     super.iterateFields(visitor, doVMFields);
     if (doVMFields) {
-      visitor.doOop(method, true);
+      visitor.doOop(constants, true);
       visitor.doOop(exceptionTable, true);
       visitor.doCInt(constMethodSize, true);
       visitor.doByte(flags, true);
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Method.java	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Method.java	Wed Jul 05 18:13:47 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -48,7 +48,6 @@
   private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
     Type type                  = db.lookupType("methodOopDesc");
     constMethod                = new OopField(type.getOopField("_constMethod"), 0);
-    constants                  = new OopField(type.getOopField("_constants"), 0);
     methodData                 = new OopField(type.getOopField("_method_data"), 0);
     methodSize                 = new CIntField(type.getCIntegerField("_method_size"), 0);
     maxStack                   = new CIntField(type.getCIntegerField("_max_stack"), 0);
@@ -83,7 +82,6 @@
 
   // Fields
   private static OopField  constMethod;
-  private static OopField  constants;
   private static OopField  methodData;
   private static CIntField methodSize;
   private static CIntField maxStack;
@@ -125,7 +123,9 @@
 
   // Accessors for declared fields
   public ConstMethod  getConstMethod()                { return (ConstMethod)  constMethod.getValue(this);       }
-  public ConstantPool getConstants()                  { return (ConstantPool) constants.getValue(this);         }
+  public ConstantPool getConstants()                  {
+    return getConstMethod().getConstants();
+  }
   public MethodData   getMethodData()                 { return (MethodData) methodData.getValue(this);          }
   public TypeArray    getExceptionTable()             { return getConstMethod().getExceptionTable();            }
   /** WARNING: this is in words, not useful in this system; use getObjectSize() instead */
@@ -281,7 +281,6 @@
     super.iterateFields(visitor, doVMFields);
     if (doVMFields) {
       visitor.doOop(constMethod, true);
-      visitor.doOop(constants, true);
       visitor.doCInt(methodSize, true);
       visitor.doCInt(maxStack, true);
       visitor.doCInt(maxLocals, true);
--- a/hotspot/make/bsd/makefiles/gcc.make	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/make/bsd/makefiles/gcc.make	Wed Jul 05 18:13:47 2017 +0200
@@ -214,7 +214,7 @@
 
 # Flags for generating make dependency flags.
 ifneq ("${CC_VER_MAJOR}", "2")
-DEPFLAGS = -MMD -MP -MF $(DEP_DIR)/$(@:%=%.d)
+DEPFLAGS = -fpch-deps -MMD -MP -MF $(DEP_DIR)/$(@:%=%.d)
 endif
 
 # -DDONT_USE_PRECOMPILED_HEADER will exclude all includes in precompiled.hpp.
--- a/hotspot/make/hotspot_version	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/make/hotspot_version	Wed Jul 05 18:13:47 2017 +0200
@@ -35,7 +35,7 @@
 
 HS_MAJOR_VER=24
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=13
+HS_BUILD_NUMBER=14
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8
--- a/hotspot/make/jprt.properties	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/make/jprt.properties	Wed Jul 05 18:13:47 2017 +0200
@@ -54,72 +54,72 @@
 # Define the Solaris platforms we want for the various releases
 jprt.my.solaris.sparc.jdk8=solaris_sparc_5.10
 jprt.my.solaris.sparc.jdk7=solaris_sparc_5.10
-jprt.my.solaris.sparc.jdk7u4=${jprt.my.solaris.sparc.jdk7}
+jprt.my.solaris.sparc.jdk7u6=${jprt.my.solaris.sparc.jdk7}
 jprt.my.solaris.sparc=${jprt.my.solaris.sparc.${jprt.tools.default.release}}
 
 jprt.my.solaris.sparcv9.jdk8=solaris_sparcv9_5.10
 jprt.my.solaris.sparcv9.jdk7=solaris_sparcv9_5.10
-jprt.my.solaris.sparcv9.jdk7u4=${jprt.my.solaris.sparcv9.jdk7}
+jprt.my.solaris.sparcv9.jdk7u6=${jprt.my.solaris.sparcv9.jdk7}
 jprt.my.solaris.sparcv9=${jprt.my.solaris.sparcv9.${jprt.tools.default.release}}
 
 jprt.my.solaris.i586.jdk8=solaris_i586_5.10
 jprt.my.solaris.i586.jdk7=solaris_i586_5.10
-jprt.my.solaris.i586.jdk7u4=${jprt.my.solaris.i586.jdk7}
+jprt.my.solaris.i586.jdk7u6=${jprt.my.solaris.i586.jdk7}
 jprt.my.solaris.i586=${jprt.my.solaris.i586.${jprt.tools.default.release}}
 
 jprt.my.solaris.x64.jdk8=solaris_x64_5.10
 jprt.my.solaris.x64.jdk7=solaris_x64_5.10
-jprt.my.solaris.x64.jdk7u4=${jprt.my.solaris.x64.jdk7}
+jprt.my.solaris.x64.jdk7u6=${jprt.my.solaris.x64.jdk7}
 jprt.my.solaris.x64=${jprt.my.solaris.x64.${jprt.tools.default.release}}
 
 jprt.my.linux.i586.jdk8=linux_i586_2.6
 jprt.my.linux.i586.jdk7=linux_i586_2.6
-jprt.my.linux.i586.jdk7u4=${jprt.my.linux.i586.jdk7}
+jprt.my.linux.i586.jdk7u6=${jprt.my.linux.i586.jdk7}
 jprt.my.linux.i586=${jprt.my.linux.i586.${jprt.tools.default.release}}
 
 jprt.my.linux.x64.jdk8=linux_x64_2.6
 jprt.my.linux.x64.jdk7=linux_x64_2.6
-jprt.my.linux.x64.jdk7u4=${jprt.my.linux.x64.jdk7}
+jprt.my.linux.x64.jdk7u6=${jprt.my.linux.x64.jdk7}
 jprt.my.linux.x64=${jprt.my.linux.x64.${jprt.tools.default.release}}
 
 jprt.my.linux.ppc.jdk8=linux_ppc_2.6
 jprt.my.linux.ppc.jdk7=linux_ppc_2.6
-jprt.my.linux.ppc.jdk7u4=${jprt.my.linux.ppc.jdk7}
+jprt.my.linux.ppc.jdk7u6=${jprt.my.linux.ppc.jdk7}
 jprt.my.linux.ppc=${jprt.my.linux.ppc.${jprt.tools.default.release}}
 
 jprt.my.linux.ppcv2.jdk8=linux_ppcv2_2.6
 jprt.my.linux.ppcv2.jdk7=linux_ppcv2_2.6
-jprt.my.linux.ppcv2.jdk7u4=${jprt.my.linux.ppcv2.jdk7}
+jprt.my.linux.ppcv2.jdk7u6=${jprt.my.linux.ppcv2.jdk7}
 jprt.my.linux.ppcv2=${jprt.my.linux.ppcv2.${jprt.tools.default.release}}
 
 jprt.my.linux.ppcsflt.jdk8=linux_ppcsflt_2.6
 jprt.my.linux.ppcsflt.jdk7=linux_ppcsflt_2.6
-jprt.my.linux.ppcsflt.jdk7u4=${jprt.my.linux.ppcsflt.jdk7}
+jprt.my.linux.ppcsflt.jdk7u6=${jprt.my.linux.ppcsflt.jdk7}
 jprt.my.linux.ppcsflt=${jprt.my.linux.ppcsflt.${jprt.tools.default.release}}
 
 jprt.my.linux.armvfp.jdk8=linux_armvfp_2.6
 jprt.my.linux.armvfp.jdk7=linux_armvfp_2.6
-jprt.my.linux.armvfp.jdk7u4=${jprt.my.linux.armvfp.jdk7}
+jprt.my.linux.armvfp.jdk7u6=${jprt.my.linux.armvfp.jdk7}
 jprt.my.linux.armvfp=${jprt.my.linux.armvfp.${jprt.tools.default.release}}
 
 jprt.my.linux.armsflt.jdk8=linux_armsflt_2.6
 jprt.my.linux.armsflt.jdk7=linux_armsflt_2.6
-jprt.my.linux.armsflt.jdk7u4=${jprt.my.linux.armsflt.jdk7}
+jprt.my.linux.armsflt.jdk7u6=${jprt.my.linux.armsflt.jdk7}
 jprt.my.linux.armsflt=${jprt.my.linux.armsflt.${jprt.tools.default.release}}
 
 jprt.my.macosx.x64.jdk8=macosx_x64_10.7
 jprt.my.macosx.x64.jdk7=macosx_x64_10.7
-jprt.my.macosx.x64.jdk7u4=${jprt.my.macosx.x64.jdk7}
+jprt.my.macosx.x64.jdk7u6=${jprt.my.macosx.x64.jdk7}
 jprt.my.macosx.x64=${jprt.my.macosx.x64.${jprt.tools.default.release}}
 
 jprt.my.windows.i586.jdk8=windows_i586_5.1
 jprt.my.windows.i586.jdk7=windows_i586_5.1
-jprt.my.windows.i586.jdk7u4=${jprt.my.windows.i586.jdk7}
+jprt.my.windows.i586.jdk7u6=${jprt.my.windows.i586.jdk7}
 jprt.my.windows.i586=${jprt.my.windows.i586.${jprt.tools.default.release}}
 
 jprt.my.windows.x64.jdk8=windows_x64_5.2
 jprt.my.windows.x64.jdk7=windows_x64_5.2
-jprt.my.windows.x64.jdk7u4=${jprt.my.windows.x64.jdk7}
+jprt.my.windows.x64.jdk7u6=${jprt.my.windows.x64.jdk7}
 jprt.my.windows.x64=${jprt.my.windows.x64.${jprt.tools.default.release}}
 
 # Standard list of jprt build targets for this source tree
@@ -154,7 +154,7 @@
 
 jprt.build.targets.jdk8=${jprt.build.targets.all}
 jprt.build.targets.jdk7=${jprt.build.targets.all}
-jprt.build.targets.jdk7u4=${jprt.build.targets.all}
+jprt.build.targets.jdk7u6=${jprt.build.targets.all}
 jprt.build.targets=${jprt.build.targets.${jprt.tools.default.release}}
 
 # Subset lists of test targets for this source tree
@@ -346,12 +346,12 @@
     ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_ParNewGC, \
     ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_CMS, \
     ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_G1, \
-    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_ParOldGC
-#    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_default, \
-#    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_default_tiered, \
-#    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_ParallelGC, \
-#    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_G1, \
-#    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_ParOldGC
+    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_ParOldGC, \
+    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_default, \
+    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_default_nontiered, \
+    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_ParallelGC, \
+    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_G1, \
+    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_ParOldGC
 
 jprt.my.windows.i586.test.targets = \
     ${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-jvm98, \
@@ -447,7 +447,7 @@
 
 jprt.test.targets.jdk8=${jprt.test.targets.standard}
 jprt.test.targets.jdk7=${jprt.test.targets.standard}
-jprt.test.targets.jdk7u4=${jprt.test.targets.jdk7}
+jprt.test.targets.jdk7u6=${jprt.test.targets.jdk7}
 jprt.test.targets=${jprt.test.targets.${jprt.tools.default.release}}
 
 # The default test/Makefile targets that should be run
@@ -507,6 +507,9 @@
 
 jprt.make.rule.test.targets.jdk8=${jprt.make.rule.test.targets.standard}
 jprt.make.rule.test.targets.jdk7=${jprt.make.rule.test.targets.standard}
-jprt.make.rule.test.targets.jdk7u4=${jprt.make.rule.test.targets.jdk7}
+jprt.make.rule.test.targets.jdk7u6=${jprt.make.rule.test.targets.jdk7}
 jprt.make.rule.test.targets=${jprt.make.rule.test.targets.${jprt.tools.default.release}}
 
+# 7155453: Work-around to prevent popups on OSX from blocking test completion
+# but the work-around is added to all platforms to be consistent 
+jprt.jbb.options=-Djava.awt.headless=true
--- a/hotspot/make/linux/makefiles/gcc.make	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/make/linux/makefiles/gcc.make	Wed Jul 05 18:13:47 2017 +0200
@@ -166,7 +166,7 @@
 
 # Flags for generating make dependency flags.
 ifneq ("${CC_VER_MAJOR}", "2")
-DEPFLAGS = -MMD -MP -MF $(DEP_DIR)/$(@:%=%.d)
+DEPFLAGS = -fpch-deps -MMD -MP -MF $(DEP_DIR)/$(@:%=%.d)
 endif
 
 # -DDONT_USE_PRECOMPILED_HEADER will exclude all includes in precompiled.hpp.
--- a/hotspot/make/solaris/makefiles/gcc.make	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/make/solaris/makefiles/gcc.make	Wed Jul 05 18:13:47 2017 +0200
@@ -141,7 +141,7 @@
 
 # Flags for generating make dependency flags.
 ifneq ("${CC_VER_MAJOR}", "2")
-DEPFLAGS = -MMD -MP -MF $(DEP_DIR)/$(@:%=%.d)
+DEPFLAGS = -fpch-deps -MMD -MP -MF $(DEP_DIR)/$(@:%=%.d)
 endif
 
 # -DDONT_USE_PRECOMPILED_HEADER will exclude all includes in precompiled.hpp.
--- a/hotspot/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp	Wed Jul 05 18:13:47 2017 +0200
@@ -644,30 +644,6 @@
 }
 
 
-void LIRGenerator::do_AttemptUpdate(Intrinsic* x) {
-  assert(x->number_of_arguments() == 3, "wrong type");
-  LIRItem obj       (x->argument_at(0), this);  // AtomicLong object
-  LIRItem cmp_value (x->argument_at(1), this);  // value to compare with field
-  LIRItem new_value (x->argument_at(2), this);  // replace field with new_value if it matches cmp_value
-
-  obj.load_item();
-  cmp_value.load_item();
-  new_value.load_item();
-
-  // generate compare-and-swap and produce zero condition if swap occurs
-  int value_offset = sun_misc_AtomicLongCSImpl::value_offset();
-  LIR_Opr addr = FrameMap::O7_opr;
-  __ add(obj.result(), LIR_OprFact::intConst(value_offset), addr);
-  LIR_Opr t1 = FrameMap::G1_opr;  // temp for 64-bit value
-  LIR_Opr t2 = FrameMap::G3_opr;  // temp for 64-bit value
-  __ cas_long(addr, cmp_value.result(), new_value.result(), t1, t2);
-
-  // generate conditional move of boolean result
-  LIR_Opr result = rlock_result(x);
-  __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0), result, T_LONG);
-}
-
-
 void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
   assert(x->number_of_arguments() == 4, "wrong type");
   LIRItem obj   (x->argument_at(0), this);  // object
@@ -989,10 +965,10 @@
   if (!x->klass()->is_loaded() || PatchALot) {
     patching_info = state_for(x, x->state_before());
 
-    // cannot re-use same xhandlers for multiple CodeEmitInfos, so
-    // clone all handlers.  This is handled transparently in other
-    // places by the CodeEmitInfo cloning logic but is handled
-    // specially here because a stub isn't being used.
+    // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
+    // clone all handlers (NOTE: Usually this is handled transparently
+    // by the CodeEmitInfo cloning logic in CodeStub constructors but
+    // is done explicitly here because a stub isn't being used).
     x->set_exception_handlers(new XHandlers(x->exception_handlers()));
   }
   CodeEmitInfo* info = state_for(x, x->state());
--- a/hotspot/src/cpu/sparc/vm/cppInterpreter_sparc.cpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/cpu/sparc/vm/cppInterpreter_sparc.cpp	Wed Jul 05 18:13:47 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -490,7 +490,8 @@
                       ConstantPoolCacheEntry::size()) * BytesPerWord), G1_scratch);
 
     // get constant pool cache
-    __ ld_ptr(G5_method, in_bytes(methodOopDesc::constants_offset()), G3_scratch);
+    __ ld_ptr(G5_method, in_bytes(methodOopDesc::const_offset()), G3_scratch);
+    __ ld_ptr(G3_scratch, in_bytes(constMethodOopDesc::constants_offset()), G3_scratch);
     __ ld_ptr(G3_scratch, constantPoolOopDesc::cache_offset_in_bytes(), G3_scratch);
 
     // get specific constant pool cache entry
@@ -768,7 +769,8 @@
     // for static methods insert the mirror argument
     const int mirror_offset = in_bytes(Klass::java_mirror_offset());
 
-    __ ld_ptr(Address(G5_method, 0, in_bytes(methodOopDesc:: constants_offset())), O1);
+    __ ld_ptr(Address(G5_method, 0, in_bytes(methodOopDesc:: const_offset())), O1);
+    __ ld_ptr(Address(O1, 0, in_bytes(constMethodOopDesc::constants_offset())), O1);
     __ ld_ptr(Address(O1, 0, constantPoolOopDesc::pool_holder_offset_in_bytes()), O1);
     __ ld_ptr(O1, mirror_offset, O1);
     // where the mirror handle body is allocated:
@@ -1047,7 +1049,7 @@
   assert_different_registers(state, prev_state);
   assert_different_registers(prev_state, G3_scratch);
   const Register Gtmp = G3_scratch;
-  const Address constants         (G5_method, 0, in_bytes(methodOopDesc::constants_offset()));
+  const Address constMethod       (G5_method, 0, in_bytes(methodOopDesc::const_offset()));
   const Address access_flags      (G5_method, 0, in_bytes(methodOopDesc::access_flags_offset()));
   const Address size_of_parameters(G5_method, 0, in_bytes(methodOopDesc::size_of_parameters_offset()));
   const Address max_stack         (G5_method, 0, in_bytes(methodOopDesc::max_stack_offset()));
@@ -1155,7 +1157,8 @@
   __ set((int) BytecodeInterpreter::method_entry, O1);
   __ st(O1, XXX_STATE(_msg));
 
-  __ ld_ptr(constants, O3);
+  __ ld_ptr(constMethod, O3);
+  __ ld_ptr(O3, in_bytes(constMethodOopDesc::constants_offset()), O3);
   __ ld_ptr(O3, constantPoolOopDesc::cache_offset_in_bytes(), O2);
   __ st_ptr(O2, XXX_STATE(_constants));
 
@@ -1178,7 +1181,8 @@
     __ ld_ptr(XXX_STATE(_locals), O1);
     __ br( Assembler::zero, true, Assembler::pt, got_obj);
     __ delayed()->ld_ptr(O1, 0, O1);                  // get receiver for not-static case
-    __ ld_ptr(constants, O1);
+    __ ld_ptr(constMethod, O1);
+    __ ld_ptr( O1, in_bytes(constMethodOopDesc::constants_offset()), O1);
     __ ld_ptr( O1, constantPoolOopDesc::pool_holder_offset_in_bytes(), O1);
     // lock the mirror, not the klassOop
     __ ld_ptr( O1, mirror_offset, O1);
@@ -1536,7 +1540,7 @@
   const Register Gtmp1 = G3_scratch;
   // const Register Lmirror = L1;     // native mirror (native calls only)
 
-  const Address constants         (G5_method, 0, in_bytes(methodOopDesc::constants_offset()));
+  const Address constMethod       (G5_method, 0, in_bytes(methodOopDesc::const_offset()));
   const Address access_flags      (G5_method, 0, in_bytes(methodOopDesc::access_flags_offset()));
   const Address size_of_parameters(G5_method, 0, in_bytes(methodOopDesc::size_of_parameters_offset()));
   const Address max_stack         (G5_method, 0, in_bytes(methodOopDesc::max_stack_offset()));
--- a/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp	Wed Jul 05 18:13:47 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -934,8 +934,14 @@
 }
 
 
+void InterpreterMacroAssembler::get_const(Register Rdst) {
+  ld_ptr(Lmethod, in_bytes(methodOopDesc::const_offset()), Rdst);
+}
+
+
 void InterpreterMacroAssembler::get_constant_pool(Register Rdst) {
-  ld_ptr(Lmethod, in_bytes(methodOopDesc::constants_offset()), Rdst);
+  get_const(Rdst);
+  ld_ptr(Rdst, in_bytes(constMethodOopDesc::constants_offset()), Rdst);
 }
 
 
--- a/hotspot/src/cpu/sparc/vm/interp_masm_sparc.hpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/cpu/sparc/vm/interp_masm_sparc.hpp	Wed Jul 05 18:13:47 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -205,6 +205,7 @@
   void index_check(Register array, Register index, int index_shift, Register tmp, Register res);
   void index_check_without_pop(Register array, Register index, int index_shift, Register tmp, Register res);
 
+  void get_const(Register Rdst);
   void get_constant_pool(Register Rdst);
   void get_constant_pool_cache(Register Rdst);
   void get_cpool_and_tags(Register Rcpool, Register Rtags);
--- a/hotspot/src/cpu/sparc/vm/sparc.ad	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/cpu/sparc/vm/sparc.ad	Wed Jul 05 18:13:47 2017 +0200
@@ -827,7 +827,6 @@
       // a Load
       // inputs are (0:control, 1:memory, 2:address)
       if (!(n->ideal_Opcode()==ld_op)       && // Following are special cases
-          !(n->ideal_Opcode()==Op_LoadLLocked && ld_op==Op_LoadI) &&
           !(n->ideal_Opcode()==Op_LoadPLocked && ld_op==Op_LoadP) &&
           !(n->ideal_Opcode()==Op_LoadI     && ld_op==Op_LoadF) &&
           !(n->ideal_Opcode()==Op_LoadF     && ld_op==Op_LoadI) &&
@@ -7306,17 +7305,6 @@
   ins_pipe(iload_mem);
 %}
 
-// LoadL-locked.  Same as a regular long load when used with a compare-swap
-instruct loadLLocked(iRegL dst, memory mem) %{
-  match(Set dst (LoadLLocked mem));
-  ins_cost(MEMORY_REF_COST);
-  size(4);
-  format %{ "LDX    $mem,$dst\t! long" %}
-  opcode(Assembler::ldx_op3);
-  ins_encode(simple_form3_mem_reg( mem, dst ) );
-  ins_pipe(iload_mem);
-%}
-
 instruct storePConditional( iRegP heap_top_ptr, iRegP oldval, g3RegP newval, flagsRegP pcc ) %{
   match(Set pcc (StorePConditional heap_top_ptr (Binary oldval newval)));
   effect( KILL newval );
--- a/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Wed Jul 05 18:13:47 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -371,7 +371,8 @@
     __ br( Assembler::zero, true, Assembler::pt, done);
     __ delayed()->ld_ptr(Llocals, Interpreter::local_offset_in_bytes(0), O0); // get receiver for not-static case
 
-    __ ld_ptr( Lmethod, in_bytes(methodOopDesc::constants_offset()), O0);
+    __ ld_ptr( Lmethod, in_bytes(methodOopDesc::const_offset()), O0);
+    __ ld_ptr( O0, in_bytes(constMethodOopDesc::constants_offset()), O0);
     __ ld_ptr( O0, constantPoolOopDesc::pool_holder_offset_in_bytes(), O0);
 
     // lock the mirror, not the klassOop
@@ -670,7 +671,8 @@
                       ConstantPoolCacheEntry::size()) * BytesPerWord), G1_scratch);
 
     // get constant pool cache
-    __ ld_ptr(G5_method, methodOopDesc::constants_offset(), G3_scratch);
+    __ ld_ptr(G5_method, methodOopDesc::const_offset(), G3_scratch);
+    __ ld_ptr(G3_scratch, constMethodOopDesc::constants_offset(), G3_scratch);
     __ ld_ptr(G3_scratch, constantPoolOopDesc::cache_offset_in_bytes(), G3_scratch);
 
     // get specific constant pool cache entry
@@ -993,7 +995,8 @@
     // for static methods insert the mirror argument
     const int mirror_offset = in_bytes(Klass::java_mirror_offset());
 
-    __ ld_ptr(Lmethod, methodOopDesc:: constants_offset(), O1);
+    __ ld_ptr(Lmethod, methodOopDesc:: const_offset(), O1);
+    __ ld_ptr(O1, constMethodOopDesc::constants_offset(), O1);
     __ ld_ptr(O1, constantPoolOopDesc::pool_holder_offset_in_bytes(), O1);
     __ ld_ptr(O1, mirror_offset, O1);
 #ifdef ASSERT
--- a/hotspot/src/cpu/x86/vm/assembler_x86.cpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.cpp	Wed Jul 05 18:13:47 2017 +0200
@@ -6927,21 +6927,42 @@
   addptr(rsp,sizeof(jdouble));
 }
 
+void MacroAssembler::increase_precision() {
+  subptr(rsp, BytesPerWord);
+  fnstcw(Address(rsp, 0));
+  movl(rax, Address(rsp, 0));
+  orl(rax, 0x300);
+  push(rax);
+  fldcw(Address(rsp, 0));
+  pop(rax);
+}
+
+void MacroAssembler::restore_precision() {
+  fldcw(Address(rsp, 0));
+  addptr(rsp, BytesPerWord);
+}
+
 void MacroAssembler::fast_pow() {
   // computes X^Y = 2^(Y * log2(X))
   // if fast computation is not possible, result is NaN. Requires
   // fallback from user of this macro.
+  // increase precision for intermediate steps of the computation
+  increase_precision();
   fyl2x();                 // Stack: (Y*log2(X)) ...
   pow_exp_core_encoding(); // Stack: exp(X) ...
+  restore_precision();
 }
 
 void MacroAssembler::fast_exp() {
   // computes exp(X) = 2^(X * log2(e))
   // if fast computation is not possible, result is NaN. Requires
   // fallback from user of this macro.
+  // increase precision for intermediate steps of the computation
+  increase_precision();
   fldl2e();                // Stack: log2(e) X ...
   fmulp(1);                // Stack: (X*log2(e)) ...
   pow_exp_core_encoding(); // Stack: exp(X) ...
+  restore_precision();
 }
 
 void MacroAssembler::pow_or_exp(bool is_exp, int num_fpu_regs_in_use) {
--- a/hotspot/src/cpu/x86/vm/assembler_x86.hpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.hpp	Wed Jul 05 18:13:47 2017 +0200
@@ -2395,6 +2395,8 @@
   // runtime call.
   void fast_pow();
   void fast_exp();
+  void increase_precision();
+  void restore_precision();
 
   // computes exp(x). Fallback to runtime call included.
   void exp_with_fallback(int num_fpu_regs_in_use) { pow_or_exp(true, num_fpu_regs_in_use); }
--- a/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Wed Jul 05 18:13:47 2017 +0200
@@ -2673,7 +2673,7 @@
 #endif // _LP64
         }
       } else {
-        ShouldNotReachHere();
+        fatal(err_msg("unexpected type: %s", basictype_to_str(c->type())));
       }
       // cpu register - address
     } else if (opr2->is_address()) {
--- a/hotspot/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Wed Jul 05 18:13:47 2017 +0200
@@ -718,35 +718,6 @@
 }
 
 
-void LIRGenerator::do_AttemptUpdate(Intrinsic* x) {
-  assert(x->number_of_arguments() == 3, "wrong type");
-  LIRItem obj       (x->argument_at(0), this);  // AtomicLong object
-  LIRItem cmp_value (x->argument_at(1), this);  // value to compare with field
-  LIRItem new_value (x->argument_at(2), this);  // replace field with new_value if it matches cmp_value
-
-  // compare value must be in rdx,eax (hi,lo); may be destroyed by cmpxchg8 instruction
-  cmp_value.load_item_force(FrameMap::long0_opr);
-
-  // new value must be in rcx,ebx (hi,lo)
-  new_value.load_item_force(FrameMap::long1_opr);
-
-  // object pointer register is overwritten with field address
-  obj.load_item();
-
-  // generate compare-and-swap; produces zero condition if swap occurs
-  int value_offset = sun_misc_AtomicLongCSImpl::value_offset();
-  LIR_Opr addr = new_pointer_register();
-  __ leal(LIR_OprFact::address(new LIR_Address(obj.result(), value_offset, T_LONG)), addr);
-  LIR_Opr t1 = LIR_OprFact::illegalOpr;  // no temp needed
-  LIR_Opr t2 = LIR_OprFact::illegalOpr;  // no temp needed
-  __ cas_long(addr, cmp_value.result(), new_value.result(), t1, t2);
-
-  // generate conditional move of boolean result
-  LIR_Opr result = rlock_result(x);
-  __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0), result, T_LONG);
-}
-
-
 void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
   assert(x->number_of_arguments() == 4, "wrong type");
   LIRItem obj   (x->argument_at(0), this);  // object
@@ -1116,10 +1087,10 @@
   if (!x->klass()->is_loaded() || PatchALot) {
     patching_info = state_for(x, x->state_before());
 
-    // cannot re-use same xhandlers for multiple CodeEmitInfos, so
-    // clone all handlers.  This is handled transparently in other
-    // places by the CodeEmitInfo cloning logic but is handled
-    // specially here because a stub isn't being used.
+    // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
+    // clone all handlers (NOTE: Usually this is handled transparently
+    // by the CodeEmitInfo cloning logic in CodeStub constructors but
+    // is done explicitly here because a stub isn't being used).
     x->set_exception_handlers(new XHandlers(x->exception_handlers()));
   }
   CodeEmitInfo* info = state_for(x, x->state());
--- a/hotspot/src/cpu/x86/vm/cppInterpreter_x86.cpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/cpu/x86/vm/cppInterpreter_x86.cpp	Wed Jul 05 18:13:47 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -481,7 +481,8 @@
   __ xorptr(rdx, rdx);
   __ movptr(STATE(_oop_temp), rdx);                     // state->_oop_temp = NULL (only really needed for native)
   __ movptr(STATE(_mdx), rdx);                          // state->_mdx = NULL
-  __ movptr(rdx, Address(rbx, methodOopDesc::constants_offset()));
+  __ movptr(rdx, Address(rbx, methodOopDesc::const_offset()));
+  __ movptr(rdx, Address(rdx, constMethodOopDesc::constants_offset()));
   __ movptr(rdx, Address(rdx, constantPoolOopDesc::cache_offset_in_bytes()));
   __ movptr(STATE(_constants), rdx);                    // state->_constants = constants()
 
@@ -516,7 +517,8 @@
     __ testl(rax, JVM_ACC_STATIC);
     __ movptr(rax, Address(locals, 0));                   // get receiver (assume this is frequent case)
     __ jcc(Assembler::zero, done);
-    __ movptr(rax, Address(rbx, methodOopDesc::constants_offset()));
+    __ movptr(rax, Address(rbx, methodOopDesc::const_offset()));
+    __ movptr(rax, Address(rax, constMethodOopDesc::constants_offset()));
     __ movptr(rax, Address(rax, constantPoolOopDesc::pool_holder_offset_in_bytes()));
     __ movptr(rax, Address(rax, mirror_offset));
     __ bind(done);
@@ -769,7 +771,8 @@
     __ testl(rax, JVM_ACC_STATIC);
     __ movptr(rax, Address(rdi, 0));                                    // get receiver (assume this is frequent case)
     __ jcc(Assembler::zero, done);
-    __ movptr(rax, Address(rbx, methodOopDesc::constants_offset()));
+    __ movptr(rax, Address(rbx, methodOopDesc::const_offset()));
+    __ movptr(rax, Address(rax, constMethodOopDesc::constants_offset()));
     __ movptr(rax, Address(rax, constantPoolOopDesc::pool_holder_offset_in_bytes()));
     __ movptr(rax, Address(rax, mirror_offset));
     __ bind(done);
@@ -821,9 +824,9 @@
     __ testptr(rax, rax);
     __ jcc(Assembler::zero, slow_path);
 
-    __ movptr(rdi, Address(rbx, methodOopDesc::constants_offset()));
     // read first instruction word and extract bytecode @ 1 and index @ 2
     __ movptr(rdx, Address(rbx, methodOopDesc::const_offset()));
+    __ movptr(rdi, Address(rdx, constMethodOopDesc::constants_offset()));
     __ movl(rdx, Address(rdx, constMethodOopDesc::codes_offset()));
     // Shift codes right to get the index on the right.
     // The bytecode fetched looks like <index><0xb4><0x2a>
@@ -1185,7 +1188,8 @@
     __ testl(t, JVM_ACC_STATIC);
     __ jcc(Assembler::zero, L);
     // get mirror
-    __ movptr(t, Address(method, methodOopDesc:: constants_offset()));
+    __ movptr(t, Address(method, methodOopDesc:: const_offset()));
+    __ movptr(t, Address(t, constMethodOopDesc::constants_offset()));
     __ movptr(t, Address(t, constantPoolOopDesc::pool_holder_offset_in_bytes()));
     __ movptr(t, Address(t, mirror_offset));
     // copy mirror into activation object
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86_32.hpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_32.hpp	Wed Jul 05 18:13:47 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -77,7 +77,8 @@
 
   // Helpers for runtime call arguments/results
   void get_method(Register reg)                            { movptr(reg, Address(rbp, frame::interpreter_frame_method_offset * wordSize)); }
-  void get_constant_pool(Register reg)                     { get_method(reg); movptr(reg, Address(reg, methodOopDesc::constants_offset())); }
+  void get_const(Register reg)                             { get_method(reg); movptr(reg, Address(reg, methodOopDesc::const_offset())); }
+  void get_constant_pool(Register reg)                     { get_const(reg); movptr(reg, Address(reg, constMethodOopDesc::constants_offset())); }
   void get_constant_pool_cache(Register reg)               { get_constant_pool(reg); movptr(reg, Address(reg, constantPoolOopDesc::cache_offset_in_bytes())); }
   void get_cpool_and_tags(Register cpool, Register tags)   { get_constant_pool(cpool); movptr(tags, Address(cpool, constantPoolOopDesc::tags_offset_in_bytes()));
   }
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86_64.hpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_64.hpp	Wed Jul 05 18:13:47 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -84,9 +84,14 @@
     movptr(reg, Address(rbp, frame::interpreter_frame_method_offset * wordSize));
   }
 
-  void get_constant_pool(Register reg) {
+  void get_const(Register reg) {
     get_method(reg);
-    movptr(reg, Address(reg, methodOopDesc::constants_offset()));
+    movptr(reg, Address(reg, methodOopDesc::const_offset()));
+  }
+
+  void get_constant_pool(Register reg) {
+    get_const(reg);
+    movptr(reg, Address(reg, constMethodOopDesc::constants_offset()));
   }
 
   void get_constant_pool_cache(Register reg) {
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Wed Jul 05 18:13:47 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -566,7 +566,8 @@
     __ testl(rax, JVM_ACC_STATIC);
     __ movptr(rax, Address(rdi, Interpreter::local_offset_in_bytes(0)));  // get receiver (assume this is frequent case)
     __ jcc(Assembler::zero, done);
-    __ movptr(rax, Address(rbx, methodOopDesc::constants_offset()));
+    __ movptr(rax, Address(rbx, methodOopDesc::const_offset()));
+    __ movptr(rax, Address(rax, constMethodOopDesc::constants_offset()));
     __ movptr(rax, Address(rax, constantPoolOopDesc::pool_holder_offset_in_bytes()));
     __ movptr(rax, Address(rax, mirror_offset));
     __ bind(done);
@@ -606,7 +607,8 @@
     __ push(0);
   }
 
-  __ movptr(rdx, Address(rbx, methodOopDesc::constants_offset()));
+  __ movptr(rdx, Address(rbx, methodOopDesc::const_offset()));
+  __ movptr(rdx, Address(rdx, constMethodOopDesc::constants_offset()));
   __ movptr(rdx, Address(rdx, constantPoolOopDesc::cache_offset_in_bytes()));
   __ push(rdx);                                       // set constant pool cache
   __ push(rdi);                                       // set locals pointer
@@ -661,9 +663,9 @@
     __ testptr(rax, rax);
     __ jcc(Assembler::zero, slow_path);
 
-    __ movptr(rdi, Address(rbx, methodOopDesc::constants_offset()));
     // read first instruction word and extract bytecode @ 1 and index @ 2
     __ movptr(rdx, Address(rbx, methodOopDesc::const_offset()));
+    __ movptr(rdi, Address(rdx, constMethodOopDesc::constants_offset()));
     __ movl(rdx, Address(rdx, constMethodOopDesc::codes_offset()));
     // Shift codes right to get the index on the right.
     // The bytecode fetched looks like <index><0xb4><0x2a>
@@ -1026,7 +1028,8 @@
     __ testl(t, JVM_ACC_STATIC);
     __ jcc(Assembler::zero, L);
     // get mirror
-    __ movptr(t, Address(method, methodOopDesc:: constants_offset()));
+    __ movptr(t, Address(method, methodOopDesc:: const_offset()));
+    __ movptr(t, Address(t, constMethodOopDesc::constants_offset()));
     __ movptr(t, Address(t, constantPoolOopDesc::pool_holder_offset_in_bytes()));
     __ movptr(t, Address(t, mirror_offset));
     // copy mirror into activation frame
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Wed Jul 05 18:13:47 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -522,7 +522,8 @@
     // get receiver (assume this is frequent case)
     __ movptr(rax, Address(r14, Interpreter::local_offset_in_bytes(0)));
     __ jcc(Assembler::zero, done);
-    __ movptr(rax, Address(rbx, methodOopDesc::constants_offset()));
+    __ movptr(rax, Address(rbx, methodOopDesc::const_offset()));
+    __ movptr(rax, Address(rax, constMethodOopDesc::constants_offset()));
     __ movptr(rax, Address(rax,
                            constantPoolOopDesc::pool_holder_offset_in_bytes()));
     __ movptr(rax, Address(rax, mirror_offset));
@@ -579,7 +580,8 @@
     __ push(0);
   }
 
-  __ movptr(rdx, Address(rbx, methodOopDesc::constants_offset()));
+  __ movptr(rdx, Address(rbx, methodOopDesc::const_offset()));
+  __ movptr(rdx, Address(rdx, constMethodOopDesc::constants_offset()));
   __ movptr(rdx, Address(rdx, constantPoolOopDesc::cache_offset_in_bytes()));
   __ push(rdx); // set constant pool cache
   __ push(r14); // set locals pointer
@@ -629,9 +631,9 @@
     __ testptr(rax, rax);
     __ jcc(Assembler::zero, slow_path);
 
-    __ movptr(rdi, Address(rbx, methodOopDesc::constants_offset()));
     // read first instruction word and extract bytecode @ 1 and index @ 2
     __ movptr(rdx, Address(rbx, methodOopDesc::const_offset()));
+    __ movptr(rdi, Address(rdx, constMethodOopDesc::constants_offset()));
     __ movl(rdx, Address(rdx, constMethodOopDesc::codes_offset()));
     // Shift codes right to get the index on the right.
     // The bytecode fetched looks like <index><0xb4><0x2a>
@@ -1020,7 +1022,8 @@
     __ testl(t, JVM_ACC_STATIC);
     __ jcc(Assembler::zero, L);
     // get mirror
-    __ movptr(t, Address(method, methodOopDesc::constants_offset()));
+    __ movptr(t, Address(method, methodOopDesc::const_offset()));
+    __ movptr(t, Address(t, constMethodOopDesc::constants_offset()));
     __ movptr(t, Address(t, constantPoolOopDesc::pool_holder_offset_in_bytes()));
     __ movptr(t, Address(t, mirror_offset));
     // copy mirror into activation frame
--- a/hotspot/src/cpu/x86/vm/x86_32.ad	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/cpu/x86/vm/x86_32.ad	Wed Jul 05 18:13:47 2017 +0200
@@ -5555,8 +5555,9 @@
   ins_pipe( ialu_reg_reg);
 %}
 
-instruct bytes_reverse_unsigned_short(eRegI dst) %{
+instruct bytes_reverse_unsigned_short(eRegI dst, eFlagsReg cr) %{
   match(Set dst (ReverseBytesUS dst));
+  effect(KILL cr);
 
   format %{ "BSWAP  $dst\n\t" 
             "SHR    $dst,16\n\t" %}
@@ -5567,8 +5568,9 @@
   ins_pipe( ialu_reg );
 %}
 
-instruct bytes_reverse_short(eRegI dst) %{
+instruct bytes_reverse_short(eRegI dst, eFlagsReg cr) %{
   match(Set dst (ReverseBytesS dst));
+  effect(KILL cr);
 
   format %{ "BSWAP  $dst\n\t" 
             "SAR    $dst,16\n\t" %}
@@ -5729,9 +5731,10 @@
 
 //---------- Population Count Instructions -------------------------------------
 
-instruct popCountI(eRegI dst, eRegI src) %{
+instruct popCountI(eRegI dst, eRegI src, eFlagsReg cr) %{
   predicate(UsePopCountInstruction);
   match(Set dst (PopCountI src));
+  effect(KILL cr);
 
   format %{ "POPCNT $dst, $src" %}
   ins_encode %{
@@ -5740,9 +5743,10 @@
   ins_pipe(ialu_reg);
 %}
 
-instruct popCountI_mem(eRegI dst, memory mem) %{
+instruct popCountI_mem(eRegI dst, memory mem, eFlagsReg cr) %{
   predicate(UsePopCountInstruction);
   match(Set dst (PopCountI (LoadI mem)));
+  effect(KILL cr);
 
   format %{ "POPCNT $dst, $mem" %}
   ins_encode %{
@@ -7796,50 +7800,6 @@
   ins_pipe( ialu_reg_mem );
 %}
 
-// LoadLong-locked - same as a volatile long load when used with compare-swap
-instruct loadLLocked(stackSlotL dst, memory mem) %{
-  predicate(UseSSE<=1);
-  match(Set dst (LoadLLocked mem));
-
-  ins_cost(200);
-  format %{ "FILD   $mem\t# Atomic volatile long load\n\t"
-            "FISTp  $dst" %}
-  ins_encode(enc_loadL_volatile(mem,dst));
-  ins_pipe( fpu_reg_mem );
-%}
-
-instruct loadLX_Locked(stackSlotL dst, memory mem, regD tmp) %{
-  predicate(UseSSE>=2);
-  match(Set dst (LoadLLocked mem));
-  effect(TEMP tmp);
-  ins_cost(180);
-  format %{ "MOVSD  $tmp,$mem\t# Atomic volatile long load\n\t"
-            "MOVSD  $dst,$tmp" %}
-  ins_encode %{
-    __ movdbl($tmp$$XMMRegister, $mem$$Address);
-    __ movdbl(Address(rsp, $dst$$disp), $tmp$$XMMRegister);
-  %}
-  ins_pipe( pipe_slow );
-%}
-
-instruct loadLX_reg_Locked(eRegL dst, memory mem, regD tmp) %{
-  predicate(UseSSE>=2);
-  match(Set dst (LoadLLocked mem));
-  effect(TEMP tmp);
-  ins_cost(160);
-  format %{ "MOVSD  $tmp,$mem\t# Atomic volatile long load\n\t"
-            "MOVD   $dst.lo,$tmp\n\t"
-            "PSRLQ  $tmp,32\n\t"
-            "MOVD   $dst.hi,$tmp" %}
-  ins_encode %{
-    __ movdbl($tmp$$XMMRegister, $mem$$Address);
-    __ movdl($dst$$Register, $tmp$$XMMRegister);
-    __ psrlq($tmp$$XMMRegister, 32);
-    __ movdl(HIGH_FROM_LOW($dst$$Register), $tmp$$XMMRegister);
-  %}
-  ins_pipe( pipe_slow );
-%}
-
 // Conditional-store of the updated heap-top.
 // Used during allocation of the shared heap.
 // Sets flags (EQ) on success.  Implemented with a CMPXCHG on Intel.
--- a/hotspot/src/cpu/x86/vm/x86_64.ad	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/cpu/x86/vm/x86_64.ad	Wed Jul 05 18:13:47 2017 +0200
@@ -6417,14 +6417,14 @@
   match(Set dst (ReverseBytesL dst));
 
   format %{ "bswapq  $dst" %}
-
   opcode(0x0F, 0xC8); /* Opcode 0F /C8 */
   ins_encode( REX_reg_wide(dst), OpcP, opc2_reg(dst) );
   ins_pipe( ialu_reg);
 %}
 
-instruct bytes_reverse_unsigned_short(rRegI dst) %{
+instruct bytes_reverse_unsigned_short(rRegI dst, rFlagsReg cr) %{
   match(Set dst (ReverseBytesUS dst));
+  effect(KILL cr);
 
   format %{ "bswapl  $dst\n\t"
             "shrl    $dst,16\n\t" %}
@@ -6435,8 +6435,9 @@
   ins_pipe( ialu_reg );
 %}
 
-instruct bytes_reverse_short(rRegI dst) %{
+instruct bytes_reverse_short(rRegI dst, rFlagsReg cr) %{
   match(Set dst (ReverseBytesS dst));
+  effect(KILL cr);
 
   format %{ "bswapl  $dst\n\t"
             "sar     $dst,16\n\t" %}
@@ -6564,9 +6565,10 @@
 
 //---------- Population Count Instructions -------------------------------------
 
-instruct popCountI(rRegI dst, rRegI src) %{
+instruct popCountI(rRegI dst, rRegI src, rFlagsReg cr) %{
   predicate(UsePopCountInstruction);
   match(Set dst (PopCountI src));
+  effect(KILL cr);
 
   format %{ "popcnt  $dst, $src" %}
   ins_encode %{
@@ -6575,9 +6577,10 @@
   ins_pipe(ialu_reg);
 %}
 
-instruct popCountI_mem(rRegI dst, memory mem) %{
+instruct popCountI_mem(rRegI dst, memory mem, rFlagsReg cr) %{
   predicate(UsePopCountInstruction);
   match(Set dst (PopCountI (LoadI mem)));
+  effect(KILL cr);
 
   format %{ "popcnt  $dst, $mem" %}
   ins_encode %{
@@ -6587,9 +6590,10 @@
 %}
 
 // Note: Long.bitCount(long) returns an int.
-instruct popCountL(rRegI dst, rRegL src) %{
+instruct popCountL(rRegI dst, rRegL src, rFlagsReg cr) %{
   predicate(UsePopCountInstruction);
   match(Set dst (PopCountL src));
+  effect(KILL cr);
 
   format %{ "popcnt  $dst, $src" %}
   ins_encode %{
@@ -6599,9 +6603,10 @@
 %}
 
 // Note: Long.bitCount(long) returns an int.
-instruct popCountL_mem(rRegI dst, memory mem) %{
+instruct popCountL_mem(rRegI dst, memory mem, rFlagsReg cr) %{
   predicate(UsePopCountInstruction);
   match(Set dst (PopCountL (LoadL mem)));
+  effect(KILL cr);
 
   format %{ "popcnt  $dst, $mem" %}
   ins_encode %{
@@ -7492,18 +7497,6 @@
   ins_pipe(ialu_reg_mem); // XXX
 %}
 
-// LoadL-locked - same as a regular LoadL when used with compare-swap
-instruct loadLLocked(rRegL dst, memory mem)
-%{
-  match(Set dst (LoadLLocked mem));
-
-  ins_cost(125); // XXX
-  format %{ "movq    $dst, $mem\t# long locked" %}
-  opcode(0x8B);
-  ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
-  ins_pipe(ialu_reg_mem); // XXX
-%}
-
 // Conditional-store of the updated heap-top.
 // Used during allocation of the shared heap.
 // Sets flags (EQ) on success.  Implemented with a CMPXCHG on Intel.
--- a/hotspot/src/os/solaris/dtrace/generateJvmOffsets.cpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/os/solaris/dtrace/generateJvmOffsets.cpp	Wed Jul 05 18:13:47 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -220,10 +220,10 @@
   printf("\n");
 
   GEN_OFFS(methodOopDesc, _constMethod);
-  GEN_OFFS(methodOopDesc, _constants);
   GEN_OFFS(methodOopDesc, _access_flags);
   printf("\n");
 
+  GEN_OFFS(constMethodOopDesc, _constants);
   GEN_OFFS(constMethodOopDesc, _flags);
   GEN_OFFS(constMethodOopDesc, _code_size);
   GEN_OFFS(constMethodOopDesc, _name_index);
--- a/hotspot/src/os/solaris/dtrace/jhelper.d	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/os/solaris/dtrace/jhelper.d	Wed Jul 05 18:13:47 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -118,7 +118,7 @@
   copyin_offset(OFFSET_Symbol_body);
 
   copyin_offset(OFFSET_methodOopDesc_constMethod);
-  copyin_offset(OFFSET_methodOopDesc_constants);
+  copyin_offset(OFFSET_constMethodOopDesc_constants);
   copyin_offset(OFFSET_constMethodOopDesc_name_index);
   copyin_offset(OFFSET_constMethodOopDesc_signature_index);
 
@@ -359,8 +359,8 @@
   this->signatureIndex = copyin_uint16(this->constMethod +
       OFFSET_constMethodOopDesc_signature_index);
 
-  this->constantPool = copyin_ptr(this->methodOopPtr +
-      OFFSET_methodOopDesc_constants);
+  this->constantPool = copyin_ptr(this->constMethod +
+      OFFSET_constMethodOopDesc_constants);
 
   this->nameSymbol = copyin_ptr(this->constantPool +
       this->nameIndex * sizeof (pointer) + SIZE_constantPoolOopDesc);
--- a/hotspot/src/os/solaris/dtrace/libjvm_db.c	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/os/solaris/dtrace/libjvm_db.c	Wed Jul 05 18:13:47 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -514,9 +514,9 @@
   char * signatureString = NULL;
   int err;
 
-  err = read_pointer(J, methodOopPtr + OFFSET_methodOopDesc_constants, &constantPool);
+  err = read_pointer(J, methodOopPtr + OFFSET_methodOopDesc_constMethod, &constMethod);
   CHECK_FAIL(err);
-  err = read_pointer(J, methodOopPtr + OFFSET_methodOopDesc_constMethod, &constMethod);
+  err = read_pointer(J->P, constMethod + OFFSET_constMethodOopDesc_constants, &constantPool);
   CHECK_FAIL(err);
 
   /* To get name string */
--- a/hotspot/src/os/windows/vm/os_windows.cpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/os/windows/vm/os_windows.cpp	Wed Jul 05 18:13:47 2017 +0200
@@ -1591,7 +1591,8 @@
     case 5001: st->print(" Windows XP"); break;
     case 5002:
     case 6000:
-    case 6001: {
+    case 6001:
+    case 6002: {
       // Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we could
       // find out whether we are running on 64 bit processor or not.
       SYSTEM_INFO si;
@@ -1623,6 +1624,14 @@
         }
         if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64)
             st->print(" , 64 bit");
+      } else if (os_vers == 6002) {
+        if (osvi.wProductType == VER_NT_WORKSTATION) {
+            st->print(" Windows 8");
+        } else {
+            st->print(" Windows Server 2012");
+        }
+        if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64)
+            st->print(" , 64 bit");
       } else { // future os
         // Unrecognized windows, print out its major and minor versions
         st->print(" Windows NT %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion);
--- a/hotspot/src/share/vm/adlc/forms.cpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/share/vm/adlc/forms.cpp	Wed Jul 05 18:13:47 2017 +0200
@@ -261,7 +261,6 @@
   if( strcmp(opType,"LoadL")==0 )  return Form::idealL;
   if( strcmp(opType,"LoadL_unaligned")==0 )  return Form::idealL;
   if( strcmp(opType,"LoadPLocked")==0 )  return Form::idealP;
-  if( strcmp(opType,"LoadLLocked")==0 )  return Form::idealL;
   if( strcmp(opType,"LoadP")==0 )  return Form::idealP;
   if( strcmp(opType,"LoadN")==0 )  return Form::idealN;
   if( strcmp(opType,"LoadRange")==0 )  return Form::idealI;
--- a/hotspot/src/share/vm/adlc/formssel.cpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/share/vm/adlc/formssel.cpp	Wed Jul 05 18:13:47 2017 +0200
@@ -3387,7 +3387,7 @@
     "Load4I" ,"Load2I" ,"Load2L" ,"Load2D" ,"Load4F" ,"Load2F" ,"Load16B" ,
     "Load8B" ,"Load4B" ,"Load8C" ,"Load4C" ,"Load2C" ,"Load8S", "Load4S","Load2S",
     "LoadRange", "LoadKlass", "LoadNKlass", "LoadL_unaligned", "LoadD_unaligned",
-    "LoadPLocked", "LoadLLocked",
+    "LoadPLocked",
     "StorePConditional", "StoreIConditional", "StoreLConditional",
     "CompareAndSwapI", "CompareAndSwapL", "CompareAndSwapP", "CompareAndSwapN",
     "StoreCM",
--- a/hotspot/src/share/vm/c1/c1_Canonicalizer.cpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/share/vm/c1/c1_Canonicalizer.cpp	Wed Jul 05 18:13:47 2017 +0200
@@ -42,6 +42,11 @@
   // the instruction stream (because the instruction list is embedded
   // in the instructions).
   if (canonical() != x) {
+#ifndef PRODUCT
+    if (!x->has_printable_bci()) {
+      x->set_printable_bci(bci());
+    }
+#endif
     if (PrintCanonicalization) {
       PrintValueVisitor do_print_value;
       canonical()->input_values_do(&do_print_value);
@@ -451,6 +456,28 @@
     }
     break;
   }
+  case vmIntrinsics::_isInstance          : {
+    assert(x->number_of_arguments() == 2, "wrong type");
+
+    InstanceConstant* c = x->argument_at(0)->type()->as_InstanceConstant();
+    if (c != NULL && !c->value()->is_null_object()) {
+      // ciInstance::java_mirror_type() returns non-NULL only for Java mirrors
+      ciType* t = c->value()->as_instance()->java_mirror_type();
+      if (t->is_klass()) {
+        // substitute cls.isInstance(obj) of a constant Class into
+        // an InstantOf instruction
+        InstanceOf* i = new InstanceOf(t->as_klass(), x->argument_at(1), x->state_before());
+        set_canonical(i);
+        // and try to canonicalize even further
+        do_InstanceOf(i);
+      } else {
+        assert(t->is_primitive_type(), "should be a primitive type");
+        // cls.isInstance(obj) always returns false for primitive classes
+        set_constant(0);
+      }
+    }
+    break;
+  }
   }
 }
 
@@ -677,8 +704,8 @@
                 return;
             }
           }
+          set_bci(cmp->state_before()->bci());
           set_canonical(canon);
-          set_bci(cmp->state_before()->bci());
         }
       }
     } else if (l->as_InstanceOf() != NULL) {
--- a/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp	Wed Jul 05 18:13:47 2017 +0200
@@ -3170,6 +3170,7 @@
       break;
 
     case vmIntrinsics::_getClass      :
+    case vmIntrinsics::_isInstance    :
       if (!InlineClassNatives) return false;
       preserves_state = true;
       break;
@@ -3194,13 +3195,6 @@
       preserves_state = true;
       break;
 
-    // sun/misc/AtomicLong.attemptUpdate
-    case vmIntrinsics::_attemptUpdate :
-      if (!VM_Version::supports_cx8()) return false;
-      if (!InlineAtomicLong) return false;
-      preserves_state = true;
-      break;
-
     // Use special nodes for Unsafe instructions so we can more easily
     // perform an address-mode optimization on the raw variants
     case vmIntrinsics::_getObject : return append_unsafe_get_obj(callee, T_OBJECT,  false);
--- a/hotspot/src/share/vm/c1/c1_Instruction.hpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/share/vm/c1/c1_Instruction.hpp	Wed Jul 05 18:13:47 2017 +0200
@@ -302,8 +302,6 @@
 
   void update_exception_state(ValueStack* state);
 
-  bool has_printable_bci() const                 { return NOT_PRODUCT(_printable_bci != -99) PRODUCT_ONLY(false); }
-
  protected:
   void set_type(ValueType* type) {
     assert(type != NULL, "type must exist");
@@ -392,8 +390,9 @@
   // accessors
   int id() const                                 { return _id; }
 #ifndef PRODUCT
+  bool has_printable_bci() const                 { return _printable_bci != -99; }
   int printable_bci() const                      { assert(has_printable_bci(), "_printable_bci should have been set"); return _printable_bci; }
-  void set_printable_bci(int bci)                { NOT_PRODUCT(_printable_bci = bci;) }
+  void set_printable_bci(int bci)                { _printable_bci = bci; }
 #endif
   int use_count() const                          { return _use_count; }
   int pin_state() const                          { return _pin_state; }
@@ -576,6 +575,7 @@
   , _block(b)
   , _index(index)
   {
+    NOT_PRODUCT(set_printable_bci(Value(b)->printable_bci()));
     if (type->is_illegal()) {
       make_illegal();
     }
@@ -631,7 +631,9 @@
     : Instruction(type)
     , _java_index(index)
     , _declared_type(declared)
-  {}
+  {
+    NOT_PRODUCT(set_printable_bci(-1));
+  }
 
   // accessors
   int java_index() const                         { return _java_index; }
--- a/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp	Wed Jul 05 18:13:47 2017 +0200
@@ -1242,6 +1242,36 @@
               NULL   /* info */);
 }
 
+// Example: clazz.isInstance(object)
+void LIRGenerator::do_isInstance(Intrinsic* x) {
+  assert(x->number_of_arguments() == 2, "wrong type");
+
+  // TODO could try to substitute this node with an equivalent InstanceOf
+  // if clazz is known to be a constant Class. This will pick up newly found
+  // constants after HIR construction. I'll leave this to a future change.
+
+  // As a first cut, make a simple leaf call into the runtime to stay platform
+  // independent; a later change could follow the aastore example instead.
+
+  LIRItem clazz(x->argument_at(0), this);
+  LIRItem object(x->argument_at(1), this);
+  clazz.load_item();
+  object.load_item();
+  LIR_Opr result = rlock_result(x);
+
+  // need to perform null check on clazz
+  if (x->needs_null_check()) {
+    CodeEmitInfo* info = state_for(x);
+    __ null_check(clazz.result(), info);
+  }
+
+  LIR_Opr call_result = call_runtime(clazz.value(), object.value(),
+                                     CAST_FROM_FN_PTR(address, Runtime1::is_instance_of),
+                                     x->type(),
+                                     NULL); // NULL CodeEmitInfo results in a leaf call
+  __ move(call_result, result);
+}
+
 // Example: object.getClass ()
 void LIRGenerator::do_getClass(Intrinsic* x) {
   assert(x->number_of_arguments() == 1, "wrong type");
@@ -2777,31 +2807,29 @@
       int index = bcs.get_method_index();
       size_t call_site_offset = cpcache->get_f1_offset(index);
 
+      // Load CallSite object from constant pool cache.
+      LIR_Opr call_site = new_register(objectType);
+      __ oop2reg(cpcache->constant_encoding(), call_site);
+      __ move_wide(new LIR_Address(call_site, call_site_offset, T_OBJECT), call_site);
+
       // If this invokedynamic call site hasn't been executed yet in
       // the interpreter, the CallSite object in the constant pool
       // cache is still null and we need to deoptimize.
       if (cpcache->is_f1_null_at(index)) {
-        // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
-        // clone all handlers.  This is handled transparently in other
-        // places by the CodeEmitInfo cloning logic but is handled
-        // specially here because a stub isn't being used.
-        x->set_exception_handlers(new XHandlers(x->exception_handlers()));
-
+        // Only deoptimize if the CallSite object is still null; we don't
+        // recompile methods in C1 after deoptimization so this call site
+        // might be resolved the next time we execute it after OSR.
         DeoptimizeStub* deopt_stub = new DeoptimizeStub(deopt_info);
-        __ jump(deopt_stub);
+        __ cmp(lir_cond_equal, call_site, LIR_OprFact::oopConst(NULL));
+        __ branch(lir_cond_equal, T_OBJECT, deopt_stub);
       }
 
       // Use the receiver register for the synthetic MethodHandle
       // argument.
       receiver = LIR_Assembler::receiverOpr();
-      LIR_Opr tmp = new_register(objectType);
-
-      // Load CallSite object from constant pool cache.
-      __ oop2reg(cpcache->constant_encoding(), tmp);
-      __ move_wide(new LIR_Address(tmp, call_site_offset, T_OBJECT), tmp);
 
       // Load target MethodHandle from CallSite object.
-      __ load(new LIR_Address(tmp, java_lang_invoke_CallSite::target_offset_in_bytes(), T_OBJECT), receiver);
+      __ load(new LIR_Address(call_site, java_lang_invoke_CallSite::target_offset_in_bytes(), T_OBJECT), receiver);
 
       __ call_dynamic(target, receiver, result_register,
                       SharedRuntime::get_resolve_opt_virtual_call_stub(),
@@ -2809,7 +2837,7 @@
       break;
     }
     default:
-      ShouldNotReachHere();
+      fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(x->code())));
       break;
   }
 
@@ -2951,6 +2979,7 @@
     break;
 
   case vmIntrinsics::_Object_init:    do_RegisterFinalizer(x); break;
+  case vmIntrinsics::_isInstance:     do_isInstance(x);    break;
   case vmIntrinsics::_getClass:       do_getClass(x);      break;
   case vmIntrinsics::_currentThread:  do_currentThread(x); break;
 
@@ -2978,11 +3007,6 @@
     do_CompareAndSwap(x, longType);
     break;
 
-    // sun.misc.AtomicLongCSImpl.attemptUpdate
-  case vmIntrinsics::_attemptUpdate:
-    do_AttemptUpdate(x);
-    break;
-
   case vmIntrinsics::_Reference_get:
     do_Reference_get(x);
     break;
@@ -3223,4 +3247,3 @@
     }
   }
 }
-
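In the invokedynamic hunk above, the compiled code now loads the CallSite oop and re-checks it at run time, branching to the deoptimization stub only if the constant pool cache entry is still null, instead of jumping to the stub unconditionally. A way to observe such a lazily linked call site from Java, assuming a JDK 8 javac where lambda capture sites compile to invokedynamic (the class name is illustrative):

public class IndyDemo {
    public static void main(String[] args) {
        // The lambda capture below is an invokedynamic instruction; its CallSite is
        // created by the bootstrap method the first time this line executes, so until
        // then the constant pool cache entry that C1 inspects is still null.
        Runnable r = () -> System.out.println("linked on first execution");
        r.run();
    }
}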
--- a/hotspot/src/share/vm/c1/c1_LIRGenerator.hpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.hpp	Wed Jul 05 18:13:47 2017 +0200
@@ -238,12 +238,12 @@
   LIR_Opr getThreadPointer();
 
   void do_RegisterFinalizer(Intrinsic* x);
+  void do_isInstance(Intrinsic* x);
   void do_getClass(Intrinsic* x);
   void do_currentThread(Intrinsic* x);
   void do_MathIntrinsic(Intrinsic* x);
   void do_ArrayCopy(Intrinsic* x);
   void do_CompareAndSwap(Intrinsic* x, ValueType* type);
-  void do_AttemptUpdate(Intrinsic* x);
   void do_NIOCheckIndex(Intrinsic* x);
   void do_FPIntrinsics(Intrinsic* x);
   void do_Reference_get(Intrinsic* x);
--- a/hotspot/src/share/vm/c1/c1_Runtime1.cpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/share/vm/c1/c1_Runtime1.cpp	Wed Jul 05 18:13:47 2017 +0200
@@ -294,6 +294,7 @@
   FUNCTION_CASE(entry, SharedRuntime::lrem);
   FUNCTION_CASE(entry, SharedRuntime::dtrace_method_entry);
   FUNCTION_CASE(entry, SharedRuntime::dtrace_method_exit);
+  FUNCTION_CASE(entry, is_instance_of);
   FUNCTION_CASE(entry, trace_block_entry);
 #ifdef TRACE_HAVE_INTRINSICS
   FUNCTION_CASE(entry, TRACE_TIME_METHOD);
@@ -1270,6 +1271,19 @@
 JRT_END
 
 
+JRT_LEAF(int, Runtime1::is_instance_of(oopDesc* mirror, oopDesc* obj))
+  // had to return int instead of bool, otherwise there may be a mismatch
+  // between the C calling convention and the Java one.
+  // e.g., on x86, GCC may set only %al when returning a bool, while the
+  // JVM reads the whole %eax as the return value, so a false result could
+  // be misinterpreted as true if the upper bits hold garbage.
+
+  assert(mirror != NULL, "should null-check on mirror before calling");
+  klassOop k = java_lang_Class::as_klassOop(mirror);
+  return (k != NULL && obj != NULL && obj->is_a(k)) ? 1 : 0;
+JRT_END
+
+
 #ifndef PRODUCT
 void Runtime1::print_statistics() {
   tty->print_cr("C1 Runtime statistics:");
--- a/hotspot/src/share/vm/c1/c1_Runtime1.hpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/share/vm/c1/c1_Runtime1.hpp	Wed Jul 05 18:13:47 2017 +0200
@@ -186,6 +186,7 @@
   static int  arraycopy(oopDesc* src, int src_pos, oopDesc* dst, int dst_pos, int length);
   static void primitive_arraycopy(HeapWord* src, HeapWord* dst, int length);
   static void oop_arraycopy(HeapWord* src, HeapWord* dst, int length);
+  static int  is_instance_of(oopDesc* mirror, oopDesc* obj);
 
   static void print_statistics()                 PRODUCT_RETURN;
 };
--- a/hotspot/src/share/vm/c1/c1_ValueMap.hpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/share/vm/c1/c1_ValueMap.hpp	Wed Jul 05 18:13:47 2017 +0200
@@ -141,8 +141,11 @@
 
   // visitor functions
   void do_StoreField     (StoreField*      x) {
-    if (x->is_init_point()) {
-      // putstatic is an initialization point so treat it as a wide kill
+    if (x->is_init_point() ||  // putstatic is an initialization point so treat it as a wide kill
+        // This is actually too strict and the JMM doesn't require
+        // this in all cases (e.g. load a; volatile store b; load a)
+        // but possible future optimizations might require this.
+        x->field()->is_volatile()) {
       kill_memory();
     } else {
       kill_field(x->field());
@@ -160,8 +163,8 @@
   void do_Local          (Local*           x) { /* nothing to do */ }
   void do_Constant       (Constant*        x) { /* nothing to do */ }
   void do_LoadField      (LoadField*       x) {
-    if (x->is_init_point()) {
-      // getstatic is an initialization point so treat it as a wide kill
+    if (x->is_init_point() ||         // getstatic is an initialization point so treat it as a wide kill
+        x->field()->is_volatile()) {  // the JMM requires this
       kill_memory();
     }
   }
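The two ValueMap changes above make any volatile LoadField/StoreField a wide kill, so no field load memoized before the volatile access survives it; the in-code comment itself notes this is stricter than the JMM strictly demands for the "load a; volatile store b; load a" shape. A hedged Java illustration of that pattern (field names are made up for the example):

class VolatileKillDemo {
    int a;
    volatile boolean b;

    int sum() {
        int x = a;   // first load of 'a' enters the value map
        b = true;    // volatile store: with this change C1 kills the whole map here
        int y = a;   // so this load is re-issued rather than value-numbered into 'x'
        return x + y;
    }
}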
--- a/hotspot/src/share/vm/classfile/javaClasses.cpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/share/vm/classfile/javaClasses.cpp	Wed Jul 05 18:13:47 2017 +0200
@@ -2919,7 +2919,6 @@
 int java_lang_AssertionStatusDirectives::packageEnabled_offset;
 int java_lang_AssertionStatusDirectives::deflt_offset;
 int java_nio_Buffer::_limit_offset;
-int sun_misc_AtomicLongCSImpl::_value_offset;
 int java_util_concurrent_locks_AbstractOwnableSynchronizer::_owner_offset = 0;
 int sun_reflect_ConstantPool::_cp_oop_offset;
 int sun_reflect_UnsafeStaticFieldAccessorImpl::_base_offset;
@@ -2979,21 +2978,6 @@
   compute_offset(_limit_offset, k, vmSymbols::limit_name(), vmSymbols::int_signature());
 }
 
-// Support for intrinsification of sun.misc.AtomicLongCSImpl.attemptUpdate
-int sun_misc_AtomicLongCSImpl::value_offset() {
-  assert(SystemDictionary::AtomicLongCSImpl_klass() != NULL, "can't call this");
-  return _value_offset;
-}
-
-
-void sun_misc_AtomicLongCSImpl::compute_offsets() {
-  klassOop k = SystemDictionary::AtomicLongCSImpl_klass();
-  // If this class is not present, its value field offset won't be referenced.
-  if (k != NULL) {
-    compute_offset(_value_offset, k, vmSymbols::value_name(), vmSymbols::long_signature());
-  }
-}
-
 void java_util_concurrent_locks_AbstractOwnableSynchronizer::initialize(TRAPS) {
   if (_owner_offset != 0) return;
 
@@ -3098,7 +3082,6 @@
     sun_reflect_ConstantPool::compute_offsets();
     sun_reflect_UnsafeStaticFieldAccessorImpl::compute_offsets();
   }
-  sun_misc_AtomicLongCSImpl::compute_offsets();
 
   // generated interpreter code wants to know about the offsets we just computed:
   AbstractAssembler::update_delayed_values();
--- a/hotspot/src/share/vm/classfile/javaClasses.hpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/share/vm/classfile/javaClasses.hpp	Wed Jul 05 18:13:47 2017 +0200
@@ -1383,15 +1383,6 @@
   static void compute_offsets();
 };
 
-class sun_misc_AtomicLongCSImpl: AllStatic {
- private:
-  static int _value_offset;
-
- public:
-  static int  value_offset();
-  static void compute_offsets();
-};
-
 class java_util_concurrent_locks_AbstractOwnableSynchronizer : AllStatic {
  private:
   static int  _owner_offset;
--- a/hotspot/src/share/vm/classfile/systemDictionary.hpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/share/vm/classfile/systemDictionary.hpp	Wed Jul 05 18:13:47 2017 +0200
@@ -170,9 +170,6 @@
   /* It's okay if this turns out to be NULL in non-1.4 JDKs. */               \
   template(nio_Buffer_klass,             java_nio_Buffer,                Opt) \
                                                                               \
-  /* If this class isn't present, it won't be referenced. */                  \
-  template(AtomicLongCSImpl_klass,       sun_misc_AtomicLongCSImpl,   Opt)    \
-                                                                              \
   template(DownloadManager_klass,        sun_jkernel_DownloadManager, Opt_Kernel) \
                                                                               \
   template(PostVMInitHook_klass,         sun_misc_PostVMInitHook, Opt)        \
--- a/hotspot/src/share/vm/classfile/verifier.cpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/share/vm/classfile/verifier.cpp	Wed Jul 05 18:13:47 2017 +0200
@@ -1738,10 +1738,14 @@
   int target = bci + default_offset;
   stackmap_table->check_jump_target(current_frame, target, CHECK_VERIFY(this));
   for (int i = 0; i < keys; i++) {
+    // Because check_jump_target() may safepoint, the bytecode could have
+    // moved, which means 'aligned_bcp' is no good and needs to be recalculated.
+    aligned_bcp = (address)round_to((intptr_t)(bcs->bcp() + 1), jintSize);
     target = bci + (jint)Bytes::get_Java_u4(aligned_bcp+(3+i*delta)*jintSize);
     stackmap_table->check_jump_target(
       current_frame, target, CHECK_VERIFY(this));
   }
+  NOT_PRODUCT(aligned_bcp = NULL);  // no longer valid at this point
 }
 
 bool ClassVerifier::name_in_supers(
--- a/hotspot/src/share/vm/classfile/vmSymbols.hpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/share/vm/classfile/vmSymbols.hpp	Wed Jul 05 18:13:47 2017 +0200
@@ -722,15 +722,6 @@
   /* java/lang/ref/Reference */                                                                                         \
   do_intrinsic(_Reference_get,            java_lang_ref_Reference, get_name,    void_object_signature, F_R)             \
                                                                                                                         \
-                                                                                                                        \
-  do_class(sun_misc_AtomicLongCSImpl,     "sun/misc/AtomicLongCSImpl")                                                  \
-  do_intrinsic(_get_AtomicLong,           sun_misc_AtomicLongCSImpl, get_name, void_long_signature,              F_R)   \
-  /*   (symbols get_name and void_long_signature defined above) */                                                      \
-                                                                                                                        \
-  do_intrinsic(_attemptUpdate,            sun_misc_AtomicLongCSImpl, attemptUpdate_name, attemptUpdate_signature, F_R)  \
-   do_name(     attemptUpdate_name,                                 "attemptUpdate")                                    \
-   do_signature(attemptUpdate_signature,                            "(JJ)Z")                                            \
-                                                                                                                        \
   /* support for sun.misc.Unsafe */                                                                                     \
   do_class(sun_misc_Unsafe,               "sun/misc/Unsafe")                                                            \
                                                                                                                         \
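The deletions above (together with the matching removals in c1_GraphBuilder.cpp, c1_LIRGenerator.*, javaClasses.* and systemDictionary.hpp) retire the sun.misc.AtomicLongCSImpl intrinsics; attemptUpdate, with signature (JJ)Z, is a compare-and-swap style update of a long. Assuming that reading of the signature, the equivalent operation at the library level is the long CAS on java.util.concurrent.atomic.AtomicLong, for example:

import java.util.concurrent.atomic.AtomicLong;

public class LongCasDemo {
    public static void main(String[] args) {
        AtomicLong value = new AtomicLong(0L);
        boolean swapped = value.compareAndSet(0L, 42L);     // expect 0, install 42
        System.out.println(swapped + " -> " + value.get()); // prints: true -> 42
    }
}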
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp	Wed Jul 05 18:13:47 2017 +0200
@@ -293,7 +293,7 @@
     // Java thread is waiting for a full GC to happen (e.g., it
     // called System.gc() with +ExplicitGCInvokesConcurrent).
     _sts.join();
-    g1h->increment_full_collections_completed(true /* concurrent */);
+    g1h->increment_old_marking_cycles_completed(true /* concurrent */);
     _sts.leave();
   }
   assert(_should_terminate, "just checking");
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Jul 05 18:13:47 2017 +0200
@@ -1299,6 +1299,7 @@
 
     gc_prologue(true);
     increment_total_collections(true /* full gc */);
+    increment_old_marking_cycles_started();
 
     size_t g1h_prev_used = used();
     assert(used() == recalculate_used(), "Should be equal");
@@ -1492,22 +1493,28 @@
     JavaThread::dirty_card_queue_set().abandon_logs();
     assert(!G1DeferredRSUpdate
            || (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any");
-  }
-
-  _young_list->reset_sampled_info();
-  // At this point there should be no regions in the
-  // entire heap tagged as young.
-  assert( check_young_list_empty(true /* check_heap */),
-    "young list should be empty at this point");
-
-  // Update the number of full collections that have been completed.
-  increment_full_collections_completed(false /* concurrent */);
-
-  _hrs.verify_optional();
-  verify_region_sets_optional();
-
-  print_heap_after_gc();
-  g1mm()->update_sizes();
+
+    _young_list->reset_sampled_info();
+    // At this point there should be no regions in the
+    // entire heap tagged as young.
+    assert( check_young_list_empty(true /* check_heap */),
+      "young list should be empty at this point");
+
+    // Update the number of old marking cycles that have been completed.
+    increment_old_marking_cycles_completed(false /* concurrent */);
+
+    _hrs.verify_optional();
+    verify_region_sets_optional();
+
+    print_heap_after_gc();
+
+    // We must call G1MonitoringSupport::update_sizes() in the same scoping level
+    // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
+    // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
+    // before any GC notifications are raised.
+    g1mm()->update_sizes();
+  }
+
   post_full_gc_dump();
 
   return true;
@@ -1888,7 +1895,8 @@
   _retained_old_gc_alloc_region(NULL),
   _expand_heap_after_alloc_failure(true),
   _surviving_young_words(NULL),
-  _full_collections_completed(0),
+  _old_marking_cycles_started(0),
+  _old_marking_cycles_completed(0),
   _in_cset_fast_test(NULL),
   _in_cset_fast_test_base(NULL),
   _dirty_cards_region_list(NULL),
@@ -2360,7 +2368,16 @@
 }
 #endif // !PRODUCT
 
-void G1CollectedHeap::increment_full_collections_completed(bool concurrent) {
+void G1CollectedHeap::increment_old_marking_cycles_started() {
+  assert(_old_marking_cycles_started == _old_marking_cycles_completed ||
+    _old_marking_cycles_started == _old_marking_cycles_completed + 1,
+    err_msg("Wrong marking cycle count (started: %d, completed: %d)",
+    _old_marking_cycles_started, _old_marking_cycles_completed));
+
+  _old_marking_cycles_started++;
+}
+
+void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent) {
   MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
 
   // We assume that if concurrent == true, then the caller is a
@@ -2368,11 +2385,6 @@
   // Set. If there's ever a cheap way to check this, we should add an
   // assert here.
 
-  // We have already incremented _total_full_collections at the start
-  // of the GC, so total_full_collections() represents how many full
-  // collections have been started.
-  unsigned int full_collections_started = total_full_collections();
-
   // Given that this method is called at the end of a Full GC or of a
   // concurrent cycle, and those can be nested (i.e., a Full GC can
   // interrupt a concurrent cycle), the number of full collections
@@ -2382,21 +2394,21 @@
 
   // This is the case for the inner caller, i.e. a Full GC.
   assert(concurrent ||
-         (full_collections_started == _full_collections_completed + 1) ||
-         (full_collections_started == _full_collections_completed + 2),
-         err_msg("for inner caller (Full GC): full_collections_started = %u "
-                 "is inconsistent with _full_collections_completed = %u",
-                 full_collections_started, _full_collections_completed));
+         (_old_marking_cycles_started == _old_marking_cycles_completed + 1) ||
+         (_old_marking_cycles_started == _old_marking_cycles_completed + 2),
+         err_msg("for inner caller (Full GC): _old_marking_cycles_started = %u "
+                 "is inconsistent with _old_marking_cycles_completed = %u",
+                 _old_marking_cycles_started, _old_marking_cycles_completed));
 
   // This is the case for the outer caller, i.e. the concurrent cycle.
   assert(!concurrent ||
-         (full_collections_started == _full_collections_completed + 1),
+         (_old_marking_cycles_started == _old_marking_cycles_completed + 1),
          err_msg("for outer caller (concurrent cycle): "
-                 "full_collections_started = %u "
-                 "is inconsistent with _full_collections_completed = %u",
-                 full_collections_started, _full_collections_completed));
-
-  _full_collections_completed += 1;
+                 "_old_marking_cycles_started = %u "
+                 "is inconsistent with _old_marking_cycles_completed = %u",
+                 _old_marking_cycles_started, _old_marking_cycles_completed));
+
+  _old_marking_cycles_completed += 1;
 
   // We need to clear the "in_progress" flag in the CM thread before
   // we wake up any waiters (especially when ExplicitInvokesConcurrent
@@ -2432,7 +2444,7 @@
   assert_heap_not_locked();
 
   unsigned int gc_count_before;
-  unsigned int full_gc_count_before;
+  unsigned int old_marking_count_before;
   bool retry_gc;
 
   do {
@@ -2443,7 +2455,7 @@
 
       // Read the GC count while holding the Heap_lock
       gc_count_before = total_collections();
-      full_gc_count_before = total_full_collections();
+      old_marking_count_before = _old_marking_cycles_started;
     }
 
     if (should_do_concurrent_full_gc(cause)) {
@@ -2458,7 +2470,7 @@
 
       VMThread::execute(&op);
       if (!op.pause_succeeded()) {
-        if (full_gc_count_before == total_full_collections()) {
+        if (old_marking_count_before == _old_marking_cycles_started) {
           retry_gc = op.should_retry_gc();
         } else {
           // A Full GC happened while we were trying to schedule the
@@ -2486,7 +2498,7 @@
         VMThread::execute(&op);
       } else {
         // Schedule a Full GC.
-        VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
+        VM_G1CollectFull op(gc_count_before, old_marking_count_before, cause);
         VMThread::execute(&op);
       }
     }
@@ -3613,7 +3625,7 @@
     if (g1_policy()->during_initial_mark_pause()) {
       // We are about to start a marking cycle, so we increment the
       // full collection counter.
-      increment_total_full_collections();
+      increment_old_marking_cycles_started();
     }
     // if the log level is "finer" is on, we'll print long statistics information
     // in the collector policy code, so let's not print this as the output
@@ -3930,25 +3942,30 @@
 
       gc_epilogue(false);
     }
-  }
-
-  // The closing of the inner scope, immediately above, will complete
-  // logging at the "fine" level. The record_collection_pause_end() call
-  // above will complete logging at the "finer" level.
-  //
-  // It is not yet to safe, however, to tell the concurrent mark to
-  // start as we have some optional output below. We don't want the
-  // output from the concurrent mark thread interfering with this
-  // logging output either.
-
-  _hrs.verify_optional();
-  verify_region_sets_optional();
-
-  TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats());
-  TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
-
-  print_heap_after_gc();
-  g1mm()->update_sizes();
+
+    // The closing of the inner scope, immediately above, will complete
+    // logging at the "fine" level. The record_collection_pause_end() call
+    // above will complete logging at the "finer" level.
+    //
+    // It is not yet safe, however, to tell the concurrent mark to
+    // start as we have some optional output below. We don't want the
+    // output from the concurrent mark thread interfering with this
+    // logging output either.
+
+    _hrs.verify_optional();
+    verify_region_sets_optional();
+
+    TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats());
+    TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
+
+    print_heap_after_gc();
+
+    // We must call G1MonitoringSupport::update_sizes() in the same scoping level
+    // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
+    // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
+    // before any GC notifications are raised.
+    g1mm()->update_sizes();
+  }
 
   if (G1SummarizeRSetStats &&
       (G1SummarizeRSetStatsPeriod > 0) &&
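Both relocated g1mm()->update_sizes() calls in this file now sit inside the scope of the live TraceMemoryManagerStats object, so the G1 memory pools are refreshed before any GC notification is raised. Monitoring code that reads those pools through java.lang.management is the consumer of that ordering; a small generic reader (not part of this patch, and pool names depend on the collector) looks like:

import java.lang.management.ManagementFactory;
import java.lang.management.MemoryPoolMXBean;
import java.lang.management.MemoryUsage;

public class PoolSizes {
    public static void main(String[] args) {
        System.gc();  // provoke at least one collection before sampling
        for (MemoryPoolMXBean pool : ManagementFactory.getMemoryPoolMXBeans()) {
            MemoryUsage usage = pool.getUsage();
            if (usage != null) {  // some pools may not report usage
                // Under -XX:+UseG1GC the heap pools are the G1 eden/survivor/old spaces.
                System.out.println(pool.getName() + ": used=" + usage.getUsed()
                                   + " committed=" + usage.getCommitted());
            }
        }
    }
}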
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Jul 05 18:13:47 2017 +0200
@@ -359,10 +359,13 @@
   // (c) cause == _g1_humongous_allocation
   bool should_do_concurrent_full_gc(GCCause::Cause cause);
 
-  // Keeps track of how many "full collections" (i.e., Full GCs or
-  // concurrent cycles) we have completed. The number of them we have
-  // started is maintained in _total_full_collections in CollectedHeap.
-  volatile unsigned int _full_collections_completed;
+  // Keeps track of how many "old marking cycles" (i.e., Full GCs or
+  // concurrent cycles) we have started.
+  volatile unsigned int _old_marking_cycles_started;
+
+  // Keeps track of how many "old marking cycles" (i.e., Full GCs or
+  // concurrent cycles) we have completed.
+  volatile unsigned int _old_marking_cycles_completed;
 
   // This is a non-product method that is helpful for testing. It is
   // called at the end of a GC and artificially expands the heap by
@@ -673,8 +676,12 @@
            (size_t) _in_cset_fast_test_length * sizeof(bool));
   }
 
+  // This is called at the start of either a concurrent cycle or a Full
+  // GC to update the number of old marking cycles started.
+  void increment_old_marking_cycles_started();
+
   // This is called at the end of either a concurrent cycle or a Full
-  // GC to update the number of full collections completed. Those two
+  // GC to update the number of old marking cycles completed. Those two
   // can happen in a nested fashion, i.e., we start a concurrent
   // cycle, a Full GC happens half-way through it which ends first,
   // and then the cycle notices that a Full GC happened and ends
@@ -683,14 +690,14 @@
   // false, the caller is the inner caller in the nesting (i.e., the
   // Full GC). If concurrent is true, the caller is the outer caller
   // in this nesting (i.e., the concurrent cycle). Further nesting is
-  // not currently supported. The end of the this call also notifies
+  // not currently supported. The end of this call also notifies
   // the FullGCCount_lock in case a Java thread is waiting for a full
   // GC to happen (e.g., it called System.gc() with
   // +ExplicitGCInvokesConcurrent).
-  void increment_full_collections_completed(bool concurrent);
+  void increment_old_marking_cycles_completed(bool concurrent);
 
-  unsigned int full_collections_completed() {
-    return _full_collections_completed;
+  unsigned int old_marking_cycles_completed() {
+    return _old_marking_cycles_completed;
   }
 
   G1HRPrinter* hr_printer() { return &_hr_printer; }
--- a/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Wed Jul 05 18:13:47 2017 +0200
@@ -64,7 +64,7 @@
     _should_initiate_conc_mark(should_initiate_conc_mark),
     _target_pause_time_ms(target_pause_time_ms),
     _should_retry_gc(false),
-    _full_collections_completed_before(0) {
+    _old_marking_cycles_completed_before(0) {
   guarantee(target_pause_time_ms > 0.0,
             err_msg("target_pause_time_ms = %1.6lf should be positive",
                     target_pause_time_ms));
@@ -112,11 +112,11 @@
 
   GCCauseSetter x(g1h, _gc_cause);
   if (_should_initiate_conc_mark) {
-    // It's safer to read full_collections_completed() here, given
+    // It's safer to read old_marking_cycles_completed() here, given
     // that noone else will be updating it concurrently. Since we'll
     // only need it if we're initiating a marking cycle, no point in
     // setting it earlier.
-    _full_collections_completed_before = g1h->full_collections_completed();
+    _old_marking_cycles_completed_before = g1h->old_marking_cycles_completed();
 
     // At this point we are supposed to start a concurrent cycle. We
     // will do so if one is not already in progress.
@@ -181,17 +181,17 @@
 
     G1CollectedHeap* g1h = G1CollectedHeap::heap();
 
-    // In the doit() method we saved g1h->full_collections_completed()
-    // in the _full_collections_completed_before field. We have to
-    // wait until we observe that g1h->full_collections_completed()
+    // In the doit() method we saved g1h->old_marking_cycles_completed()
+    // in the _old_marking_cycles_completed_before field. We have to
+    // wait until we observe that g1h->old_marking_cycles_completed()
     // has increased by at least one. This can happen if a) we started
     // a cycle and it completes, b) a cycle already in progress
     // completes, or c) a Full GC happens.
 
     // If the condition has already been reached, there's no point in
     // actually taking the lock and doing the wait.
-    if (g1h->full_collections_completed() <=
-                                          _full_collections_completed_before) {
+    if (g1h->old_marking_cycles_completed() <=
+                                          _old_marking_cycles_completed_before) {
       // The following is largely copied from CMS
 
       Thread* thr = Thread::current();
@@ -200,8 +200,8 @@
       ThreadToNativeFromVM native(jt);
 
       MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
-      while (g1h->full_collections_completed() <=
-                                          _full_collections_completed_before) {
+      while (g1h->old_marking_cycles_completed() <=
+                                          _old_marking_cycles_completed_before) {
         FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
       }
     }
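The doit_epilogue() loop above is what makes an explicit GC request look synchronous under +ExplicitGCInvokesConcurrent: the requesting Java thread parks on FullGCCount_lock until old_marking_cycles_completed() has advanced, i.e. until the started cycle, an already running cycle, or a Full GC finishes. A minimal way to drive that path (both flags are existing HotSpot product flags; the class name is illustrative):

// run with: java -XX:+UseG1GC -XX:+ExplicitGCInvokesConcurrent ExplicitGcDemo
public class ExplicitGcDemo {
    public static void main(String[] args) {
        // Starts a concurrent marking cycle instead of a stop-the-world Full GC and,
        // per the wait loop in VM_G1IncCollectionPause::doit_epilogue(), returns only
        // after the completed-cycle counter has moved past its recorded value.
        System.gc();
        System.out.println("explicit GC request done");
    }
}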
--- a/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp	Wed Jul 05 18:13:47 2017 +0200
@@ -80,7 +80,7 @@
   bool         _should_initiate_conc_mark;
   bool         _should_retry_gc;
   double       _target_pause_time_ms;
-  unsigned int _full_collections_completed_before;
+  unsigned int _old_marking_cycles_completed_before;
 public:
   VM_G1IncCollectionPause(unsigned int   gc_count_before,
                           size_t         word_size,
--- a/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp	Wed Jul 05 18:13:47 2017 +0200
@@ -844,6 +844,14 @@
     int bci = method->bci_from(fr.interpreter_frame_bcp());
     nm = method->lookup_osr_nmethod_for(bci, CompLevel_none, false);
   }
+#ifndef PRODUCT
+  if (TraceOnStackReplacement) {
+    if (nm != NULL) {
+      tty->print("OSR entry @ pc: " INTPTR_FORMAT ": ", nm->osr_entry());
+      nm->print();
+    }
+  }
+#endif
   return nm;
 }
 
--- a/hotspot/src/share/vm/memory/binaryTreeDictionary.cpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/share/vm/memory/binaryTreeDictionary.cpp	Wed Jul 05 18:13:47 2017 +0200
@@ -230,7 +230,7 @@
   link_tail(chunk);
 
   assert(!tail() || size() == tail()->size(), "Wrong sized chunk in list");
-  FreeList<Chunk>::increment_count();
+  increment_count();
   debug_only(increment_returned_bytes_by(chunk->size()*sizeof(HeapWord));)
   assert(head() == NULL || head()->prev() == NULL, "list invariant");
   assert(tail() == NULL || tail()->next() == NULL, "list invariant");
@@ -258,7 +258,7 @@
   }
   head()->link_after(chunk);
   assert(!head() || size() == head()->size(), "Wrong sized chunk in list");
-  FreeList<Chunk>::increment_count();
+  increment_count();
   debug_only(increment_returned_bytes_by(chunk->size()*sizeof(HeapWord));)
   assert(head() == NULL || head()->prev() == NULL, "list invariant");
   assert(tail() == NULL || tail()->next() == NULL, "list invariant");
@@ -909,6 +909,7 @@
 
 template <class Chunk>
 class AscendTreeCensusClosure : public TreeCensusClosure<Chunk> {
+  using TreeCensusClosure<Chunk>::do_list;
  public:
   void do_tree(TreeList<Chunk>* tl) {
     if (tl != NULL) {
@@ -921,6 +922,7 @@
 
 template <class Chunk>
 class DescendTreeCensusClosure : public TreeCensusClosure<Chunk> {
+  using TreeCensusClosure<Chunk>::do_list;
  public:
   void do_tree(TreeList<Chunk>* tl) {
     if (tl != NULL) {
@@ -987,6 +989,7 @@
 
 template <class Chunk>
 class DescendTreeSearchClosure : public TreeSearchClosure<Chunk> {
+  using TreeSearchClosure<Chunk>::do_list;
  public:
   bool do_tree(TreeList<Chunk>* tl) {
     if (tl != NULL) {
--- a/hotspot/src/share/vm/memory/binaryTreeDictionary.hpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/share/vm/memory/binaryTreeDictionary.hpp	Wed Jul 05 18:13:47 2017 +0200
@@ -60,13 +60,18 @@
   TreeList<Chunk>* left()   const { return _left;   }
   TreeList<Chunk>* right()  const { return _right;  }
 
-  // Wrapper on call to base class, to get the template to compile.
-  Chunk* head() const { return FreeList<Chunk>::head(); }
-  Chunk* tail() const { return FreeList<Chunk>::tail(); }
-  void set_head(Chunk* head) { FreeList<Chunk>::set_head(head); }
-  void set_tail(Chunk* tail) { FreeList<Chunk>::set_tail(tail); }
+  // Explicitly import these names into our namespace to fix name lookup with templates
+  using FreeList<Chunk>::head;
+  using FreeList<Chunk>::set_head;
 
-  size_t size() const { return FreeList<Chunk>::size(); }
+  using FreeList<Chunk>::tail;
+  using FreeList<Chunk>::set_tail;
+  using FreeList<Chunk>::link_tail;
+
+  using FreeList<Chunk>::increment_count;
+  NOT_PRODUCT(using FreeList<Chunk>::increment_returned_bytes_by;)
+  using FreeList<Chunk>::verify_chunk_in_free_list;
+  using FreeList<Chunk>::size;
 
   // Accessors for links in tree.
 
--- a/hotspot/src/share/vm/oops/constMethodKlass.cpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/share/vm/oops/constMethodKlass.cpp	Wed Jul 05 18:13:47 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -80,7 +80,7 @@
   No_Safepoint_Verifier no_safepoint;
   cm->set_interpreter_kind(Interpreter::invalid);
   cm->init_fingerprint();
-  cm->set_method(NULL);
+  cm->set_constants(NULL);
   cm->set_stackmap_data(NULL);
   cm->set_exception_table(NULL);
   cm->set_code_size(byte_code_size);
@@ -98,7 +98,7 @@
 void constMethodKlass::oop_follow_contents(oop obj) {
   assert (obj->is_constMethod(), "object must be constMethod");
   constMethodOop cm = constMethodOop(obj);
-  MarkSweep::mark_and_push(cm->adr_method());
+  MarkSweep::mark_and_push(cm->adr_constants());
   MarkSweep::mark_and_push(cm->adr_stackmap_data());
   MarkSweep::mark_and_push(cm->adr_exception_table());
   // Performance tweak: We skip iterating over the klass pointer since we
@@ -110,7 +110,7 @@
                                            oop obj) {
   assert (obj->is_constMethod(), "object must be constMethod");
   constMethodOop cm_oop = constMethodOop(obj);
-  PSParallelCompact::mark_and_push(cm, cm_oop->adr_method());
+  PSParallelCompact::mark_and_push(cm, cm_oop->adr_constants());
   PSParallelCompact::mark_and_push(cm, cm_oop->adr_stackmap_data());
   PSParallelCompact::mark_and_push(cm, cm_oop->adr_exception_table());
   // Performance tweak: We skip iterating over the klass pointer since we
@@ -121,7 +121,7 @@
 int constMethodKlass::oop_oop_iterate(oop obj, OopClosure* blk) {
   assert (obj->is_constMethod(), "object must be constMethod");
   constMethodOop cm = constMethodOop(obj);
-  blk->do_oop(cm->adr_method());
+  blk->do_oop(cm->adr_constants());
   blk->do_oop(cm->adr_stackmap_data());
   blk->do_oop(cm->adr_exception_table());
   // Get size before changing pointers.
@@ -135,7 +135,7 @@
   assert (obj->is_constMethod(), "object must be constMethod");
   constMethodOop cm = constMethodOop(obj);
   oop* adr;
-  adr = cm->adr_method();
+  adr = cm->adr_constants();
   if (mr.contains(adr)) blk->do_oop(adr);
   adr = cm->adr_stackmap_data();
   if (mr.contains(adr)) blk->do_oop(adr);
@@ -153,7 +153,7 @@
 int constMethodKlass::oop_adjust_pointers(oop obj) {
   assert(obj->is_constMethod(), "should be constMethod");
   constMethodOop cm = constMethodOop(obj);
-  MarkSweep::adjust_pointer(cm->adr_method());
+  MarkSweep::adjust_pointer(cm->adr_constants());
   MarkSweep::adjust_pointer(cm->adr_stackmap_data());
   MarkSweep::adjust_pointer(cm->adr_exception_table());
   // Get size before changing pointers.
@@ -188,8 +188,8 @@
   assert(obj->is_constMethod(), "must be constMethod");
   Klass::oop_print_on(obj, st);
   constMethodOop m = constMethodOop(obj);
-  st->print(" - method:       " INTPTR_FORMAT " ", (address)m->method());
-  m->method()->print_value_on(st); st->cr();
+  st->print(" - constants:       " INTPTR_FORMAT " ", (address)m->constants());
+  m->constants()->print_value_on(st); st->cr();
   st->print(" - exceptions:   " INTPTR_FORMAT "\n", (address)m->exception_table());
   if (m->has_stackmap_table()) {
     st->print(" - stackmap data:       ");
@@ -223,8 +223,8 @@
   // Verification can occur during oop construction before the method or
   // other fields have been initialized.
   if (!obj->partially_loaded()) {
-    guarantee(m->method()->is_perm(), "should be in permspace");
-    guarantee(m->method()->is_method(), "should be method");
+    guarantee(m->constants()->is_perm(), "should be in permspace");
+    guarantee(m->constants()->is_constantPool(), "should be constant pool");
     typeArrayOop stackmap_data = m->stackmap_data();
     guarantee(stackmap_data == NULL ||
               stackmap_data->is_perm(),  "should be in permspace");
--- a/hotspot/src/share/vm/oops/constMethodOop.cpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/share/vm/oops/constMethodOop.cpp	Wed Jul 05 18:13:47 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -53,6 +53,10 @@
   return align_object_size(header_size() + extra_words);
 }
 
+methodOop constMethodOopDesc::method() const {
+  return instanceKlass::cast(_constants->pool_holder())->method_with_idnum(
+                             _method_idnum);
+}
 
 // linenumber table - note that length is unknown until decompression,
 // see class CompressedLineNumberReadStream.
--- a/hotspot/src/share/vm/oops/constMethodOop.hpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/share/vm/oops/constMethodOop.hpp	Wed Jul 05 18:13:47 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -41,7 +41,7 @@
 // |------------------------------------------------------|
 // | fingerprint 1                                        |
 // | fingerprint 2                                        |
-// | method                         (oop)                 |
+// | constants                      (oop)                 |
 // | stackmap_data                  (oop)                 |
 // | exception_table                (oop)                 |
 // | constMethod_size                                     |
@@ -113,7 +113,7 @@
   volatile bool     _is_conc_safe; // if true, safe for concurrent GC processing
 
 public:
-  oop* oop_block_beg() const { return adr_method(); }
+  oop* oop_block_beg() const { return adr_constants(); }
   oop* oop_block_end() const { return adr_exception_table() + 1; }
 
 private:
@@ -121,8 +121,7 @@
   // The oop block.  See comment in klass.hpp before making changes.
   //
 
-  // Backpointer to non-const methodOop (needed for some JVMTI operations)
-  methodOop         _method;
+  constantPoolOop   _constants;                  // Constant pool
 
   // Raw stackmap data for the method
   typeArrayOop      _stackmap_data;
@@ -167,10 +166,13 @@
   void set_interpreter_kind(int kind)      { _interpreter_kind = kind; }
   int  interpreter_kind(void) const        { return _interpreter_kind; }
 
-  // backpointer to non-const methodOop
-  methodOop method() const                 { return _method; }
-  void set_method(methodOop m)             { oop_store_without_check((oop*)&_method, (oop) m); }
+  // constant pool
+  constantPoolOop constants() const        { return _constants; }
+  void set_constants(constantPoolOop c)    {
+    oop_store_without_check((oop*)&_constants, (oop)c);
+  }
 
+  methodOop method() const;
 
   // stackmap table data
   typeArrayOop stackmap_data() const { return _stackmap_data; }
@@ -278,11 +280,13 @@
                             { return in_ByteSize(sizeof(constMethodOopDesc)); }
 
   // interpreter support
+  static ByteSize constants_offset()
+               { return byte_offset_of(constMethodOopDesc, _constants); }
   static ByteSize exception_table_offset()
                { return byte_offset_of(constMethodOopDesc, _exception_table); }
 
   // Garbage collection support
-  oop*  adr_method() const             { return (oop*)&_method;          }
+  oop*  adr_constants() const          { return (oop*)&_constants; }
   oop*  adr_stackmap_data() const      { return (oop*)&_stackmap_data;   }
   oop*  adr_exception_table() const    { return (oop*)&_exception_table; }
   bool is_conc_safe() { return _is_conc_safe; }
--- a/hotspot/src/share/vm/oops/methodKlass.cpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/share/vm/oops/methodKlass.cpp	Wed Jul 05 18:13:47 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -112,11 +112,6 @@
 
   assert(m->is_parsable(), "must be parsable here.");
   assert(m->size() == size, "wrong size for object");
-  // We should not publish an uprasable object's reference
-  // into one that is parsable, since that presents problems
-  // for the concurrent parallel marking and precleaning phases
-  // of concurrent gc (CMS).
-  xconst->set_method(m);
   return m;
 }
 
@@ -127,7 +122,6 @@
   // Performance tweak: We skip iterating over the klass pointer since we
   // know that Universe::methodKlassObj never moves.
   MarkSweep::mark_and_push(m->adr_constMethod());
-  MarkSweep::mark_and_push(m->adr_constants());
   if (m->method_data() != NULL) {
     MarkSweep::mark_and_push(m->adr_method_data());
   }
@@ -141,7 +135,6 @@
   // Performance tweak: We skip iterating over the klass pointer since we
   // know that Universe::methodKlassObj never moves.
   PSParallelCompact::mark_and_push(cm, m->adr_constMethod());
-  PSParallelCompact::mark_and_push(cm, m->adr_constants());
 #ifdef COMPILER2
   if (m->method_data() != NULL) {
     PSParallelCompact::mark_and_push(cm, m->adr_method_data());
@@ -159,7 +152,6 @@
   // Performance tweak: We skip iterating over the klass pointer since we
   // know that Universe::methodKlassObj never moves
   blk->do_oop(m->adr_constMethod());
-  blk->do_oop(m->adr_constants());
   if (m->method_data() != NULL) {
     blk->do_oop(m->adr_method_data());
   }
@@ -178,8 +170,6 @@
   oop* adr;
   adr = m->adr_constMethod();
   if (mr.contains(adr)) blk->do_oop(adr);
-  adr = m->adr_constants();
-  if (mr.contains(adr)) blk->do_oop(adr);
   if (m->method_data() != NULL) {
     adr = m->adr_method_data();
     if (mr.contains(adr)) blk->do_oop(adr);
@@ -197,7 +187,6 @@
   // Performance tweak: We skip iterating over the klass pointer since we
   // know that Universe::methodKlassObj never moves.
   MarkSweep::adjust_pointer(m->adr_constMethod());
-  MarkSweep::adjust_pointer(m->adr_constants());
   if (m->method_data() != NULL) {
     MarkSweep::adjust_pointer(m->adr_method_data());
   }
@@ -213,7 +202,6 @@
   assert(obj->is_method(), "should be method");
   methodOop m = methodOop(obj);
   PSParallelCompact::adjust_pointer(m->adr_constMethod());
-  PSParallelCompact::adjust_pointer(m->adr_constants());
 #ifdef COMPILER2
   if (m->method_data() != NULL) {
     PSParallelCompact::adjust_pointer(m->adr_method_data());
@@ -339,8 +327,6 @@
   if (!obj->partially_loaded()) {
     methodOop m = methodOop(obj);
     guarantee(m->is_perm(),  "should be in permspace");
-    guarantee(m->constants()->is_perm(), "should be in permspace");
-    guarantee(m->constants()->is_constantPool(), "should be constant pool");
     guarantee(m->constMethod()->is_constMethod(), "should be constMethodOop");
     guarantee(m->constMethod()->is_perm(), "should be in permspace");
     methodDataOop method_data = m->method_data();
--- a/hotspot/src/share/vm/oops/methodOop.cpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/share/vm/oops/methodOop.cpp	Wed Jul 05 18:13:47 2017 +0200
@@ -70,11 +70,11 @@
   return _adapter->get_c2i_unverified_entry();
 }
 
-char* methodOopDesc::name_and_sig_as_C_string() {
+char* methodOopDesc::name_and_sig_as_C_string() const {
   return name_and_sig_as_C_string(Klass::cast(constants()->pool_holder()), name(), signature());
 }
 
-char* methodOopDesc::name_and_sig_as_C_string(char* buf, int size) {
+char* methodOopDesc::name_and_sig_as_C_string(char* buf, int size) const {
   return name_and_sig_as_C_string(Klass::cast(constants()->pool_holder()), name(), signature(), buf, size);
 }
 
@@ -177,7 +177,8 @@
 
 
 int methodOopDesc::bci_from(address bcp) const {
-  assert(is_native() && bcp == code_base() || contains(bcp) || is_error_reported(), "bcp doesn't belong to this method");
+  assert(is_native() && bcp == code_base() || contains(bcp) || is_error_reported(),
+         err_msg("bcp doesn't belong to this method: bcp: " INTPTR_FORMAT ", method: %s", bcp, name_and_sig_as_C_string()));
   return bcp - code_base();
 }
 
@@ -531,9 +532,9 @@
 
 
 bool methodOopDesc::is_klass_loaded_by_klass_index(int klass_index) const {
-  if( _constants->tag_at(klass_index).is_unresolved_klass() ) {
+  if( constants()->tag_at(klass_index).is_unresolved_klass() ) {
     Thread *thread = Thread::current();
-    Symbol* klass_name = _constants->klass_name_at(klass_index);
+    Symbol* klass_name = constants()->klass_name_at(klass_index);
     Handle loader(thread, instanceKlass::cast(method_holder())->class_loader());
     Handle prot  (thread, Klass::cast(method_holder())->protection_domain());
     return SystemDictionary::find(klass_name, loader, prot, thread) != NULL;
@@ -544,7 +545,7 @@
 
 
 bool methodOopDesc::is_klass_loaded(int refinfo_index, bool must_be_resolved) const {
-  int klass_index = _constants->klass_ref_index_at(refinfo_index);
+  int klass_index = constants()->klass_ref_index_at(refinfo_index);
   if (must_be_resolved) {
     // Make sure klass is resolved in constantpool.
     if (constants()->tag_at(klass_index).is_unresolved_klass()) return false;
@@ -886,11 +887,13 @@
 }
 
 jint* methodOopDesc::method_type_offsets_chain() {
-  static jint pchase[] = { -1, -1, -1 };
+  static jint pchase[] = { -1, -1, -1, -1 };
   if (pchase[0] == -1) {
-    jint step0 = in_bytes(constants_offset());
-    jint step1 = (constantPoolOopDesc::header_size() + _imcp_method_type_value) * HeapWordSize;
+    jint step0 = in_bytes(const_offset());
+    jint step1 = in_bytes(constMethodOopDesc::constants_offset());
+    jint step2 = (constantPoolOopDesc::header_size() + _imcp_method_type_value) * HeapWordSize;
     // do this in reverse to avoid races:
+    OrderAccess::release_store(&pchase[2], step2);
     OrderAccess::release_store(&pchase[1], step1);
     OrderAccess::release_store(&pchase[0], step0);
   }
@@ -1076,9 +1079,7 @@
   assert(m->constMethod()->is_parsable(), "Should remain parsable");
 
   // Reset correct method/const method, method size, and parameter info
-  newcm->set_method(newm());
   newm->set_constMethod(newcm);
-  assert(newcm->method() == newm(), "check");
   newm->constMethod()->set_code_size(new_code_length);
   newm->constMethod()->set_constMethod_size(new_const_method_size);
   newm->set_method_size(new_method_size);
--- a/hotspot/src/share/vm/oops/methodOop.hpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/share/vm/oops/methodOop.hpp	Wed Jul 05 18:13:47 2017 +0200
@@ -64,7 +64,6 @@
 // | klass                                                |
 // |------------------------------------------------------|
 // | constMethodOop                 (oop)                 |
-// | constants                      (oop)                 |
 // |------------------------------------------------------|
 // | methodData                     (oop)                 |
 // | interp_invocation_count                              |
@@ -110,7 +109,6 @@
  friend class VMStructs;
  private:
   constMethodOop    _constMethod;                // Method read-only data.
-  constantPoolOop   _constants;                  // Constant pool
   methodDataOop     _method_data;
   int               _interpreter_invocation_count; // Count of times invoked (reused as prev_event_count in tiered)
   AccessFlags       _access_flags;               // Access flags
@@ -170,17 +168,17 @@
   void set_access_flags(AccessFlags flags)       { _access_flags = flags; }
 
   // name
-  Symbol* name() const                           { return _constants->symbol_at(name_index()); }
+  Symbol* name() const                           { return constants()->symbol_at(name_index()); }
   int name_index() const                         { return constMethod()->name_index();         }
   void set_name_index(int index)                 { constMethod()->set_name_index(index);       }
 
   // signature
-  Symbol* signature() const                      { return _constants->symbol_at(signature_index()); }
+  Symbol* signature() const                      { return constants()->symbol_at(signature_index()); }
   int signature_index() const                    { return constMethod()->signature_index();         }
   void set_signature_index(int index)            { constMethod()->set_signature_index(index);       }
 
   // generics support
-  Symbol* generic_signature() const              { int idx = generic_signature_index(); return ((idx != 0) ? _constants->symbol_at(idx) : (Symbol*)NULL); }
+  Symbol* generic_signature() const              { int idx = generic_signature_index(); return ((idx != 0) ? constants()->symbol_at(idx) : (Symbol*)NULL); }
   int generic_signature_index() const            { return constMethod()->generic_signature_index(); }
   void set_generic_signature_index(int index)    { constMethod()->set_generic_signature_index(index); }
 
@@ -198,8 +196,8 @@
   // C string, for the purpose of providing more useful NoSuchMethodErrors
   // and fatal error handling. The string is allocated in resource
   // area if a buffer is not provided by the caller.
-  char* name_and_sig_as_C_string();
-  char* name_and_sig_as_C_string(char* buf, int size);
+  char* name_and_sig_as_C_string() const;
+  char* name_and_sig_as_C_string(char* buf, int size) const;
 
   // Static routine in the situations we don't have a methodOop
   static char* name_and_sig_as_C_string(Klass* klass, Symbol* method_name, Symbol* signature);
@@ -242,8 +240,8 @@
   }
 
   // constant pool for klassOop holding this method
-  constantPoolOop constants() const              { return _constants; }
-  void set_constants(constantPoolOop c)          { oop_store_without_check((oop*)&_constants, c); }
+  constantPoolOop constants() const              { return constMethod()->constants(); }
+  void set_constants(constantPoolOop c)          { constMethod()->set_constants(c); }
 
   // max stack
   int  max_stack() const                         { return _max_stack; }
@@ -453,7 +451,7 @@
                        { return constMethod()->compressed_linenumber_table(); }
 
   // method holder (the klassOop holding this method)
-  klassOop method_holder() const                 { return _constants->pool_holder(); }
+  klassOop method_holder() const                 { return constants()->pool_holder(); }
 
   void compute_size_of_parameters(Thread *thread); // word size of parameters (receiver if any + arguments)
   Symbol* klass_name() const;                    // returns the name of the method holder
@@ -544,7 +542,6 @@
 
   // interpreter support
   static ByteSize const_offset()                 { return byte_offset_of(methodOopDesc, _constMethod       ); }
-  static ByteSize constants_offset()             { return byte_offset_of(methodOopDesc, _constants         ); }
   static ByteSize access_flags_offset()          { return byte_offset_of(methodOopDesc, _access_flags      ); }
 #ifdef CC_INTERP
   static ByteSize result_index_offset()          { return byte_offset_of(methodOopDesc, _result_index ); }
@@ -723,7 +720,6 @@
 
   // Garbage collection support
   oop*  adr_constMethod() const                  { return (oop*)&_constMethod;     }
-  oop*  adr_constants() const                    { return (oop*)&_constants;       }
   oop*  adr_method_data() const                  { return (oop*)&_method_data;     }
 };
 
--- a/hotspot/src/share/vm/opto/classes.hpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/share/vm/opto/classes.hpp	Wed Jul 05 18:13:47 2017 +0200
@@ -147,7 +147,6 @@
 macro(LoadL)
 macro(LoadL_unaligned)
 macro(LoadPLocked)
-macro(LoadLLocked)
 macro(LoadP)
 macro(LoadN)
 macro(LoadRange)
--- a/hotspot/src/share/vm/opto/compile.cpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/share/vm/opto/compile.cpp	Wed Jul 05 18:13:47 2017 +0200
@@ -2297,7 +2297,6 @@
   case Op_LoadL:
   case Op_LoadL_unaligned:
   case Op_LoadPLocked:
-  case Op_LoadLLocked:
   case Op_LoadP:
   case Op_LoadN:
   case Op_LoadRange:
--- a/hotspot/src/share/vm/opto/divnode.cpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/share/vm/opto/divnode.cpp	Wed Jul 05 18:13:47 2017 +0200
@@ -284,9 +284,14 @@
 
   const int N = 64;
 
+  // Dummy node to keep intermediate nodes alive during construction
+  Node* hook = new (phase->C, 4) Node(4);
+
   // u0 = u & 0xFFFFFFFF;  u1 = u >> 32;
   Node* u0 = phase->transform(new (phase->C, 3) AndLNode(dividend, phase->longcon(0xFFFFFFFF)));
   Node* u1 = phase->transform(new (phase->C, 3) RShiftLNode(dividend, phase->intcon(N / 2)));
+  hook->init_req(0, u0);
+  hook->init_req(1, u1);
 
   // v0 = v & 0xFFFFFFFF;  v1 = v >> 32;
   Node* v0 = phase->longcon(magic_const & 0xFFFFFFFF);
@@ -299,19 +304,14 @@
   Node* u1v0 = phase->transform(new (phase->C, 3) MulLNode(u1, v0));
   Node* temp = phase->transform(new (phase->C, 3) URShiftLNode(w0, phase->intcon(N / 2)));
   Node* t    = phase->transform(new (phase->C, 3) AddLNode(u1v0, temp));
+  hook->init_req(2, t);
 
   // w1 = t & 0xFFFFFFFF;
-  Node* w1 = new (phase->C, 3) AndLNode(t, phase->longcon(0xFFFFFFFF));
+  Node* w1 = phase->transform(new (phase->C, 3) AndLNode(t, phase->longcon(0xFFFFFFFF)));
+  hook->init_req(3, w1);
 
   // w2 = t >> 32;
-  Node* w2 = new (phase->C, 3) RShiftLNode(t, phase->intcon(N / 2));
-
-  // 6732154: Construct both w1 and w2 before transforming, so t
-  // doesn't go dead prematurely.
-  // 6837011: We need to transform w2 before w1 because the
-  // transformation of w1 could return t.
-  w2 = phase->transform(w2);
-  w1 = phase->transform(w1);
+  Node* w2 = phase->transform(new (phase->C, 3) RShiftLNode(t, phase->intcon(N / 2)));
 
   // w1 = u0*v1 + w1;
   Node* u0v1 = phase->transform(new (phase->C, 3) MulLNode(u0, v1));
@@ -322,6 +322,16 @@
   Node* temp1 = phase->transform(new (phase->C, 3) AddLNode(u1v1, w2));
   Node* temp2 = phase->transform(new (phase->C, 3) RShiftLNode(w1, phase->intcon(N / 2)));
 
+  // Remove the bogus extra edges used to keep things alive
+  PhaseIterGVN* igvn = phase->is_IterGVN();
+  if (igvn != NULL) {
+    igvn->remove_dead_node(hook);
+  } else {
+    for (int i = 0; i < 4; i++) {
+      hook->set_req(i, NULL);
+    }
+  }
+
   return new (phase->C, 3) AddLNode(temp1, temp2);
 }
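The transformed node graph computes the high 64 bits of a signed 64x64-bit multiply by splitting both operands into 32-bit halves (the new hook node merely keeps u0, u1, t and w1 alive until the graph is complete). A plain-Java cross-check of that arithmetic, with BigInteger as the reference and method names of my own choosing:

import java.math.BigInteger;

public class MulHiCheck {
    // Same half-word decomposition as the node graph above: >>> mirrors URShiftL,
    // >> mirrors RShiftL, and long overflow wraps just like the AddL/MulL nodes.
    static long mulHighSigned(long u, long v) {
        long u0 = u & 0xFFFFFFFFL, u1 = u >> 32;
        long v0 = v & 0xFFFFFFFFL, v1 = v >> 32;
        long w0 = u0 * v0;
        long t  = u1 * v0 + (w0 >>> 32);
        long w1 = t & 0xFFFFFFFFL;
        long w2 = t >> 32;
        w1 = u0 * v1 + w1;
        return u1 * v1 + w2 + (w1 >> 32);
    }

    public static void main(String[] args) {
        long[] samples = { 0L, 3L, -1L, Long.MAX_VALUE, Long.MIN_VALUE, 0x123456789ABCDEFL };
        for (long u : samples) {
            for (long v : samples) {
                long expected = BigInteger.valueOf(u).multiply(BigInteger.valueOf(v))
                                          .shiftRight(64).longValue();
                if (mulHighSigned(u, v) != expected) {
                    throw new AssertionError(u + " * " + v);
                }
            }
        }
        System.out.println("high 64 bits match BigInteger for all samples");
    }
}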
 
--- a/hotspot/src/share/vm/opto/domgraph.cpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/share/vm/opto/domgraph.cpp	Wed Jul 05 18:13:47 2017 +0200
@@ -465,15 +465,11 @@
           // Kill dead input path
           assert( !visited.test(whead->in(i)->_idx),
                   "input with no loop must be dead" );
-          _igvn.hash_delete(whead);
-          whead->del_req(i);
-          _igvn._worklist.push(whead);
+          _igvn.delete_input_of(whead, i);
           for (DUIterator_Fast jmax, j = whead->fast_outs(jmax); j < jmax; j++) {
             Node* p = whead->fast_out(j);
             if( p->is_Phi() ) {
-              _igvn.hash_delete(p);
-              p->del_req(i);
-              _igvn._worklist.push(p);
+              _igvn.delete_input_of(p, i);
             }
           }
           i--;                  // Rerun same iteration
--- a/hotspot/src/share/vm/opto/ifnode.cpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/share/vm/opto/ifnode.cpp	Wed Jul 05 18:13:47 2017 +0200
@@ -338,8 +338,7 @@
   Node *phi_f = NULL;     // do not construct unless needed
   for (DUIterator_Last i2min, i2 = phi->last_outs(i2min); i2 >= i2min; --i2) {
     Node* v = phi->last_out(i2);// User of the phi
-    igvn->hash_delete(v);       // Have to fixup other Phi users
-    igvn->_worklist.push(v);
+    igvn->rehash_node_delayed(v); // Have to fixup other Phi users
     uint vop = v->Opcode();
     Node *proj = NULL;
     if( vop == Op_Phi ) {       // Remote merge point
@@ -552,9 +551,8 @@
   if( new_cmp == cmp ) return;
   // Else, adjust existing check
   Node *new_bol = gvn->transform( new (gvn->C, 2) BoolNode( new_cmp, bol->as_Bool()->_test._test ) );
-  igvn->hash_delete( iff );
+  igvn->rehash_node_delayed( iff );
   iff->set_req_X( 1, new_bol, igvn );
-  igvn->_worklist.push( iff );
 }
 
 //------------------------------up_one_dom-------------------------------------
@@ -732,9 +730,7 @@
               Node* adjusted = phase->transform(new (phase->C, 3) SubINode(n, phase->intcon(failtype->_lo)));
               Node* newcmp = phase->transform(new (phase->C, 3) CmpUNode(adjusted, phase->intcon(bound)));
               Node* newbool = phase->transform(new (phase->C, 2) BoolNode(newcmp, cond));
-              phase->hash_delete(dom_iff);
-              dom_iff->set_req(1, phase->intcon(ctrl->as_Proj()->_con));
-              phase->is_IterGVN()->_worklist.push(dom_iff);
+              phase->is_IterGVN()->replace_input_of(dom_iff, 1, phase->intcon(ctrl->as_Proj()->_con));
               phase->hash_delete(this);
               set_req(1, newbool);
               return this;
@@ -1042,17 +1038,15 @@
     // Loop ends when projection has no more uses.
     for (DUIterator_Last jmin, j = ifp->last_outs(jmin); j >= jmin; --j) {
       Node* s = ifp->last_out(j);   // Get child of IfTrue/IfFalse
-      igvn->hash_delete(s);         // Yank from hash table before edge hacking
       if( !s->depends_only_on_test() ) {
         // Find the control input matching this def-use edge.
         // For Regions it may not be in slot 0.
         uint l;
         for( l = 0; s->in(l) != ifp; l++ ) { }
-        s->set_req(l, ctrl_target);
+        igvn->replace_input_of(s, l, ctrl_target);
       } else {                      // Else, for control producers,
-        s->set_req(0, data_target); // Move child to data-target
+        igvn->replace_input_of(s, 0, data_target); // Move child to data-target
       }
-      igvn->_worklist.push(s);  // Revisit collapsed Phis
     } // End for each child of a projection
 
     igvn->remove_dead_node(ifp);
--- a/hotspot/src/share/vm/opto/library_call.cpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/share/vm/opto/library_call.cpp	Wed Jul 05 18:13:47 2017 +0200
@@ -192,8 +192,6 @@
   void copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array, bool card_mark);
   bool inline_native_clone(bool is_virtual);
   bool inline_native_Reflection_getCallerClass();
-  bool inline_native_AtomicLong_get();
-  bool inline_native_AtomicLong_attemptUpdate();
   bool is_method_invoke_or_aux_frame(JVMState* jvms);
   // Helper function for inlining native object hash method
   bool inline_native_hashcode(bool is_virtual, bool is_static);
@@ -331,11 +329,6 @@
     // We do not intrinsify this.  The optimizer does fine with it.
     return NULL;
 
-  case vmIntrinsics::_get_AtomicLong:
-  case vmIntrinsics::_attemptUpdate:
-    if (!InlineAtomicLong)  return NULL;
-    break;
-
   case vmIntrinsics::_getCallerClass:
     if (!UseNewReflection)  return NULL;
     if (!InlineReflectionGetCallerClass)  return NULL;
@@ -711,11 +704,6 @@
   case vmIntrinsics::_reverseBytes_c:
     return inline_reverseBytes((vmIntrinsics::ID) intrinsic_id());
 
-  case vmIntrinsics::_get_AtomicLong:
-    return inline_native_AtomicLong_get();
-  case vmIntrinsics::_attemptUpdate:
-    return inline_native_AtomicLong_attemptUpdate();
-
   case vmIntrinsics::_getCallerClass:
     return inline_native_Reflection_getCallerClass();
 
@@ -4006,113 +3994,6 @@
   return false;
 }
 
-static int value_field_offset = -1;  // offset of the "value" field of AtomicLongCSImpl.  This is needed by
-                                     // inline_native_AtomicLong_attemptUpdate() but it has no way of
-                                     // computing it since there is no lookup field by name function in the
-                                     // CI interface.  This is computed and set by inline_native_AtomicLong_get().
-                                     // Using a static variable here is safe even if we have multiple compilation
-                                     // threads because the offset is constant.  At worst the same offset will be
-                                     // computed and  stored multiple
-
-bool LibraryCallKit::inline_native_AtomicLong_get() {
-  // Restore the stack and pop off the argument
-  _sp+=1;
-  Node *obj = pop();
-
-  // get the offset of the "value" field. Since the CI interfaces
-  // does not provide a way to look up a field by name, we scan the bytecodes
-  // to get the field index.  We expect the first 2 instructions of the method
-  // to be:
-  //    0 aload_0
-  //    1 getfield "value"
-  ciMethod* method = callee();
-  if (value_field_offset == -1)
-  {
-    ciField* value_field;
-    ciBytecodeStream iter(method);
-    Bytecodes::Code bc = iter.next();
-
-    if ((bc != Bytecodes::_aload_0) &&
-              ((bc != Bytecodes::_aload) || (iter.get_index() != 0)))
-      return false;
-    bc = iter.next();
-    if (bc != Bytecodes::_getfield)
-      return false;
-    bool ignore;
-    value_field = iter.get_field(ignore);
-    value_field_offset = value_field->offset_in_bytes();
-  }
-
-  // Null check without removing any arguments.
-  _sp++;
-  obj = do_null_check(obj, T_OBJECT);
-  _sp--;
-  // Check for locking null object
-  if (stopped()) return true;
-
-  Node *adr = basic_plus_adr(obj, obj, value_field_offset);
-  const TypePtr *adr_type = _gvn.type(adr)->is_ptr();
-  int alias_idx = C->get_alias_index(adr_type);
-
-  Node *result = _gvn.transform(new (C, 3) LoadLLockedNode(control(), memory(alias_idx), adr));
-
-  push_pair(result);
-
-  return true;
-}
-
-bool LibraryCallKit::inline_native_AtomicLong_attemptUpdate() {
-  // Restore the stack and pop off the arguments
-  _sp+=5;
-  Node *newVal = pop_pair();
-  Node *oldVal = pop_pair();
-  Node *obj = pop();
-
-  // we need the offset of the "value" field which was computed when
-  // inlining the get() method.  Give up if we don't have it.
-  if (value_field_offset == -1)
-    return false;
-
-  // Null check without removing any arguments.
-  _sp+=5;
-  obj = do_null_check(obj, T_OBJECT);
-  _sp-=5;
-  // Check for locking null object
-  if (stopped()) return true;
-
-  Node *adr = basic_plus_adr(obj, obj, value_field_offset);
-  const TypePtr *adr_type = _gvn.type(adr)->is_ptr();
-  int alias_idx = C->get_alias_index(adr_type);
-
-  Node *cas = _gvn.transform(new (C, 5) StoreLConditionalNode(control(), memory(alias_idx), adr, newVal, oldVal));
-  Node *store_proj = _gvn.transform( new (C, 1) SCMemProjNode(cas));
-  set_memory(store_proj, alias_idx);
-  Node *bol = _gvn.transform( new (C, 2) BoolNode( cas, BoolTest::eq ) );
-
-  Node *result;
-  // CMove node is not used to be able fold a possible check code
-  // after attemptUpdate() call. This code could be transformed
-  // into CMove node by loop optimizations.
-  {
-    RegionNode *r = new (C, 3) RegionNode(3);
-    result = new (C, 3) PhiNode(r, TypeInt::BOOL);
-
-    Node *iff = create_and_xform_if(control(), bol, PROB_FAIR, COUNT_UNKNOWN);
-    Node *iftrue = opt_iff(r, iff);
-    r->init_req(1, iftrue);
-    result->init_req(1, intcon(1));
-    result->init_req(2, intcon(0));
-
-    set_control(_gvn.transform(r));
-    record_for_igvn(r);
-
-    C->set_has_split_ifs(true); // Has chance for split-if optimization
-  }
-
-  push(_gvn.transform(result));
-  return true;
-}
-
 bool LibraryCallKit::inline_fp_conversions(vmIntrinsics::ID id) {
   // restore the arguments
   _sp += arg_size();
--- a/hotspot/src/share/vm/opto/loopPredicate.cpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/share/vm/opto/loopPredicate.cpp	Wed Jul 05 18:13:47 2017 +0200
@@ -212,9 +212,8 @@
     Node* use = rgn->fast_out(i);
     if (use->is_Phi() && use->outcnt() > 0) {
       assert(use->in(0) == rgn, "");
-      _igvn.hash_delete(use);
+      _igvn.rehash_node_delayed(use);
       use->add_req(use->in(proj_index));
-      _igvn._worklist.push(use);
       has_phi = true;
     }
   }
@@ -284,9 +283,8 @@
   for (DUIterator_Fast imax, i = rgn->fast_outs(imax); i < imax; i++) {
     Node* use = rgn->fast_out(i);
     if (use->is_Phi() && use->outcnt() > 0) {
-      hash_delete(use);
+      rehash_node_delayed(use);
       use->add_req(use->in(proj_index));
-      _worklist.push(use);
       has_phi = true;
     }
   }
--- a/hotspot/src/share/vm/opto/loopTransform.cpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/share/vm/opto/loopTransform.cpp	Wed Jul 05 18:13:47 2017 +0200
@@ -961,9 +961,7 @@
   set_loop(zer_iff, loop->_parent);
 
   // Plug in the false-path, taken if we need to skip post-loop
-  _igvn.hash_delete( main_exit );
-  main_exit->set_req(0, zer_iff);
-  _igvn._worklist.push(main_exit);
+  _igvn.replace_input_of(main_exit, 0, zer_iff);
   set_idom(main_exit, zer_iff, dd_main_exit);
   set_idom(main_exit->unique_out(), zer_iff, dd_main_exit);
   // Make the true-path, must enter the post loop
@@ -1956,9 +1954,7 @@
       C->set_major_progress();
       Node *kill_con = _igvn.intcon( 1-flip );
       set_ctrl(kill_con, C->root());
-      _igvn.hash_delete(iff);
-      iff->set_req(1, kill_con);
-      _igvn._worklist.push(iff);
+      _igvn.replace_input_of(iff, 1, kill_con);
       // Find surviving projection
       assert(iff->is_If(), "");
       ProjNode* dp = ((IfNode*)iff)->proj_out(1-flip);
@@ -1966,11 +1962,9 @@
       for (DUIterator_Fast imax, i = dp->fast_outs(imax); i < imax; i++) {
         Node* cd = dp->fast_out(i); // Control-dependent node
         if( cd->is_Load() ) {   // Loads can now float around in the loop
-          _igvn.hash_delete(cd);
           // Allow the load to float around in the loop, or before it
           // but NOT before the pre-loop.
-          cd->set_req(0, ctrl);   // ctrl, not NULL
-          _igvn._worklist.push(cd);
+          _igvn.replace_input_of(cd, 0, ctrl); // ctrl, not NULL
           --i;
           --imax;
         }
@@ -2029,14 +2023,10 @@
     main_bol->set_req(1,main_cmp);
   }
   // Hack the now-private loop bounds
-  _igvn.hash_delete(main_cmp);
-  main_cmp->set_req(2, main_limit);
-  _igvn._worklist.push(main_cmp);
+  _igvn.replace_input_of(main_cmp, 2, main_limit);
   // The OpaqueNode is unshared by design
-  _igvn.hash_delete(opqzm);
   assert( opqzm->outcnt() == 1, "cannot hack shared node" );
-  opqzm->set_req(1,main_limit);
-  _igvn._worklist.push(opqzm);
+  _igvn.replace_input_of(opqzm, 1, main_limit);
 }
 
 //------------------------------DCE_loop_body----------------------------------
@@ -2178,9 +2168,7 @@
     Node* cmp = cl->loopexit()->cmp_node();
     assert(cl->limit() == cmp->in(2), "sanity");
     phase->_igvn._worklist.push(cmp->in(2)); // put limit on worklist
-    phase->_igvn.hash_delete(cmp);
-    cmp->set_req(2, exact_limit);
-    phase->_igvn._worklist.push(cmp);        // put cmp on worklist
+    phase->_igvn.replace_input_of(cmp, 2, exact_limit); // put cmp on worklist
   }
   // Note: the final value after increment should not overflow since
   // counted loop has limit check predicate.
--- a/hotspot/src/share/vm/opto/loopUnswitch.cpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/share/vm/opto/loopUnswitch.cpp	Wed Jul 05 18:13:47 2017 +0200
@@ -174,27 +174,21 @@
       Node* use = worklist.pop();
       Node* nuse = use->clone();
       nuse->set_req(0, invar_proj);
-      _igvn.hash_delete(use);
-      use->set_req(1, nuse);
-      _igvn._worklist.push(use);
+      _igvn.replace_input_of(use, 1, nuse);
       register_new_node(nuse, invar_proj);
       // Same for the clone
       Node* use_clone = old_new[use->_idx];
-      _igvn.hash_delete(use_clone);
-      use_clone->set_req(1, nuse);
-      _igvn._worklist.push(use_clone);
+      _igvn.replace_input_of(use_clone, 1, nuse);
     }
   }
 
   // Hardwire the control paths in the loops into if(true) and if(false)
-  _igvn.hash_delete(unswitch_iff);
+  _igvn.rehash_node_delayed(unswitch_iff);
   short_circuit_if(unswitch_iff, proj_true);
-  _igvn._worklist.push(unswitch_iff);
 
   IfNode* unswitch_iff_clone = old_new[unswitch_iff->_idx]->as_If();
-  _igvn.hash_delete(unswitch_iff_clone);
+  _igvn.rehash_node_delayed(unswitch_iff_clone);
   short_circuit_if(unswitch_iff_clone, proj_false);
-  _igvn._worklist.push(unswitch_iff_clone);
 
   // Reoptimize loops
   loop->record_for_igvn();
@@ -224,8 +218,7 @@
   LoopNode* head  = loop->_head->as_Loop();
   bool counted_loop = head->is_CountedLoop();
   Node*     entry = head->in(LoopNode::EntryControl);
-  _igvn.hash_delete(entry);
-  _igvn._worklist.push(entry);
+  _igvn.rehash_node_delayed(entry);
   IdealLoopTree* outer_loop = loop->_parent;
 
   Node *cont      = _igvn.intcon(1);
@@ -249,18 +242,14 @@
 
   // Fast (true) control
   Node* iffast_pred = clone_loop_predicates(entry, iffast, !counted_loop);
-  _igvn.hash_delete(head);
-  head->set_req(LoopNode::EntryControl, iffast_pred);
+  _igvn.replace_input_of(head, LoopNode::EntryControl, iffast_pred);
   set_idom(head, iffast_pred, dom_depth(head));
-  _igvn._worklist.push(head);
 
   // Slow (false) control
   Node* ifslow_pred = clone_loop_predicates(entry, ifslow, !counted_loop);
   LoopNode* slow_head = old_new[head->_idx]->as_Loop();
-  _igvn.hash_delete(slow_head);
-  slow_head->set_req(LoopNode::EntryControl, ifslow_pred);
+  _igvn.replace_input_of(slow_head, LoopNode::EntryControl, ifslow_pred);
   set_idom(slow_head, ifslow_pred, dom_depth(slow_head));
-  _igvn._worklist.push(slow_head);
 
   recompute_dom_depth();
 
--- a/hotspot/src/share/vm/opto/loopnode.cpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/share/vm/opto/loopnode.cpp	Wed Jul 05 18:13:47 2017 +0200
@@ -1129,8 +1129,7 @@
         // I'm mid-iteration over the Region's uses.
         for (DUIterator_Last imin, i = old_phi->last_outs(imin); i >= imin; ) {
           Node* use = old_phi->last_out(i);
-          igvn.hash_delete(use);
-          igvn._worklist.push(use);
+          igvn.rehash_node_delayed(use);
           uint uses_found = 0;
           for (uint j = 0; j < use->len(); j++) {
             if (use->in(j) == old_phi) {
@@ -1186,10 +1185,8 @@
       phi->init_req(LoopNode::LoopBackControl, old_phi->in(outer_idx));
       phi = igvn.register_new_node_with_optimizer(phi, old_phi);
       // Make old Phi point to new Phi on the fall-in path
-      igvn.hash_delete(old_phi);
-      old_phi->set_req(LoopNode::EntryControl, phi);
+      igvn.replace_input_of(old_phi, LoopNode::EntryControl, phi);
       old_phi->del_req(outer_idx);
-      igvn._worklist.push(old_phi);
     }
   }
 
@@ -1992,9 +1989,7 @@
     // we do it here.
     for( uint i = 1; i < C->root()->req(); i++ ) {
       if( !_nodes[C->root()->in(i)->_idx] ) {    // Dead path into Root?
-        _igvn.hash_delete(C->root());
-        C->root()->del_req(i);
-        _igvn._worklist.push(C->root());
+        _igvn.delete_input_of(C->root(), i);
         i--;                      // Rerun same iteration on compressed edges
       }
     }
--- a/hotspot/src/share/vm/opto/loopopts.cpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/share/vm/opto/loopopts.cpp	Wed Jul 05 18:13:47 2017 +0200
@@ -216,9 +216,7 @@
   Node *con = _igvn.makecon(pop == Op_IfTrue ? TypeInt::ONE : TypeInt::ZERO);
   set_ctrl(con, C->root()); // Constant gets a new use
   // Hack the dominated test
-  _igvn.hash_delete(iff);
-  iff->set_req(1, con);
-  _igvn._worklist.push(iff);
+  _igvn.replace_input_of(iff, 1, con);
 
   // If I dont have a reachable TRUE and FALSE path following the IfNode then
   // I can assume this path reaches an infinite loop.  In this case it's not
@@ -245,10 +243,8 @@
     Node* cd = dp->fast_out(i); // Control-dependent node
     if (cd->depends_only_on_test()) {
       assert(cd->in(0) == dp, "");
-      _igvn.hash_delete(cd);
-      cd->set_req(0, prevdom);
+      _igvn.replace_input_of(cd, 0, prevdom);
       set_early_ctrl(cd);
-      _igvn._worklist.push(cd);
       IdealLoopTree *new_loop = get_loop(get_ctrl(cd));
       if (old_loop != new_loop) {
         if (!old_loop->_child) old_loop->_body.yank(cd);
@@ -952,8 +948,7 @@
         if (!n->is_Load() || late_load_ctrl != n_ctrl) {
           for (DUIterator_Last jmin, j = n->last_outs(jmin); j >= jmin; ) {
             Node *u = n->last_out(j); // Clone private computation per use
-            _igvn.hash_delete(u);
-            _igvn._worklist.push(u);
+            _igvn.rehash_node_delayed(u);
             Node *x = n->clone(); // Clone computation
             Node *x_ctrl = NULL;
             if( u->is_Phi() ) {
@@ -1089,9 +1084,7 @@
   for( i = 1; i < phi->req(); i++ ) {
     Node *b = phi->in(i);
     if( b->is_Phi() ) {
-      _igvn.hash_delete(phi);
-      _igvn._worklist.push(phi);
-      phi->set_req(i, clone_iff( b->as_Phi(), loop ));
+      _igvn.replace_input_of(phi, i, clone_iff( b->as_Phi(), loop ));
     } else {
       assert( b->is_Bool(), "" );
     }
@@ -1161,9 +1154,7 @@
   for( i = 1; i < phi->req(); i++ ) {
     Node *b = phi->in(i);
     if( b->is_Phi() ) {
-      _igvn.hash_delete(phi);
-      _igvn._worklist.push(phi);
-      phi->set_req(i, clone_bool( b->as_Phi(), loop ));
+      _igvn.replace_input_of(phi, i, clone_bool( b->as_Phi(), loop ));
     } else {
       assert( b->is_Cmp() || b->is_top(), "inputs are all Cmp or TOP" );
     }
@@ -1347,8 +1338,7 @@
         // The original user of 'use' uses 'r' instead.
         for (DUIterator_Last lmin, l = use->last_outs(lmin); l >= lmin;) {
           Node* useuse = use->last_out(l);
-          _igvn.hash_delete(useuse);
-          _igvn._worklist.push(useuse);
+          _igvn.rehash_node_delayed(useuse);
           uint uses_found = 0;
           if( useuse->in(0) == use ) {
             useuse->set_req(0, r);
@@ -1435,9 +1425,7 @@
         if( use->is_Phi() )     // Phi use is in prior block
           cfg = prev->in(idx);  // NOT in block of Phi itself
         if (cfg->is_top()) {    // Use is dead?
-          _igvn.hash_delete(use);
-          _igvn._worklist.push(use);
-          use->set_req(idx, C->top());
+          _igvn.replace_input_of(use, idx, C->top());
           continue;
         }
 
@@ -1487,9 +1475,7 @@
           set_ctrl(phi, prev);
         }
         // Make 'use' use the Phi instead of the old loop body exit value
-        _igvn.hash_delete(use);
-        _igvn._worklist.push(use);
-        use->set_req(idx, phi);
+        _igvn.replace_input_of(use, idx, phi);
         if( use->_idx >= new_counter ) { // If updating new phis
           // Not needed for correctness, but prevents a weak assert
           // in AddPNode from tripping (when we end up with different
@@ -1517,9 +1503,7 @@
       Node *iff = split_if_set->pop();
       if( iff->in(1)->is_Phi() ) {
         BoolNode *b = clone_iff( iff->in(1)->as_Phi(), loop );
-        _igvn.hash_delete(iff);
-        _igvn._worklist.push(iff);
-        iff->set_req(1, b);
+        _igvn.replace_input_of(iff, 1, b);
       }
     }
   }
@@ -1529,9 +1513,7 @@
       Node *phi = b->in(1);
       assert( phi->is_Phi(), "" );
       CmpNode *cmp = clone_bool( (PhiNode*)phi, loop );
-      _igvn.hash_delete(b);
-      _igvn._worklist.push(b);
-      b->set_req(1, cmp);
+      _igvn.replace_input_of(b, 1, cmp);
     }
   }
   if( split_cex_set ) {
@@ -1686,10 +1668,8 @@
   ProjNode *other_proj = iff->proj_out(!proj->is_IfTrue())->as_Proj();
   int ddepth = dom_depth(proj);
 
-  _igvn.hash_delete(iff);
-  _igvn._worklist.push(iff);
-  _igvn.hash_delete(proj);
-  _igvn._worklist.push(proj);
+  _igvn.rehash_node_delayed(iff);
+  _igvn.rehash_node_delayed(proj);
 
   proj->set_req(0, NULL);  // temporary disconnect
   ProjNode* proj2 = proj_clone(proj, iff);
@@ -1745,10 +1725,8 @@
   ProjNode *other_proj = iff->proj_out(!proj->is_IfTrue())->as_Proj();
   int ddepth = dom_depth(proj);
 
-  _igvn.hash_delete(iff);
-  _igvn._worklist.push(iff);
-  _igvn.hash_delete(proj);
-  _igvn._worklist.push(proj);
+  _igvn.rehash_node_delayed(iff);
+  _igvn.rehash_node_delayed(proj);
 
   proj->set_req(0, NULL);  // temporary disconnect
   ProjNode* proj2 = proj_clone(proj, iff);
@@ -1970,9 +1948,7 @@
 
     // clone "n" and insert it between the inputs of "n" and the use outside the loop
     Node* n_clone = n->clone();
-    _igvn.hash_delete(use);
-    use->set_req(j, n_clone);
-    _igvn._worklist.push(use);
+    _igvn.replace_input_of(use, j, n_clone);
     Node* use_c;
     if (!use->is_Phi()) {
       use_c = has_ctrl(use) ? get_ctrl(use) : use->in(0);
@@ -2028,8 +2004,7 @@
 #endif
     while( worklist.size() ) {
       Node *use = worklist.pop();
-      _igvn.hash_delete(use);
-      _igvn._worklist.push(use);
+      _igvn.rehash_node_delayed(use);
       for (uint j = 1; j < use->req(); j++) {
         if (use->in(j) == n) {
           use->set_req(j, n_clone);
@@ -2055,9 +2030,7 @@
     _igvn.remove_dead_node(phi);
     phi = hit;
   }
-  _igvn.hash_delete(use);
-  _igvn._worklist.push(use);
-  use->set_req(idx, phi);
+  _igvn.replace_input_of(use, idx, phi);
 }
 
 #ifdef ASSERT
@@ -2630,9 +2603,7 @@
               // use is in loop
               if (old_new[use->_idx] != NULL) { // null for dead code
                 Node* use_clone = old_new[use->_idx];
-                _igvn.hash_delete(use);
-                use->set_req(j, C->top());
-                _igvn._worklist.push(use);
+                _igvn.replace_input_of(use, j, C->top());
                 insert_phi_for_loop( use_clone, j, old_new[def->_idx], def, new_head_clone );
               }
             } else {
@@ -2667,46 +2638,35 @@
     if (!n->is_CFG()           && n->in(0) != NULL        &&
         not_peel.test(n->_idx) && peel.test(n->in(0)->_idx)) {
       Node* n_clone = old_new[n->_idx];
-      _igvn.hash_delete(n_clone);
-      n_clone->set_req(0, new_head_clone);
-      _igvn._worklist.push(n_clone);
+      _igvn.replace_input_of(n_clone, 0, new_head_clone);
     }
   }
 
   // Backedge of the surviving new_head (the clone) is original last_peel
-  _igvn.hash_delete(new_head_clone);
-  new_head_clone->set_req(LoopNode::LoopBackControl, last_peel);
-  _igvn._worklist.push(new_head_clone);
+  _igvn.replace_input_of(new_head_clone, LoopNode::LoopBackControl, last_peel);
 
   // Cut first node in original not_peel set
-  _igvn.hash_delete(new_head);
-  new_head->set_req(LoopNode::EntryControl, C->top());
-  new_head->set_req(LoopNode::LoopBackControl, C->top());
-  _igvn._worklist.push(new_head);
+  _igvn.rehash_node_delayed(new_head);                     // Multiple edge updates:
+  new_head->set_req(LoopNode::EntryControl,    C->top());  //   use rehash_node_delayed / set_req instead of
+  new_head->set_req(LoopNode::LoopBackControl, C->top());  //   multiple replace_input_of calls
 
   // Copy head_clone back-branch info to original head
   // and remove original head's loop entry and
   // clone head's back-branch
-  _igvn.hash_delete(head);
-  _igvn.hash_delete(head_clone);
-  head->set_req(LoopNode::EntryControl, head_clone->in(LoopNode::LoopBackControl));
+  _igvn.rehash_node_delayed(head); // Multiple edge updates
+  head->set_req(LoopNode::EntryControl,    head_clone->in(LoopNode::LoopBackControl));
   head->set_req(LoopNode::LoopBackControl, C->top());
-  head_clone->set_req(LoopNode::LoopBackControl, C->top());
-  _igvn._worklist.push(head);
-  _igvn._worklist.push(head_clone);
+  _igvn.replace_input_of(head_clone, LoopNode::LoopBackControl, C->top());
 
   // Similarly modify the phis
   for (DUIterator_Fast kmax, k = head->fast_outs(kmax); k < kmax; k++) {
     Node* use = head->fast_out(k);
     if (use->is_Phi() && use->outcnt() > 0) {
       Node* use_clone = old_new[use->_idx];
-      _igvn.hash_delete(use);
-      _igvn.hash_delete(use_clone);
-      use->set_req(LoopNode::EntryControl, use_clone->in(LoopNode::LoopBackControl));
+      _igvn.rehash_node_delayed(use); // Multiple edge updates
+      use->set_req(LoopNode::EntryControl,    use_clone->in(LoopNode::LoopBackControl));
       use->set_req(LoopNode::LoopBackControl, C->top());
-      use_clone->set_req(LoopNode::LoopBackControl, C->top());
-      _igvn._worklist.push(use);
-      _igvn._worklist.push(use_clone);
+      _igvn.replace_input_of(use_clone, LoopNode::LoopBackControl, C->top());
     }
   }
 
@@ -2792,8 +2752,7 @@
       set_ctrl(neg_stride, C->root());
       Node *post = new (C, 3) AddINode( opaq, neg_stride);
       register_new_node( post, u_ctrl );
-      _igvn.hash_delete(use);
-      _igvn._worklist.push(use);
+      _igvn.rehash_node_delayed(use);
       for (uint j = 1; j < use->req(); j++) {
         if (use->in(j) == phi)
           use->set_req(j, post);
--- a/hotspot/src/share/vm/opto/macro.cpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/share/vm/opto/macro.cpp	Wed Jul 05 18:13:47 2017 +0200
@@ -1447,9 +1447,8 @@
   if (!always_slow && _memproj_fallthrough != NULL) {
     for (DUIterator_Fast imax, i = _memproj_fallthrough->fast_outs(imax); i < imax; i++) {
       Node *use = _memproj_fallthrough->fast_out(i);
-      _igvn.hash_delete(use);
+      _igvn.rehash_node_delayed(use);
       imax -= replace_input(use, _memproj_fallthrough, result_phi_rawmem);
-      _igvn._worklist.push(use);
       // back up iterator
       --i;
     }
@@ -1463,9 +1462,8 @@
     }
     for (DUIterator_Fast imax, i = _memproj_catchall->fast_outs(imax); i < imax; i++) {
       Node *use = _memproj_catchall->fast_out(i);
-      _igvn.hash_delete(use);
+      _igvn.rehash_node_delayed(use);
       imax -= replace_input(use, _memproj_catchall, _memproj_fallthrough);
-      _igvn._worklist.push(use);
       // back up iterator
       --i;
     }
@@ -1481,9 +1479,8 @@
   if (_ioproj_fallthrough != NULL) {
     for (DUIterator_Fast imax, i = _ioproj_fallthrough->fast_outs(imax); i < imax; i++) {
       Node *use = _ioproj_fallthrough->fast_out(i);
-      _igvn.hash_delete(use);
+      _igvn.rehash_node_delayed(use);
       imax -= replace_input(use, _ioproj_fallthrough, result_phi_i_o);
-      _igvn._worklist.push(use);
       // back up iterator
       --i;
     }
@@ -1497,9 +1494,8 @@
     }
     for (DUIterator_Fast imax, i = _ioproj_catchall->fast_outs(imax); i < imax; i++) {
       Node *use = _ioproj_catchall->fast_out(i);
-      _igvn.hash_delete(use);
+      _igvn.rehash_node_delayed(use);
       imax -= replace_input(use, _ioproj_catchall, _ioproj_fallthrough);
-      _igvn._worklist.push(use);
       // back up iterator
       --i;
     }
@@ -1857,18 +1853,16 @@
       if (alock->box_node() == oldbox && alock->obj_node()->eqv_uncast(obj)) {
         // Replace Box and mark eliminated all related locks and unlocks.
         alock->set_non_esc_obj();
-        _igvn.hash_delete(alock);
+        _igvn.rehash_node_delayed(alock);
         alock->set_box_node(newbox);
-        _igvn._worklist.push(alock);
         next_edge = false;
       }
     }
     if (u->is_FastLock() && u->as_FastLock()->obj_node()->eqv_uncast(obj)) {
       FastLockNode* flock = u->as_FastLock();
       assert(flock->box_node() == oldbox, "sanity");
-      _igvn.hash_delete(flock);
+      _igvn.rehash_node_delayed(flock);
       flock->set_box_node(newbox);
-      _igvn._worklist.push(flock);
       next_edge = false;
     }
 
@@ -1886,9 +1880,7 @@
           Node* box_node = sfn->monitor_box(jvms, idx);
           if (box_node == oldbox && obj_node->eqv_uncast(obj)) {
             int j = jvms->monitor_box_offset(idx);
-            _igvn.hash_delete(u);
-            u->set_req(j, newbox);
-            _igvn._worklist.push(u);
+            _igvn.replace_input_of(u, j, newbox);
             next_edge = false;
           }
         }
--- a/hotspot/src/share/vm/opto/memnode.cpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/share/vm/opto/memnode.cpp	Wed Jul 05 18:13:47 2017 +0200
@@ -717,6 +717,22 @@
         adr = adr->in(1);
         continue;
 
+      case Op_EncodeP:
+        // EncodeP node's control edge could be set by this method
+        // when EncodeP node depends on CastPP node.
+        //
+        // Use its control edge for memory op because EncodeP may go away
+        // later when it is folded with following or preceding DecodeN node.
+        if (adr->in(0) == NULL) {
+          // Keep looking for cast nodes.
+          adr = adr->in(1);
+          continue;
+        }
+        ccp->hash_delete(n);
+        n->set_req(MemNode::Control, adr->in(0));
+        ccp->hash_insert(n);
+        return n;
+
       case Op_CastPP:
         // If the CastPP is useless, just peek on through it.
         if( ccp->type(adr) == ccp->type(adr->in(1)) ) {
--- a/hotspot/src/share/vm/opto/memnode.hpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/share/vm/opto/memnode.hpp	Wed Jul 05 18:13:47 2017 +0200
@@ -636,17 +636,6 @@
   virtual bool depends_only_on_test() const { return true; }
 };
 
-//------------------------------LoadLLockedNode---------------------------------
-// Load-locked a pointer from memory (either object or array).
-// On Sparc & Intel this is implemented as a normal long load.
-class LoadLLockedNode : public LoadLNode {
-public:
-  LoadLLockedNode( Node *c, Node *mem, Node *adr )
-    : LoadLNode(c,mem,adr,TypeRawPtr::BOTTOM, TypeLong::LONG) {}
-  virtual int Opcode() const;
-  virtual int store_Opcode() const { return Op_StoreLConditional; }
-};
-
 //------------------------------SCMemProjNode---------------------------------------
 // This class defines a projection of the memory  state of a store conditional node.
 // These nodes return a value, but also update memory.
--- a/hotspot/src/share/vm/opto/parse.hpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/share/vm/opto/parse.hpp	Wed Jul 05 18:13:47 2017 +0200
@@ -527,6 +527,9 @@
   int     repush_if_args();
   void    adjust_map_after_if(BoolTest::mask btest, Node* c, float prob,
                               Block* path, Block* other_path);
+  void    sharpen_type_after_if(BoolTest::mask btest,
+                                Node* con, const Type* tcon,
+                                Node* val, const Type* tval);
   IfNode* jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask);
   Node*   jump_if_join(Node* iffalse, Node* iftrue);
   void    jump_if_true_fork(IfNode *ifNode, int dest_bci_if_true, int prof_table_index);
--- a/hotspot/src/share/vm/opto/parse2.cpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/share/vm/opto/parse2.cpp	Wed Jul 05 18:13:47 2017 +0200
@@ -1233,6 +1233,71 @@
   if (!have_con)                        // remaining adjustments need a con
     return;
 
+  sharpen_type_after_if(btest, con, tcon, val, tval);
+}
+
+
+static Node* extract_obj_from_klass_load(PhaseGVN* gvn, Node* n) {
+  Node* ldk;
+  if (n->is_DecodeN()) {
+    if (n->in(1)->Opcode() != Op_LoadNKlass) {
+      return NULL;
+    } else {
+      ldk = n->in(1);
+    }
+  } else if (n->Opcode() != Op_LoadKlass) {
+    return NULL;
+  } else {
+    ldk = n;
+  }
+  assert(ldk != NULL && ldk->is_Load(), "should have found a LoadKlass or LoadNKlass node");
+
+  Node* adr = ldk->in(MemNode::Address);
+  intptr_t off = 0;
+  Node* obj = AddPNode::Ideal_base_and_offset(adr, gvn, off);
+  if (obj == NULL || off != oopDesc::klass_offset_in_bytes()) // loading oopDesc::_klass?
+    return NULL;
+  const TypePtr* tp = gvn->type(obj)->is_ptr();
+  if (tp == NULL || !(tp->isa_instptr() || tp->isa_aryptr())) // is obj a Java object ptr?
+    return NULL;
+
+  return obj;
+}
+
+void Parse::sharpen_type_after_if(BoolTest::mask btest,
+                                  Node* con, const Type* tcon,
+                                  Node* val, const Type* tval) {
+  // Look for opportunities to sharpen the type of a node
+  // whose klass is compared with a constant klass.
+  if (btest == BoolTest::eq && tcon->isa_klassptr()) {
+    Node* obj = extract_obj_from_klass_load(&_gvn, val);
+    const TypeOopPtr* con_type = tcon->isa_klassptr()->as_instance_type();
+    if (obj != NULL && (con_type->isa_instptr() || con_type->isa_aryptr())) {
+       // Found:
+       //   Bool(CmpP(LoadKlass(obj._klass), ConP(Foo.klass)), [eq])
+       // or the narrowOop equivalent.
+       const Type* obj_type = _gvn.type(obj);
+       const TypeOopPtr* tboth = obj_type->join(con_type)->isa_oopptr();
+       if (tboth != NULL && tboth != obj_type && tboth->higher_equal(obj_type)) {
+          // obj has to be of the exact type Foo if the CmpP succeeds.
+          assert(tboth->klass_is_exact(), "klass should be exact");
+          int obj_in_map = map()->find_edge(obj);
+          JVMState* jvms = this->jvms();
+          if (obj_in_map >= 0 &&
+              (jvms->is_loc(obj_in_map) || jvms->is_stk(obj_in_map))) {
+            TypeNode* ccast = new (C, 2) CheckCastPPNode(control(), obj, tboth);
+            const Type* tcc = ccast->as_Type()->type();
+            assert(tcc != obj_type && tcc->higher_equal(obj_type), "must improve");
+            // Delay transform() call to allow recovery of pre-cast value
+            // at the control merge.
+            _gvn.set_type_bottom(ccast);
+            record_for_igvn(ccast);
+            // Here's the payoff.
+            replace_in_map(obj, ccast);
+          }
+       }
+    }
+  }
 
   int val_in_map = map()->find_edge(val);
   if (val_in_map < 0)  return;          // replace_in_map would be useless
@@ -1265,6 +1330,7 @@
         // Exclude tests vs float/double 0 as these could be
         // either +0 or -0.  Just because you are equal to +0
         // doesn't mean you ARE +0!
+        // Note, following code also replaces Long and Oop values.
         if ((!tf || tf->_f != 0.0) &&
             (!td || td->_d != 0.0))
           cast = con;                   // Replace non-constant val by con.
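
sharpen_type_after_if only installs the CheckCastPP when intersecting the object's known type with the exact type implied by the klass constant yields something strictly more precise. A minimal sketch of that join/higher_equal check, with sets of class names standing in for HotSpot's type lattice (an assumed toy model, not the real Type hierarchy):

// sharpen_toy.cpp -- toy model of the join/higher_equal test above; sets of
// class names stand in for HotSpot's type lattice (illustrative only).
#include <cstdio>
#include <set>
#include <string>

typedef std::set<std::string> ToyType;   // the classes a value might be

// Type::join moves toward the more precise type: set intersection here.
static ToyType join(const ToyType& a, const ToyType& b) {
  ToyType r;
  for (ToyType::const_iterator it = a.begin(); it != a.end(); ++it)
    if (b.count(*it)) r.insert(*it);
  return r;
}

// a "higher_equal" b: a is at least as precise as b (a subset here).
static bool higher_equal(const ToyType& a, const ToyType& b) {
  for (ToyType::const_iterator it = a.begin(); it != a.end(); ++it)
    if (!b.count(*it)) return false;
  return true;
}

int main() {
  ToyType obj_type;                      // what the parser knows about obj
  obj_type.insert("Foo"); obj_type.insert("FooSub"); obj_type.insert("Bar");
  ToyType con_type;                      // exact type implied by ConP(Foo.klass)
  con_type.insert("Foo");

  ToyType tboth = join(obj_type, con_type);
  // Mirrors: tboth != NULL && tboth != obj_type && tboth->higher_equal(obj_type)
  bool sharpen = !tboth.empty() && tboth != obj_type && higher_equal(tboth, obj_type);
  std::printf("install CheckCastPP to the exact type? %s\n", sharpen ? "yes" : "no");
  return 0;
}
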
--- a/hotspot/src/share/vm/opto/phaseX.hpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/share/vm/opto/phaseX.hpp	Wed Jul 05 18:13:47 2017 +0200
@@ -460,6 +460,25 @@
     subsume_node(old, nn);
   }
 
+  // Delayed node rehash: remove a node from the hash table and rehash it during
+  // next optimizing pass
+  void rehash_node_delayed(Node* n) {
+    hash_delete(n);
+    _worklist.push(n);
+  }
+
+  // Replace ith edge of "n" with "in"
+  void replace_input_of(Node* n, int i, Node* in) {
+    rehash_node_delayed(n);
+    n->set_req(i, in);
+  }
+
+  // Delete ith edge of "n"
+  void delete_input_of(Node* n, int i) {
+    rehash_node_delayed(n);
+    n->del_req(i);
+  }
+
   bool delay_transform() const { return _delay_transform; }
 
   void set_delay_transform(bool delay) {
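
The hash_delete / edge-update / _worklist.push triples replaced throughout this changeset collapse into the three helpers added here. A toy stand-in (hypothetical Node and ToyIterGVN types, not the real classes) shows the intended semantics: unhash the node before its edges change, then queue it for re-hashing and re-optimization on the next pass.

// toy_igvn.cpp -- illustrative stand-in for the helpers above, not the real
// PhaseIterGVN; Node here is a hypothetical two-field struct.
#include <cstdio>
#include <deque>
#include <unordered_set>
#include <vector>

struct Node {
  int id;
  std::vector<Node*> in;                      // input edges, like Node::in(i)
  explicit Node(int id) : id(id) {}
  void set_req(size_t i, Node* n) { in[i] = n; }
  void del_req(size_t i)          { in.erase(in.begin() + i); }
};

struct ToyIterGVN {
  std::unordered_set<Node*> table;            // stands in for the GVN hash table
  std::deque<Node*>         worklist;         // stands in for _worklist

  void hash_delete(Node* n) { table.erase(n); }

  // Same shape as the new helpers: unhash before the edges change, then queue
  // the node so it is re-hashed and re-optimized on the next pass.
  void rehash_node_delayed(Node* n) { hash_delete(n); worklist.push_back(n); }
  void replace_input_of(Node* n, size_t i, Node* in) { rehash_node_delayed(n); n->set_req(i, in); }
  void delete_input_of(Node* n, size_t i)            { rehash_node_delayed(n); n->del_req(i); }
};

int main() {
  Node a(1), b(2), c(3), x(4);
  c.in.push_back(&a);
  c.in.push_back(&b);

  ToyIterGVN igvn;
  igvn.table.insert(&c);

  // Old call sites: hash_delete(c); c->set_req(1, x); _worklist.push(c);
  // New call sites collapse that into one line:
  igvn.replace_input_of(&c, 1, &x);

  std::printf("c.in[1] = node %d, hashed = %s, worklist size = %zu\n",
              c.in[1]->id, igvn.table.count(&c) ? "yes" : "no",
              igvn.worklist.size());
  return 0;
}
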
--- a/hotspot/src/share/vm/opto/split_if.cpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/share/vm/opto/split_if.cpp	Wed Jul 05 18:13:47 2017 +0200
@@ -137,9 +137,7 @@
             Node *iff_ctrl = iff->is_If() ? iff->in(0) : get_ctrl(iff);
             Node *x = bol->clone();
             register_new_node(x, iff_ctrl);
-            _igvn.hash_delete(iff);
-            iff->set_req(1, x);
-            _igvn._worklist.push(iff);
+            _igvn.replace_input_of(iff, 1, x);
           }
           _igvn.remove_dead_node( bol );
           --i;
@@ -151,9 +149,7 @@
         assert( bol->in(1) == n, "" );
         Node *x = n->clone();
         register_new_node(x, get_ctrl(bol));
-        _igvn.hash_delete(bol);
-        bol->set_req(1, x);
-        _igvn._worklist.push(bol);
+        _igvn.replace_input_of(bol, 1, x);
       }
       _igvn.remove_dead_node( n );
 
@@ -387,9 +383,7 @@
     if( use->in(i) == def )
       break;
   assert( i < use->req(), "def should be among use's inputs" );
-  _igvn.hash_delete(use);
-  use->set_req(i, new_def);
-  _igvn._worklist.push(use);
+  _igvn.replace_input_of(use, i, new_def);
 }
 
 //------------------------------do_split_if------------------------------------
--- a/hotspot/src/share/vm/opto/subnode.cpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/share/vm/opto/subnode.cpp	Wed Jul 05 18:13:47 2017 +0200
@@ -702,12 +702,84 @@
     return TypeInt::CC;
 }
 
+static inline Node* isa_java_mirror_load(PhaseGVN* phase, Node* n) {
+  // Return the klass node for
+  //   LoadP(AddP(foo:Klass, #java_mirror))
+  //   or NULL if not matching.
+  if (n->Opcode() != Op_LoadP) return NULL;
+
+  const TypeInstPtr* tp = phase->type(n)->isa_instptr();
+  if (!tp || tp->klass() != phase->C->env()->Class_klass()) return NULL;
+
+  Node* adr = n->in(MemNode::Address);
+  intptr_t off = 0;
+  Node* k = AddPNode::Ideal_base_and_offset(adr, phase, off);
+  if (k == NULL)  return NULL;
+  const TypeKlassPtr* tkp = phase->type(k)->isa_klassptr();
+  if (!tkp || off != in_bytes(Klass::java_mirror_offset())) return NULL;
+
+  // We've found the klass node of a Java mirror load.
+  return k;
+}
+
+static inline Node* isa_const_java_mirror(PhaseGVN* phase, Node* n) {
+  // for ConP(Foo.class) return ConP(Foo.klass)
+  // otherwise return NULL
+  if (!n->is_Con()) return NULL;
+
+  const TypeInstPtr* tp = phase->type(n)->isa_instptr();
+  if (!tp) return NULL;
+
+  ciType* mirror_type = tp->java_mirror_type();
+  // TypeInstPtr::java_mirror_type() returns non-NULL for compile-
+  // time Class constants only.
+  if (!mirror_type) return NULL;
+
+  // x.getClass() == int.class can never be true (for all primitive types)
+  // Return a ConP(NULL) node for this case.
+  if (mirror_type->is_classless()) {
+    return phase->makecon(TypePtr::NULL_PTR);
+  }
+
+  // return the ConP(Foo.klass)
+  assert(mirror_type->is_klass(), "mirror_type should represent a klassOop");
+  return phase->makecon(TypeKlassPtr::make(mirror_type->as_klass()));
+}
+
 //------------------------------Ideal------------------------------------------
-// Check for the case of comparing an unknown klass loaded from the primary
+// Normalize comparisons between Java mirror loads to compare the klass instead.
+//
+// Also check for the case of comparing an unknown klass loaded from the primary
 // super-type array vs a known klass with no subtypes.  This amounts to
 // checking to see an unknown klass subtypes a known klass with no subtypes;
 // this only happens on an exact match.  We can shorten this test by 1 load.
 Node *CmpPNode::Ideal( PhaseGVN *phase, bool can_reshape ) {
+  // Normalize comparisons between Java mirrors into comparisons of the low-
+  // level klass, where a dependent load could be shortened.
+  //
+  // The new pattern has a nice effect of matching the same pattern used in the
+  // fast path of instanceof/checkcast/Class.isInstance(), which allows
+  // a redundant exact type check to be optimized away by GVN.
+  // For example, in
+  //   if (x.getClass() == Foo.class) {
+  //     Foo foo = (Foo) x;
+  //     // ... use foo ...
+  //   }
+  // a CmpPNode could be shared between if_acmpne and checkcast
+  {
+    Node* k1 = isa_java_mirror_load(phase, in(1));
+    Node* k2 = isa_java_mirror_load(phase, in(2));
+    Node* conk2 = isa_const_java_mirror(phase, in(2));
+
+    if (k1 && (k2 || conk2)) {
+      Node* lhs = k1;
+      Node* rhs = (k2 != NULL) ? k2 : conk2;
+      this->set_req(1, lhs);
+      this->set_req(2, rhs);
+      return this;
+    }
+  }
+
   // Constant pointer on right?
   const TypeKlassPtr* t2 = phase->type(in(2))->isa_klassptr();
   if (t2 == NULL || !t2->klass_is_exact())
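
Because each loaded class has exactly one klass and one java.lang.Class mirror reachable through the java_mirror field, a comparison of two mirror loads (or of a mirror load against a constant Class) is equivalent to comparing the underlying klass nodes, which is what the Ideal() change above exploits. A toy model of that rewrite, using hypothetical stand-in types rather than HotSpot's node classes:

// cmp_mirror_toy.cpp -- toy model of the CmpP normalization above; the types
// here are hypothetical stand-ins, not HotSpot classes.
#include <cstdio>
#include <string>

struct Klass  { std::string name; };
struct Mirror { const Klass* klass; };   // the java.lang.Class object for a klass

// Stand-in for an IR value that is either a mirror loaded from a klass
// (LoadP(AddP(k, #java_mirror))) or a constant Class mirror (ConP(Foo.class)).
struct Value {
  const Klass*  loaded_from;   // non-NULL: mirror load from this klass node
  const Mirror* constant;      // non-NULL: constant Class mirror
};

static const Klass* as_klass(const Value& v) {
  if (v.loaded_from) return v.loaded_from;      // isa_java_mirror_load case
  if (v.constant)    return v.constant->klass;  // isa_const_java_mirror case
  return NULL;
}

// Rewrite "mirror == mirror" into "klass == klass": one dependent load shorter,
// and the same shape the instanceof/checkcast fast path already uses.
static bool cmp_p(const Value& a, const Value& b) {
  const Klass* k1 = as_klass(a);
  const Klass* k2 = as_klass(b);
  if (k1 && k2) return k1 == k2;
  return false;                     // toy: other shapes not handled
}

int main() {
  Klass foo = { "Foo" }, bar = { "Bar" };
  Mirror foo_class = { &foo };
  Value x_get_class   = { &foo, NULL };        // x.getClass() where x is a Foo
  Value y_get_class   = { &bar, NULL };        // y.getClass() where y is a Bar
  Value foo_class_con = { NULL, &foo_class };  // Foo.class constant
  std::printf("x.getClass() == Foo.class : %d\n", cmp_p(x_get_class, foo_class_con));
  std::printf("y.getClass() == Foo.class : %d\n", cmp_p(y_get_class, foo_class_con));
  return 0;
}
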
--- a/hotspot/src/share/vm/opto/superword.cpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/share/vm/opto/superword.cpp	Wed Jul 05 18:13:47 2017 +0200
@@ -944,7 +944,7 @@
 void SuperWord::remove_and_insert(MemNode *current, MemNode *prev, MemNode *lip,
                                   Node *uip, Unique_Node_List &sched_before) {
   Node* my_mem = current->in(MemNode::Memory);
-  _igvn.hash_delete(current);
+  _igvn.rehash_node_delayed(current);
   _igvn.hash_delete(my_mem);
 
   //remove current_store from its current position in the memory graph
@@ -952,7 +952,7 @@
     Node* use = current->out(i);
     if (use->is_Mem()) {
       assert(use->in(MemNode::Memory) == current, "must be");
-      _igvn.hash_delete(use);
+      _igvn.rehash_node_delayed(use);
       if (use == prev) { // connect prev to my_mem
         use->set_req(MemNode::Memory, my_mem);
       } else if (sched_before.member(use)) {
@@ -962,7 +962,6 @@
         _igvn.hash_delete(lip);
         use->set_req(MemNode::Memory, lip);
       }
-      _igvn._worklist.push(use);
       --i; //deleted this edge; rescan position
     }
   }
@@ -976,25 +975,20 @@
     Node* use = insert_pt->out(i);
     if (use->is_Mem()) {
       assert(use->in(MemNode::Memory) == insert_pt, "must be");
-      _igvn.hash_delete(use);
-      use->set_req(MemNode::Memory, current);
-      _igvn._worklist.push(use);
+      _igvn.replace_input_of(use, MemNode::Memory, current);
       --i; //deleted this edge; rescan position
     } else if (!sched_up && use->is_Phi() && use->bottom_type() == Type::MEMORY) {
       uint pos; //lip (lower insert point) must be the last one in the memory slice
-      _igvn.hash_delete(use);
       for (pos=1; pos < use->req(); pos++) {
         if (use->in(pos) == insert_pt) break;
       }
-      use->set_req(pos, current);
-      _igvn._worklist.push(use);
+      _igvn.replace_input_of(use, pos, current);
       --i;
     }
   }
 
   //connect current to insert_pt
   current->set_req(MemNode::Memory, insert_pt);
-  _igvn._worklist.push(current);
 }
 
 //------------------------------co_locate_pack----------------------------------
@@ -1077,15 +1071,13 @@
           Node* use = current->out(i);
           if (use->is_Mem() && use != previous) {
             assert(use->in(MemNode::Memory) == current, "must be");
-            _igvn.hash_delete(use);
             if (schedule_before_pack.member(use)) {
               _igvn.hash_delete(upper_insert_pt);
-              use->set_req(MemNode::Memory, upper_insert_pt);
+              _igvn.replace_input_of(use, MemNode::Memory, upper_insert_pt);
             } else {
               _igvn.hash_delete(lower_insert_pt);
-              use->set_req(MemNode::Memory, lower_insert_pt);
+              _igvn.replace_input_of(use, MemNode::Memory, lower_insert_pt);
             }
-            _igvn._worklist.push(use);
             --i; // deleted this edge; rescan position
           }
         }
@@ -1122,9 +1114,7 @@
     // Give each load the same memory state
     for (uint i = 0; i < pk->size(); i++) {
       LoadNode* ld = pk->at(i)->as_Load();
-      _igvn.hash_delete(ld);
-      ld->set_req(MemNode::Memory, mem_input);
-      _igvn._worklist.push(ld);
+      _igvn.replace_input_of(ld, MemNode::Memory, mem_input);
     }
   }
 }
@@ -1282,16 +1272,14 @@
 
     // Insert extract operation
     _igvn.hash_delete(def);
-    _igvn.hash_delete(use);
     int def_pos = alignment(def) / data_size(def);
     const Type* def_t = velt_type(def);
 
     Node* ex = ExtractNode::make(_phase->C, def, def_pos, def_t);
     _phase->_igvn.register_new_node_with_optimizer(ex);
     _phase->set_ctrl(ex, _phase->get_ctrl(def));
-    use->set_req(idx, ex);
+    _igvn.replace_input_of(use, idx, ex);
     _igvn._worklist.push(def);
-    _igvn._worklist.push(use);
 
     bb_insert_after(ex, bb_idx(def));
     set_velt_type(ex, def_t);
--- a/hotspot/src/share/vm/prims/jni.cpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/share/vm/prims/jni.cpp	Wed Jul 05 18:13:47 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -378,6 +378,7 @@
   jclass cls = NULL;
   DT_RETURN_MARK(DefineClass, jclass, (const jclass&)cls);
 
+  TempNewSymbol class_name = NULL;
   // Since exceptions can be thrown, class initialization can take place
   // if name is NULL no check for class name in .class stream has to be made.
   if (name != NULL) {
@@ -387,9 +388,8 @@
       // into the constant pool.
       THROW_MSG_0(vmSymbols::java_lang_NoClassDefFoundError(), name);
     }
+    class_name = SymbolTable::new_symbol(name, CHECK_NULL);
   }
-  TempNewSymbol class_name = SymbolTable::new_symbol(name, THREAD);
-
   ResourceMark rm(THREAD);
   ClassFileStream st((u1*) buf, bufLen, NULL);
   Handle class_loader (THREAD, JNIHandles::resolve(loaderRef));
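
The reordering above means a Symbol is only interned when a class name was actually supplied; per the comment, DefineClass may be called with name == NULL, and the name is then taken from the class file stream. A hedged JNI-side sketch of that call path (the helper name and the source of the class bytes are assumptions, not part of the patch):

// define_class_sketch.cpp -- hedged sketch of the JNI call this fix hardens.
#include <jni.h>

// 'buf'/'bufLen' are assumed to hold valid class file bytes obtained elsewhere.
// Passing name == NULL skips the name check entirely, the case the patched
// code now guards before creating a TempNewSymbol.
static jclass define_with_optional_name(JNIEnv* env, jobject loader,
                                        const char* name,        // may be NULL
                                        const jbyte* buf, jsize bufLen) {
  jclass cls = env->DefineClass(name, loader, buf, bufLen);
  if (cls == NULL) {
    env->ExceptionDescribe();   // e.g. NoClassDefFoundError
    env->ExceptionClear();
  }
  return cls;
}
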
--- a/hotspot/src/share/vm/prims/jvm.h	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/share/vm/prims/jvm.h	Wed Jul 05 18:13:47 2017 +0200
@@ -634,7 +634,7 @@
 JVM_AssertionStatusDirectives(JNIEnv *env, jclass unused);
 
 /*
- * sun.misc.AtomicLong
+ * java.util.concurrent.atomic.AtomicLong
  */
 JNIEXPORT jboolean JNICALL
 JVM_SupportsCX8(void);
--- a/hotspot/src/share/vm/runtime/globals.hpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Wed Jul 05 18:13:47 2017 +0200
@@ -631,9 +631,6 @@
   develop(bool, InlineClassNatives, true,                                   \
           "inline Class.isInstance, etc")                                   \
                                                                             \
-  develop(bool, InlineAtomicLong, true,                                     \
-          "inline sun.misc.AtomicLong")                                     \
-                                                                            \
   develop(bool, InlineThreadNatives, true,                                  \
           "inline Thread.currentThread, etc")                               \
                                                                             \
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp	Wed Jul 05 18:13:47 2017 +0200
@@ -358,7 +358,6 @@
   nonstatic_field(methodDataOopDesc,           _arg_stack,                                    intx)                                  \
   nonstatic_field(methodDataOopDesc,           _arg_returned,                                 intx)                                  \
   nonstatic_field(methodOopDesc,               _constMethod,                                  constMethodOop)                        \
-  nonstatic_field(methodOopDesc,               _constants,                                    constantPoolOop)                       \
   nonstatic_field(methodOopDesc,               _method_data,                                  methodDataOop)                         \
   nonstatic_field(methodOopDesc,               _interpreter_invocation_count,                 int)                                   \
   nonstatic_field(methodOopDesc,               _access_flags,                                 AccessFlags)                           \
@@ -378,7 +377,7 @@
   volatile_nonstatic_field(methodOopDesc,      _from_compiled_entry,                          address)                               \
   volatile_nonstatic_field(methodOopDesc,      _from_interpreted_entry,                       address)                               \
   volatile_nonstatic_field(constMethodOopDesc, _fingerprint,                                  uint64_t)                              \
-  nonstatic_field(constMethodOopDesc,          _method,                                       methodOop)                             \
+  nonstatic_field(constMethodOopDesc,          _constants,                                    constantPoolOop)                       \
   nonstatic_field(constMethodOopDesc,          _stackmap_data,                                typeArrayOop)                          \
   nonstatic_field(constMethodOopDesc,          _exception_table,                              typeArrayOop)                          \
   nonstatic_field(constMethodOopDesc,          _constMethod_size,                             int)                                   \
@@ -1876,7 +1875,6 @@
   declare_c2_type(StoreNNode, StoreNode)                                  \
   declare_c2_type(StoreCMNode, StoreNode)                                 \
   declare_c2_type(LoadPLockedNode, LoadPNode)                             \
-  declare_c2_type(LoadLLockedNode, LoadLNode)                             \
   declare_c2_type(SCMemProjNode, ProjNode)                                \
   declare_c2_type(LoadStoreNode, Node)                                    \
   declare_c2_type(StorePConditionalNode, LoadStoreNode)                   \
--- a/hotspot/src/share/vm/utilities/globalDefinitions_visCPP.hpp	Thu Jun 14 13:14:50 2012 -0700
+++ b/hotspot/src/share/vm/utilities/globalDefinitions_visCPP.hpp	Wed Jul 05 18:13:47 2017 +0200
@@ -220,9 +220,15 @@
 #define PRIu64       "I64u"
 #define PRIx64       "I64x"
 
+#ifdef _LP64
+#define PRIdPTR       "I64d"
+#define PRIuPTR       "I64u"
+#define PRIxPTR       "I64x"
+#else
 #define PRIdPTR       "d"
 #define PRIuPTR       "u"
 #define PRIxPTR       "x"
+#endif
 
 #define offset_of(klass,field) offsetof(klass,field)
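
The new _LP64 branch keeps the pointer-sized format macros 64 bits wide on Win64, where the old "d"/"u"/"x" strings would misread an intptr_t argument. A minimal usage check, written against the standard <cinttypes> macros that this Visual C++ header has to provide by hand (illustrative only):

// print_ptr.cpp -- minimal check of the pointer-sized format macros.
#include <cinttypes>
#include <cstdio>

int main() {
  void* p = &p;
  intptr_t ip = (intptr_t)p;
  // With 32-bit "d"/"x" strings a Win64 build would misread the 64-bit
  // argument; the _LP64 branch keeps the width in sync with intptr_t.
  std::printf("as decimal: %" PRIdPTR "\n", ip);
  std::printf("as hex:     0x%" PRIxPTR "\n", (uintptr_t)ip);
  return 0;
}
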
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/6732154/Test6732154.java	Wed Jul 05 18:13:47 2017 +0200
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6732154
+ * @summary REG: Printing an Image using image/gif doc flavor crashes the VM, Solsparc
+ *
+ * @run main/othervm -Xcomp -XX:CompileOnly="Test6732154::ascii85Encode" Test6732154
+ */
+public class Test6732154 {
+
+    // Exact copy of sun.print.PSPrinterJob.ascii85Encode([b)[b
+    private byte[] ascii85Encode(byte[] inArr) {
+        byte[]  outArr = new byte[((inArr.length+4) * 5 / 4) + 2];
+        long p1 = 85;
+        long p2 = p1*p1;
+        long p3 = p1*p2;
+        long p4 = p1*p3;
+        byte pling = '!';
+
+        int i = 0;
+        int olen = 0;
+        long val, rem;
+
+        while (i+3 < inArr.length) {
+            val = ((long)((inArr[i++]&0xff))<<24) +
+                  ((long)((inArr[i++]&0xff))<<16) +
+                  ((long)((inArr[i++]&0xff))<< 8) +
+                  ((long)(inArr[i++]&0xff));
+            if (val == 0) {
+                outArr[olen++] = 'z';
+            } else {
+                rem = val;
+                outArr[olen++] = (byte)(rem / p4 + pling); rem = rem % p4;
+                outArr[olen++] = (byte)(rem / p3 + pling); rem = rem % p3;
+                outArr[olen++] = (byte)(rem / p2 + pling); rem = rem % p2;
+                outArr[olen++] = (byte)(rem / p1 + pling); rem = rem % p1;
+                outArr[olen++] = (byte)(rem + pling);
+            }
+        }
+        // input not a multiple of 4 bytes, write partial output.
+        if (i < inArr.length) {
+            int n = inArr.length - i; // n bytes remain to be written
+
+            val = 0;
+            while (i < inArr.length) {
+                val = (val << 8) + (inArr[i++]&0xff);
+            }
+
+            int append = 4 - n;
+            while (append-- > 0) {
+                val = val << 8;
+            }
+            byte []c = new byte[5];
+            rem = val;
+            c[0] = (byte)(rem / p4 + pling); rem = rem % p4;
+            c[1] = (byte)(rem / p3 + pling); rem = rem % p3;
+            c[2] = (byte)(rem / p2 + pling); rem = rem % p2;
+            c[3] = (byte)(rem / p1 + pling); rem = rem % p1;
+            c[4] = (byte)(rem + pling);
+
+            for (int b = 0; b < n+1 ; b++) {
+                outArr[olen++] = c[b];
+            }
+        }
+
+        // write EOD marker.
+        outArr[olen++]='~'; outArr[olen++]='>';
+
+        /* The original intention was to insert a newline after every 78 bytes.
+         * This was mainly intended for legibility but I decided against this
+         * partially because of the (small) amount of extra space, and
+         * partially because for line breaks either would have to hardwire
+         * ascii 10 (newline) or calculate space in bytes to allocate for
+         * the platform's newline byte sequence. Also need to be careful
+         * about where its inserted:
+         * Ascii 85 decoder ignores white space except for one special case:
+         * you must ensure you do not split the EOD marker across lines.
+         */
+        byte[] retArr = new byte[olen];
+        System.arraycopy(outArr, 0, retArr, 0, olen);
+        return retArr;
+    }
+
+    public static void main(String[] args) {
+        new Test6732154().ascii85Encode(new byte[0]);
+        System.out.println("Test passed.");
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/7169782/Test7169782.java	Wed Jul 05 18:13:47 2017 +0200
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 7169782
+ * @summary C2: SIGSEGV in LShiftLNode::Ideal(PhaseGVN*, bool)
+ *
+ * @run main/othervm -Xcomp -XX:CompileOnly="Test7169782::<clinit>" Test7169782
+ */
+
+public class Test7169782 {
+    static long var_8;
+
+    static {
+        var_8 /= (long)(1E100 + ("".startsWith("a", 0) ? 1 : 2));
+    }
+
+    public static void main(String[] args) {
+        System.out.println("Test passed.");
+    }
+}
--- a/jaxp/.hgtags	Thu Jun 14 13:14:50 2012 -0700
+++ b/jaxp/.hgtags	Wed Jul 05 18:13:47 2017 +0200
@@ -164,3 +164,4 @@
 9ecfdbd6aed4702674eaede2023b4a19513d6b36 jdk8-b40
 6f5c0e17415de7a9c74900ef4ba12f47accdf88b jdk8-b41
 39ee03c1602155ff02e5feb6cd44869452f24cf7 jdk8-b42
+eff4ece9c8bc43b3ce2b3758574c4c20147f0689 jdk8-b43
--- a/jaxws/.hgtags	Thu Jun 14 13:14:50 2012 -0700
+++ b/jaxws/.hgtags	Wed Jul 05 18:13:47 2017 +0200
@@ -164,3 +164,4 @@
 09a0ddda03cb36deb6ee9edf789da12aa4674c6b jdk8-b40
 f2072b164b0519227833a2994f78e3988ee67827 jdk8-b41
 1f20f37818a91b66eaeba268d0b785718598ee0e jdk8-b42
+f00c12994562c2f68d348a7c3e829374a89294e2 jdk8-b43
--- a/jdk/.hgtags	Thu Jun 14 13:14:50 2012 -0700
+++ b/jdk/.hgtags	Wed Jul 05 18:13:47 2017 +0200
@@ -164,3 +164,4 @@
 b88fc3359dc7edabfa8a228855d8cebf8843c055 jdk8-b40
 4eac56f073ea8179b1a35fcd2af9b48b0088be9f jdk8-b41
 cf5c1f6fbc5ba14163fe0ef8eb8601b33f951372 jdk8-b42
+b3246687c3695dff6f461bb407f9db88f7d072e7 jdk8-b43
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/scripts/lic_check.sh	Wed Jul 05 18:13:47 2017 +0200
@@ -0,0 +1,224 @@
+#! /bin/sh -f
+#
+# Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+
+#
+# This script checks the copyright notice and license header of source files.
+#
+# The script should be located in the main jdk repository under make/scripts.
+# It works with the templates in the make/templates directory of the jdk source.
+#
+# Usage: "lic_check.sh [-gpl] or [-gplcp] or [-bsd] file(s)"
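+#
+# Example invocations (illustrative; the paths are hypothetical):
+#   sh make/scripts/lic_check.sh -gpl src/share/classes/Foo.java
+#   sh make/scripts/lic_check.sh -bsd src/solaris/native
+# A directory argument is processed recursively. The script exits with
+# status 1 if any checked file fails the verification.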
+
+script_directory=`dirname $0`
+script_name=`basename $0`
+first_option=$1
+
+# parse the first argument
+
+case "$1" in
+	"-gpl")
+		header="gpl-header"
+		;;
+	"-gplcp")
+		header="gpl-cp-header"
+		;;
+	"-bsd")
+		header="bsd-header"
+		;;
+	*)
+		echo "Usage: $0 [-gpl] or [-gplcp] or [-bsd] file(s)" 1>&2
+		exit 1
+		;;
+esac
+shift
+
+#initialize error status
+error_status=0
+
+# determine and set the absolute path for the script directory
+D=`dirname "${script_directory}"`
+B=`basename "${script_directory}"`
+script_dir="`cd \"${D}\" 2>/dev/null && pwd || echo \"${D}\"`/${B}"
+
+# set up a variable for the templates directory
+template_dir=${script_dir}/../templates
+
+# Check existence of the template directory.
+if [ ! -d ${template_dir} ] ; then
+        echo "ERROR: The templates directory "${template_dir}" doesn't exist." 1>&2
+        exit 1
+fi
+
+# set the temporary file location
+tmpfile=/tmp/source_file.$$
+rm -f ${tmpfile}
+
+# check number of lines in the template file
+lines=`cat ${template_dir}/${header} | wc -l`
+
+# the template file has one empty line at the end, which we need to ignore
+lines=`expr ${lines} - 1`
+
+# A loop through all the script parameters:
+#
+# 1. Given a set of source files and a license template header, read the file name of each source file.
+# 2. Check whether the given file exists. When a directory is encountered, dive in and process all sources inside it.
+# 3. Read each line of the given file and check it for a copyright string.
+# 4. If a copyright string is found, check that the years in the string are correctly formatted and replace them with %YEARS% (see the example below).
+# 5. Continue reading the file until the number of lines read equals the length of the license template header ($lines), removing the comment prefix from each line.
+# 6. Store the result (the license header from the given file) in a temporary file.
+# 7. If the temporary file is not empty, compare it with the template file to verify that the license text matches the template.
+# 8. Produce an error if the temporary file is empty; that means no copyright string was found or the one found is not correct.
+#
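+# For example (illustrative values, not taken from a real source file), a line
+# such as
+#   * Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved.
+# is stored in the temporary file as
+#   Copyright (c) %YEARS% Oracle and/or its affiliates. All rights reserved.
+# before being compared against ${template_dir}/${header}.
+#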
+while [ "$#" -gt "0" ] ; do
+	touch ${tmpfile}
+
+	# If the parameter is a directory, check every file inside it recursively.
+	if [ -d $1 ] ; then
+		curdir=`pwd`
+		cd $1
+		echo "*** Entering directory: "`pwd`
+		echo "***"
+		files=`ls .`
+		sh ${script_dir}/${script_name} ${first_option} ${files}
+		status=$?
+		if [ ${error_status} -ne 1 ] ; then
+			error_status=${status}
+		fi
+		cd ${curdir}
+		shift
+		continue
+	else
+		echo "### Checking copyright notice in the file: "$1
+		echo "###"
+	fi
+
+	# Check the existence of the source file.
+	if [ ! -f $1 ] ; then
+        	echo "ERROR: The source file "$1" doesn't exist." 1>&2
+		error_status=1
+		shift
+        	continue
+	fi
+
+	# read the source file, determine where the header starts, and extract the license header without the comment prefix
+	counter=0
+	while read line ; do
+		# remove windows "line feed" character from the line (if any)
+		line=`echo "${line}" | tr -d '\r'`
+		# check if the given line contains copyright
+		check_copyright=`echo "${line}" | grep "Copyright (c) "`
+		if [ "${check_copyright}" != "" ] ; then
+			# determine the comment prefix
+			prefix=`echo "${line}" | cut -d "C" -f 1`
+			# remove prefix (we use "_" as a sed delimiter, since the prefix could be like //)
+			copyright_without_prefix=`echo "${line}" | sed s_"^${prefix}"__g`
+			# copyright years
+			year1=`echo "${copyright_without_prefix}" | cut -d " " -f 3`
+			year2=`echo "${copyright_without_prefix}" | cut -d " " -f 4`
+			# Processing the first year in the copyright string
+			length=`expr "${year1}" : '.*'`
+			if [ ${length} -ne 5 ] ; then
+        			break
+			fi
+			check_year1=`echo ${year1} | egrep "19[0-9][0-9],|2[0-9][0-9][0-9],"`
+			if [ "${check_year1}" = "" ] ; then
+        			break
+			fi
+			# Processing the second year in the copyright string
+			if [ "${year2}" != "Oracle" ] ; then
+        			length=`expr "${year2}" : '.*'`
+        			if [ ${length} -ne 5 ] ; then
+                			break
+        			else
+                			check_year2=`echo ${year2} | egrep "19[0-9][0-9],|2[0-9][0-9][0-9],"`
+                			if [ "${check_year2}" = "" ] ; then
+                        			break
+                			fi
+        			fi
+			fi
+
+			# copyright string without copyright years
+			no_years=`echo "${copyright_without_prefix}" | sed 's/[0-9,]*//g'`
+			# copyright string before years
+			before_years=`echo "${no_years}" | cut -d "O" -f 1`
+			# copyright string after years
+			after_years=`echo "${no_years}" | cut -d ")" -f 2`
+			# form a new copyright string with %YEARS%
+			new_copyright=`echo ${before_years}"%YEARS%"${after_years}`
+			# save the new copyright string to a file
+			echo "${new_copyright}" > ${tmpfile}
+			# start counting the lines
+			counter=1
+			# move to the next line
+			continue
+		fi
+		if [ ${counter} -ne 0 ] ; then
+			# this should be a license header line, hence increment counter
+			counter=`expr ${counter} + 1`
+			# record a string without a prefix to a file
+			newline=`echo "${line}" | sed s_"^${prefix}"__`
+
+			# we need to take care of the empty lines in the header, i.e. check the prefix without spaces
+			trimmed_prefix=`echo "${prefix}" | tr -d " "`
+			trimmed_line=`echo "${line}"  | tr -d " "`
+			if [ "${trimmed_line}" = "${trimmed_prefix}" ] ; then
+				echo "" >> ${tmpfile}
+			else
+				echo "${newline}" >> ${tmpfile}
+			fi
+		fi
+		# stop reading lines when a license header ends and add an empty line to the end
+		if [ ${counter} -eq ${lines} ] ; then
+			echo "" >> ${tmpfile}
+			break
+		fi
+	done < $1
+
+	# compare the license header with a template file
+	if [ -s ${tmpfile} ] ; then
+		diff -c ${tmpfile} ${template_dir}/${header} 1>&2
+		if [ "$?" = "0" ] ; then
+			echo "SUCCESS: The license header for "`pwd`"/"$1" has been verified."
+			echo "###"
+		else
+			echo "ERROR: License header is not correct in "`pwd`"/"$1 1>&2
+			echo "See diffs above. " 1>&2
+			echo "###" 1>&2
+			echo "" 1>&2
+			error_status=1
+		fi
+	else
+		# If the temporary file is empty, the copyright string is missing or not correct
+		echo "ERROR: Copyright string is not correct or missing in "`pwd`"/"$1 1>&2
+		echo "###" 1>&2
+		echo "" 1>&2
+		error_status=1
+	fi
+	rm -f ${tmpfile}
+	shift
+done
+if [ ${error_status} -ne 0 ] ; then
+	exit 1
+fi