Merge
author sspitsyn
Wed, 03 May 2017 02:32:02 +0000
changeset 46426 02a1fc064144
parent 46425 2f733d553ca0
parent 46424 2dfc07162b35
child 46427 54713555867e
Merge
--- a/hotspot/.hgtags	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/.hgtags	Wed May 03 02:32:02 2017 +0000
@@ -560,6 +560,7 @@
 a9fdfd55835ef9dccb7f317b07249bd66653b874 jdk-9+154
 f3b3d77a1751897413aae43ac340a130b6fa2ae1 jdk-9+155
 43139c588ea48b6504e52b6c3dec530b17b1fdb4 jdk-9+156
+1ea217626ba0995dd03127f8322ba3687926a085 jdk-10+1
 b2d0a906afd73dcf27f572217eb1be0f196ec16c jdk-9+157
 4e78f30935229f13ce7c43089621cf7169f5abac jdk-9+158
 9211c2e89c1cd11ec2d5752b0f97131a7d7525c7 jdk-9+159
@@ -570,3 +571,4 @@
 0af429be8bbaeaaf0cb838e9af28c953dda6a9c8 jdk-9+164
 c92c6416ca03b1464d5ed99cf6201e52b5ba0a70 jdk-9+165
 560d7aa083a24b6a56443feb8de0f40435d33aa9 jdk-9+166
+48809c513ed5ebb4d4dbf2f454afcce2780db6db jdk-10+2
--- a/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Wed May 03 02:32:02 2017 +0000
@@ -968,10 +968,11 @@
         // than prefetch distance.
         __ set(prefetch_count, O4);
         __ cmp_and_brx_short(count, O4, Assembler::less, Assembler::pt, L_block_copy);
-        __ sub(count, prefetch_count, count);
+        __ sub(count, O4, count);
 
         (this->*copy_loop_func)(from, to, count, count_dec, L_block_copy_prefetch, true, true);
-        __ add(count, prefetch_count, count); // restore count
+        __ set(prefetch_count, O4);
+        __ add(count, O4, count);
 
       } // prefetch_count > 0
 
@@ -992,11 +993,12 @@
       // than prefetch distance.
       __ set(prefetch_count, O4);
       __ cmp_and_brx_short(count, O4, Assembler::lessUnsigned, Assembler::pt, L_copy);
-      __ sub(count, prefetch_count, count);
+      __ sub(count, O4, count);
 
       Label L_copy_prefetch;
       (this->*copy_loop_func)(from, to, count, count_dec, L_copy_prefetch, true, false);
-      __ add(count, prefetch_count, count); // restore count
+      __ set(prefetch_count, O4);
+      __ add(count, O4, count);
 
     } // prefetch_count > 0
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/memory/AltHashing.java	Wed May 03 02:32:02 2017 +0000
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.memory;
+
+public class AltHashing {
+    public static long murmur3_32(long seed, byte[] data) {
+      long h1 = seed;
+      int len = data.length;
+      int count = len;
+
+      int offset = 0;
+
+      // body
+      while (count >= 4) {
+          long k1 = (data[offset] & 0x0FF)
+              | (data[offset + 1] & 0x0FF) << 8
+              | (data[offset + 2] & 0x0FF) << 16
+              | data[offset + 3] << 24;
+
+          count -= 4;
+          offset += 4;
+
+          k1 *= 0xcc9e2d51;
+          k1 = Integer.rotateLeft((int)k1, 15);
+          k1 *= 0x1b873593;
+          k1 &= 0xFFFFFFFFL;
+
+          h1 ^= k1;
+          h1 = Integer.rotateLeft((int)h1, 13);
+          h1 = h1 * 5 + 0xe6546b64;
+          h1 &= 0xFFFFFFFFL;
+      }
+
+      //tail
+      if (count > 0) {
+          long k1 = 0;
+
+          switch (count) {
+              case 3:
+                  k1 ^= (data[offset + 2] & 0xff) << 16;
+                  // fall through
+              case 2:
+                  k1 ^= (data[offset + 1] & 0xff) << 8;
+                  // fall through
+              case 1:
+                  k1 ^= (data[offset] & 0xff);
+                  // fall through
+              default:
+                  k1 *= 0xcc9e2d51;
+                  k1 = Integer.rotateLeft((int)k1, 15);
+                  k1 *= 0x1b873593;
+                  k1 &= 0xFFFFFFFFL;
+                  h1 ^= k1;
+                  h1 &= 0xFFFFFFFFL;
+          }
+      }
+
+      // finalization
+      h1 ^= len;
+
+      // finalization mix force all bits of a hash block to avalanche
+      h1 ^= h1 >> 16;
+      h1 *= 0x85ebca6b;
+      h1 &= 0xFFFFFFFFL;
+      h1 ^= h1 >> 13;
+      h1 *= 0xc2b2ae35;
+      h1 &= 0xFFFFFFFFL;
+      h1 ^= h1 >> 16;
+
+      return h1 & 0xFFFFFFFFL;
+  }
+}
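
(Editorial note, not part of the patch.) The new AltHashing class lets the Serviceability Agent reproduce the VM's alternate murmur3 hashing once the symbol table has been rehashed. A minimal usage sketch, assuming the class above is on the agent's classpath; the seed value below is an arbitrary stand-in, since a real agent would read it from SymbolTable.getSeed():

    import java.nio.charset.StandardCharsets;
    import sun.jvm.hotspot.memory.AltHashing;

    public class AltHashingDemo {
        public static void main(String[] args) {
            long seed = 0xDEADBEEFL;   // stand-in; the VM supplies the real seed
            byte[] name = "java/lang/String".getBytes(StandardCharsets.UTF_8);
            // 32-bit murmur3 result carried in a long, matching what the VM
            // computes for the same seed and bytes.
            long hash = AltHashing.murmur3_32(seed, name);
            System.out.printf("murmur3_32 = 0x%08x%n", hash);
        }
    }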
--- a/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/memory/SymbolTable.java	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/memory/SymbolTable.java	Wed May 03 02:32:02 2017 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -45,11 +45,14 @@
     Type type = db.lookupType("SymbolTable");
     theTableField  = type.getAddressField("_the_table");
     sharedTableField = type.getAddressField("_shared_table");
+    type = db.lookupType("RehashableSymbolHashtable");
+    seedField = type.getCIntegerField("_seed");
   }
 
   // Fields
   private static AddressField theTableField;
   private static AddressField sharedTableField;
+  private static CIntegerField seedField;
 
   private CompactHashTable sharedTable;
 
@@ -62,6 +65,17 @@
     return table;
   }
 
+  public static long getSeed() {
+      return (long) seedField.getValue();
+  }
+
+  public static boolean useAlternateHashcode() {
+      if (getSeed() != 0) {
+          return true;
+      }
+      return false;
+  }
+
   public SymbolTable(Address addr) {
     super(addr);
   }
@@ -86,11 +100,17 @@
   public Symbol probe(byte[] name) {
     long hashValue = hashSymbol(name);
 
+    // shared table does not use alternate hashing algorithm,
+    // it always uses the same original hash code.
     Symbol s = sharedTable.probe(name, hashValue);
     if (s != null) {
       return s;
     }
 
+    if (useAlternateHashcode()) {
+        hashValue = AltHashing.murmur3_32(getSeed(), name);
+    }
+
     for (HashtableEntry e = (HashtableEntry) bucket(hashToIndex(hashValue)); e != null; e = (HashtableEntry) e.next()) {
       if (e.hash() == hashValue) {
          Symbol sym = Symbol.create(e.literalValue());
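
(Editorial sketch, not part of the changeset.) The new getSeed()/useAlternateHashcode() accessors combine with AltHashing as follows when choosing the hash for the regular (non-shared) table; the shared CDS table always keeps the original hash, as the comment in probe() notes. hashFor and defaultHash are hypothetical names used only for illustration:

    // Mirrors the decision made in SymbolTable.probe(): once the VM has
    // rehashed the table, _seed is non-zero and bucket indexes are derived
    // from murmur3 of the seed rather than the default symbol hash.
    static long hashFor(byte[] name, long defaultHash) {
        if (SymbolTable.useAlternateHashcode()) {
            return AltHashing.murmur3_32(SymbolTable.getSeed(), name);
        }
        return defaultHash;
    }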
--- a/hotspot/src/os_cpu/bsd_x86/vm/atomic_bsd_x86.hpp	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/src/os_cpu/bsd_x86/vm/atomic_bsd_x86.hpp	Wed May 03 02:32:02 2017 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -42,23 +42,18 @@
 inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
 
 
-// Adding a lock prefix to an instruction on MP machine
-#define LOCK_IF_MP(mp) "cmp $0, " #mp "; je 1f; lock; 1: "
-
 inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
   jint addend = add_value;
-  int mp = os::is_MP();
-  __asm__ volatile (  LOCK_IF_MP(%3) "xaddl %0,(%2)"
+  __asm__ volatile (  "lock xaddl %0,(%2)"
                     : "=r" (addend)
-                    : "0" (addend), "r" (dest), "r" (mp)
+                    : "0" (addend), "r" (dest)
                     : "cc", "memory");
   return addend + add_value;
 }
 
 inline void Atomic::inc    (volatile jint*     dest) {
-  int mp = os::is_MP();
-  __asm__ volatile (LOCK_IF_MP(%1) "addl $1,(%0)" :
-                    : "r" (dest), "r" (mp) : "cc", "memory");
+  __asm__ volatile (  "lock addl $1,(%0)" :
+                    : "r" (dest) : "cc", "memory");
 }
 
 inline void Atomic::inc_ptr(volatile void*     dest) {
@@ -66,9 +61,8 @@
 }
 
 inline void Atomic::dec    (volatile jint*     dest) {
-  int mp = os::is_MP();
-  __asm__ volatile (LOCK_IF_MP(%1) "subl $1,(%0)" :
-                    : "r" (dest), "r" (mp) : "cc", "memory");
+  __asm__ volatile (  "lock subl $1,(%0)" :
+                    : "r" (dest) : "cc", "memory");
 }
 
 inline void Atomic::dec_ptr(volatile void*     dest) {
@@ -89,19 +83,17 @@
 
 #define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
 inline jbyte    Atomic::cmpxchg    (jbyte    exchange_value, volatile jbyte*    dest, jbyte    compare_value, cmpxchg_memory_order order) {
-  int mp = os::is_MP();
-  __asm__ volatile (LOCK_IF_MP(%4) "cmpxchgb %1,(%3)"
+  __asm__ volatile (  "lock cmpxchgb %1,(%3)"
                     : "=a" (exchange_value)
-                    : "q" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
+                    : "q" (exchange_value), "a" (compare_value), "r" (dest)
                     : "cc", "memory");
   return exchange_value;
 }
 
 inline jint     Atomic::cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value, cmpxchg_memory_order order) {
-  int mp = os::is_MP();
-  __asm__ volatile (LOCK_IF_MP(%4) "cmpxchgl %1,(%3)"
+  __asm__ volatile (  "lock cmpxchgl %1,(%3)"
                     : "=a" (exchange_value)
-                    : "r" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
+                    : "r" (exchange_value), "a" (compare_value), "r" (dest)
                     : "cc", "memory");
   return exchange_value;
 }
@@ -112,10 +104,9 @@
 
 inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
   intptr_t addend = add_value;
-  bool mp = os::is_MP();
-  __asm__ __volatile__ (LOCK_IF_MP(%3) "xaddq %0,(%2)"
+  __asm__ __volatile__ (  "lock xaddq %0,(%2)"
                         : "=r" (addend)
-                        : "0" (addend), "r" (dest), "r" (mp)
+                        : "0" (addend), "r" (dest)
                         : "cc", "memory");
   return addend + add_value;
 }
@@ -125,18 +116,16 @@
 }
 
 inline void Atomic::inc_ptr(volatile intptr_t* dest) {
-  bool mp = os::is_MP();
-  __asm__ __volatile__ (LOCK_IF_MP(%1) "addq $1,(%0)"
+  __asm__ __volatile__ (  "lock addq $1,(%0)"
                         :
-                        : "r" (dest), "r" (mp)
+                        : "r" (dest)
                         : "cc", "memory");
 }
 
 inline void Atomic::dec_ptr(volatile intptr_t* dest) {
-  bool mp = os::is_MP();
-  __asm__ __volatile__ (LOCK_IF_MP(%1) "subq $1,(%0)"
+  __asm__ __volatile__ (  "lock subq $1,(%0)"
                         :
-                        : "r" (dest), "r" (mp)
+                        : "r" (dest)
                         : "cc", "memory");
 }
 
@@ -149,10 +138,9 @@
 }
 
 inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
-  bool mp = os::is_MP();
-  __asm__ __volatile__ (LOCK_IF_MP(%4) "cmpxchgq %1,(%3)"
+  __asm__ __volatile__ (  "lock cmpxchgq %1,(%3)"
                         : "=a" (exchange_value)
-                        : "r" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
+                        : "r" (exchange_value), "a" (compare_value), "r" (dest)
                         : "cc", "memory");
   return exchange_value;
 }
--- a/hotspot/src/os_cpu/linux_x86/vm/atomic_linux_x86.hpp	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/src/os_cpu/linux_x86/vm/atomic_linux_x86.hpp	Wed May 03 02:32:02 2017 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -42,23 +42,18 @@
 inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
 
 
-// Adding a lock prefix to an instruction on MP machine
-#define LOCK_IF_MP(mp) "cmp $0, " #mp "; je 1f; lock; 1: "
-
 inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
   jint addend = add_value;
-  int mp = os::is_MP();
-  __asm__ volatile (  LOCK_IF_MP(%3) "xaddl %0,(%2)"
+  __asm__ volatile (  "lock xaddl %0,(%2)"
                     : "=r" (addend)
-                    : "0" (addend), "r" (dest), "r" (mp)
+                    : "0" (addend), "r" (dest)
                     : "cc", "memory");
   return addend + add_value;
 }
 
 inline void Atomic::inc    (volatile jint*     dest) {
-  int mp = os::is_MP();
-  __asm__ volatile (LOCK_IF_MP(%1) "addl $1,(%0)" :
-                    : "r" (dest), "r" (mp) : "cc", "memory");
+  __asm__ volatile (  "lock addl $1,(%0)" :
+                    : "r" (dest) : "cc", "memory");
 }
 
 inline void Atomic::inc_ptr(volatile void*     dest) {
@@ -66,9 +61,8 @@
 }
 
 inline void Atomic::dec    (volatile jint*     dest) {
-  int mp = os::is_MP();
-  __asm__ volatile (LOCK_IF_MP(%1) "subl $1,(%0)" :
-                    : "r" (dest), "r" (mp) : "cc", "memory");
+  __asm__ volatile (  "lock subl $1,(%0)" :
+                    : "r" (dest) : "cc", "memory");
 }
 
 inline void Atomic::dec_ptr(volatile void*     dest) {
@@ -89,19 +83,17 @@
 
 #define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
 inline jbyte    Atomic::cmpxchg    (jbyte    exchange_value, volatile jbyte*    dest, jbyte    compare_value, cmpxchg_memory_order order) {
-  int mp = os::is_MP();
-  __asm__ volatile (LOCK_IF_MP(%4) "cmpxchgb %1,(%3)"
+  __asm__ volatile ("lock cmpxchgb %1,(%3)"
                     : "=a" (exchange_value)
-                    : "q" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
+                    : "q" (exchange_value), "a" (compare_value), "r" (dest)
                     : "cc", "memory");
   return exchange_value;
 }
 
 inline jint     Atomic::cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value, cmpxchg_memory_order order) {
-  int mp = os::is_MP();
-  __asm__ volatile (LOCK_IF_MP(%4) "cmpxchgl %1,(%3)"
+  __asm__ volatile ("lock cmpxchgl %1,(%3)"
                     : "=a" (exchange_value)
-                    : "r" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
+                    : "r" (exchange_value), "a" (compare_value), "r" (dest)
                     : "cc", "memory");
   return exchange_value;
 }
@@ -112,10 +104,9 @@
 
 inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
   intptr_t addend = add_value;
-  bool mp = os::is_MP();
-  __asm__ __volatile__ (LOCK_IF_MP(%3) "xaddq %0,(%2)"
+  __asm__ __volatile__ ("lock xaddq %0,(%2)"
                         : "=r" (addend)
-                        : "0" (addend), "r" (dest), "r" (mp)
+                        : "0" (addend), "r" (dest)
                         : "cc", "memory");
   return addend + add_value;
 }
@@ -125,18 +116,16 @@
 }
 
 inline void Atomic::inc_ptr(volatile intptr_t* dest) {
-  bool mp = os::is_MP();
-  __asm__ __volatile__ (LOCK_IF_MP(%1) "addq $1,(%0)"
+  __asm__ __volatile__ ("lock addq $1,(%0)"
                         :
-                        : "r" (dest), "r" (mp)
+                        : "r" (dest)
                         : "cc", "memory");
 }
 
 inline void Atomic::dec_ptr(volatile intptr_t* dest) {
-  bool mp = os::is_MP();
-  __asm__ __volatile__ (LOCK_IF_MP(%1) "subq $1,(%0)"
+  __asm__ __volatile__ ("lock subq $1,(%0)"
                         :
-                        : "r" (dest), "r" (mp)
+                        : "r" (dest)
                         : "cc", "memory");
 }
 
@@ -149,10 +138,9 @@
 }
 
 inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
-  bool mp = os::is_MP();
-  __asm__ __volatile__ (LOCK_IF_MP(%4) "cmpxchgq %1,(%3)"
+  __asm__ __volatile__ ("lock cmpxchgq %1,(%3)"
                         : "=a" (exchange_value)
-                        : "r" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
+                        : "r" (exchange_value), "a" (compare_value), "r" (dest)
                         : "cc", "memory");
   return exchange_value;
 }
@@ -192,12 +180,12 @@
 
 extern "C" {
   // defined in linux_x86.s
-  jlong _Atomic_cmpxchg_long(jlong, volatile jlong*, jlong, bool);
+  jlong _Atomic_cmpxchg_long(jlong, volatile jlong*, jlong);
   void _Atomic_move_long(volatile jlong* src, volatile jlong* dst);
 }
 
 inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
-  return _Atomic_cmpxchg_long(exchange_value, dest, compare_value, os::is_MP());
+  return _Atomic_cmpxchg_long(exchange_value, dest, compare_value);
 }
 
 inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
--- a/hotspot/src/os_cpu/linux_x86/vm/linux_x86_32.s	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/src/os_cpu/linux_x86/vm/linux_x86_32.s	Wed May 03 02:32:02 2017 +0000
@@ -1,5 +1,5 @@
 # 
-# Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2004, 2017, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -614,8 +614,7 @@
 
         # Support for jlong Atomic::cmpxchg(jlong exchange_value,
         #                                   volatile jlong* dest,
-        #                                   jlong compare_value,
-        #                                   bool is_MP)
+        #                                   jlong compare_value)
         #
         .p2align 4,,15
 	.type    _Atomic_cmpxchg_long,@function
@@ -628,10 +627,7 @@
         movl     24(%esp), %eax    # 24(%esp) : compare_value (low)
         movl     28(%esp), %edx    # 28(%esp) : compare_value (high)
         movl     20(%esp), %edi    # 20(%esp) : dest
-        cmpl     $0, 32(%esp)      # 32(%esp) : is_MP
-        je       1f
-        lock
-1:      cmpxchg8b (%edi)
+        lock cmpxchg8b (%edi)
         popl     %edi
         popl     %ebx
         ret
--- a/hotspot/src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp	Wed May 03 02:32:02 2017 +0000
@@ -404,7 +404,7 @@
   // is available to us as well
   Sysinfo cpu_info(SI_CPUBRAND);
   bool use_solaris_12_api = cpu_info.valid();
-  const char* impl;
+  const char* impl = "unknown";
   int impl_m = 0;
   if (use_solaris_12_api) {
     impl = cpu_info.value();
@@ -431,7 +431,7 @@
       kstat_close(kc);
     }
   }
-  assert(impl_m != 0, "Unknown CPU implementation %s", impl);
+  assert(impl_m != 0, "Unrecognized CPU implementation: %s", impl);
   features |= impl_m;
 
   bool is_sun4v = (features & sun4v_m) != 0;
--- a/hotspot/src/os_cpu/solaris_x86/vm/atomic_solaris_x86.hpp	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/src/os_cpu/solaris_x86/vm/atomic_solaris_x86.hpp	Wed May 03 02:32:02 2017 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -52,31 +52,19 @@
 // For Sun Studio - implementation is in solaris_x86_[32/64].il.
 // For gcc - implementation is just below.
 
-// The lock prefix can be omitted for certain instructions on uniprocessors; to
-// facilitate this, os::is_MP() is passed as an additional argument.  64-bit
-// processors are assumed to be multi-threaded and/or multi-core, so the extra
-// argument is unnecessary.
-#ifndef _LP64
-#define IS_MP_DECL() , int is_mp
-#define IS_MP_ARG()  , (int) os::is_MP()
-#else
-#define IS_MP_DECL()
-#define IS_MP_ARG()
-#endif // _LP64
-
 extern "C" {
-  jint _Atomic_add(jint add_value, volatile jint* dest IS_MP_DECL());
+  jint _Atomic_add(jint add_value, volatile jint* dest);
   jint _Atomic_xchg(jint exchange_value, volatile jint* dest);
   jbyte _Atomic_cmpxchg_byte(jbyte exchange_value, volatile jbyte* dest,
-                       jbyte compare_value IS_MP_DECL());
+                             jbyte compare_value);
   jint _Atomic_cmpxchg(jint exchange_value, volatile jint* dest,
-                       jint compare_value IS_MP_DECL());
+                       jint compare_value);
   jlong _Atomic_cmpxchg_long(jlong exchange_value, volatile jlong* dest,
-                             jlong compare_value IS_MP_DECL());
+                             jlong compare_value);
 }
 
 inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
-  return _Atomic_add(add_value, dest IS_MP_ARG());
+  return _Atomic_add(add_value, dest);
 }
 
 inline jint     Atomic::xchg       (jint     exchange_value, volatile jint*     dest) {
@@ -85,15 +73,15 @@
 
 #define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
 inline jbyte    Atomic::cmpxchg    (jbyte    exchange_value, volatile jbyte*    dest, jbyte    compare_value, cmpxchg_memory_order order) {
-  return _Atomic_cmpxchg_byte(exchange_value, dest, compare_value IS_MP_ARG());
+  return _Atomic_cmpxchg_byte(exchange_value, dest, compare_value);
 }
 
 inline jint     Atomic::cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value, cmpxchg_memory_order order) {
-  return _Atomic_cmpxchg(exchange_value, dest, compare_value IS_MP_ARG());
+  return _Atomic_cmpxchg(exchange_value, dest, compare_value);
 }
 
 inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
-  return _Atomic_cmpxchg_long(exchange_value, dest, compare_value IS_MP_ARG());
+  return _Atomic_cmpxchg_long(exchange_value, dest, compare_value);
 }
 
 
@@ -174,25 +162,23 @@
 #endif // AMD64
 
 #ifdef _GNU_SOURCE
-// Add a lock prefix to an instruction on an MP machine
-#define LOCK_IF_MP(mp) "cmp $0, " #mp "; je 1f; lock; 1: "
 
 extern "C" {
-  inline jint _Atomic_add(jint add_value, volatile jint* dest, int mp) {
+  inline jint _Atomic_add(jint add_value, volatile jint* dest) {
     jint addend = add_value;
-    __asm__ volatile (  LOCK_IF_MP(%3) "xaddl %0,(%2)"
+    __asm__ volatile ("lock xaddl %0,(%2)"
                     : "=r" (addend)
-                    : "0" (addend), "r" (dest), "r" (mp)
+                    : "0" (addend), "r" (dest)
                     : "cc", "memory");
     return addend + add_value;
   }
 
 #ifdef AMD64
-  inline jlong _Atomic_add_long(jlong add_value, volatile jlong* dest, int mp) {
+  inline jlong _Atomic_add_long(jlong add_value, volatile jlong* dest) {
     intptr_t addend = add_value;
-    __asm__ __volatile__ (LOCK_IF_MP(%3) "xaddq %0,(%2)"
+    __asm__ __volatile__ ("lock xaddq %0,(%2)"
                         : "=r" (addend)
-                        : "0" (addend), "r" (dest), "r" (mp)
+                        : "0" (addend), "r" (dest)
                         : "cc", "memory");
     return addend + add_value;
   }
@@ -215,35 +201,35 @@
     return exchange_value;
   }
 
-  inline jint _Atomic_cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value, int mp) {
-    __asm__ volatile (LOCK_IF_MP(%4) "cmpxchgl %1,(%3)"
+  inline jint _Atomic_cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value) {
+    __asm__ volatile ("lock cmpxchgl %1,(%3)"
                     : "=a" (exchange_value)
-                    : "r" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
+                    : "r" (exchange_value), "a" (compare_value), "r" (dest)
                     : "cc", "memory");
     return exchange_value;
   }
 
 
-  inline jbyte _Atomic_cmpxchg_byte(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, int mp) {
-    __asm__ volatile (LOCK_IF_MP(%4) "cmpxchgb %1,(%3)"
+  inline jbyte _Atomic_cmpxchg_byte(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value) {
+    __asm__ volatile ("lock cmpxchgb %1,(%3)"
                     : "=a" (exchange_value)
-                    : "q" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
+                    : "q" (exchange_value), "a" (compare_value), "r" (dest)
                     : "cc", "memory");
     return exchange_value;
   }
 
   // This is the interface to the atomic instruction in solaris_i486.s.
-  jlong _Atomic_cmpxchg_long_gcc(jlong exchange_value, volatile jlong* dest, jlong compare_value, int mp);
+  jlong _Atomic_cmpxchg_long_gcc(jlong exchange_value, volatile jlong* dest, jlong compare_value);
 
-  inline jlong _Atomic_cmpxchg_long(jlong exchange_value, volatile jlong* dest, jlong compare_value, int mp) {
+  inline jlong _Atomic_cmpxchg_long(jlong exchange_value, volatile jlong* dest, jlong compare_value) {
 #ifdef AMD64
-    __asm__ __volatile__ (LOCK_IF_MP(%4) "cmpxchgq %1,(%3)"
+    __asm__ __volatile__ ("lock cmpxchgq %1,(%3)"
                         : "=a" (exchange_value)
-                        : "r" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
+                        : "r" (exchange_value), "a" (compare_value), "r" (dest)
                         : "cc", "memory");
     return exchange_value;
 #else
-    return _Atomic_cmpxchg_long_gcc(exchange_value, dest, compare_value, os::is_MP());
+    return _Atomic_cmpxchg_long_gcc(exchange_value, dest, compare_value);
 
     #if 0
     // The code below does not work presumably because of the bug in gcc
@@ -255,23 +241,19 @@
     volatile jlong_accessor evl, cvl, rv;
     evl.long_value = exchange_value;
     cvl.long_value = compare_value;
-    int mp = os::is_MP();
 
-    __asm__ volatile ("cmp $0, %%esi\n\t"
-       "je 1f \n\t"
-       "lock\n\t"
-       "1: cmpxchg8b (%%edi)\n\t"
+    __asm__ volatile (
+       "lock cmpxchg8b (%%edi)\n\t"
        : "=a"(cvl.words[0]),   "=d"(cvl.words[1])
        : "a"(cvl.words[0]), "d"(cvl.words[1]),
          "b"(evl.words[0]), "c"(evl.words[1]),
-         "D"(dest), "S"(mp)
+         "D"(dest)
        :  "cc", "memory");
     return cvl.long_value;
     #endif // if 0
 #endif // AMD64
   }
 }
-#undef LOCK_IF_MP
 
 #endif // _GNU_SOURCE
 
--- a/hotspot/src/os_cpu/solaris_x86/vm/solaris_x86_32.il	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/src/os_cpu/solaris_x86/vm/solaris_x86_32.il	Wed May 03 02:32:02 2017 +0000
@@ -1,5 +1,5 @@
 //
-// Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -55,18 +55,12 @@
       .end
 
   // Support for jint Atomic::add(jint inc, volatile jint* dest)
-  // An additional bool (os::is_MP()) is passed as the last argument.
       .inline _Atomic_add,3
       movl     0(%esp), %eax   // inc
       movl     4(%esp), %edx   // dest
       movl     %eax, %ecx
-      cmpl     $0, 8(%esp)     // MP test
-      jne      1f
-      xaddl    %eax, (%edx)
-      jmp      2f
-1:    lock
-      xaddl    %eax, (%edx)
-2:    addl     %ecx, %eax
+      lock xaddl %eax, (%edx)
+      addl     %ecx, %eax
       .end
 
   // Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest).
@@ -79,41 +73,26 @@
   // Support for jbyte Atomic::cmpxchg(jbyte exchange_value,
   //                                   volatile jbyte *dest,
   //                                   jbyte compare_value)
-  // An additional bool (os::is_MP()) is passed as the last argument.
       .inline _Atomic_cmpxchg_byte,4
       movb     8(%esp), %al   // compare_value
       movb     0(%esp), %cl   // exchange_value
       movl     4(%esp), %edx   // dest
-      cmp      $0, 12(%esp)    // MP test
-      jne      1f
-      cmpxchgb %cl, (%edx)
-      jmp      2f
-1:    lock
-      cmpxchgb %cl, (%edx)
-2:
+      lock cmpxchgb %cl, (%edx)
       .end
 
   // Support for jint Atomic::cmpxchg(jint exchange_value,
   //                                  volatile jint *dest,
   //                                  jint compare_value)
-  // An additional bool (os::is_MP()) is passed as the last argument.
       .inline _Atomic_cmpxchg,4
       movl     8(%esp), %eax   // compare_value
       movl     0(%esp), %ecx   // exchange_value
       movl     4(%esp), %edx   // dest
-      cmp      $0, 12(%esp)    // MP test
-      jne      1f
-      cmpxchgl %ecx, (%edx)
-      jmp      2f
-1:    lock
-      cmpxchgl %ecx, (%edx)
-2:
+      lock cmpxchgl %ecx, (%edx)
       .end
 
   // Support for jlong Atomic::cmpxchg(jlong exchange_value,
   //                                   volatile jlong* dest,
   //                                   jlong compare_value)
-  // An additional bool (os::is_MP()) is passed as the last argument.
       .inline _Atomic_cmpxchg_long,6
       pushl    %ebx
       pushl    %edi
@@ -122,13 +101,8 @@
       movl     16(%esp), %edi  // dest
       movl     8(%esp), %ebx   // exchange_value (low)
       movl     12(%esp), %ecx  // exchange_high (high)
-      cmp      $0, 28(%esp)    // MP test
-      jne      1f
-      cmpxchg8b (%edi)
-      jmp      2f
-1:    lock
-      cmpxchg8b (%edi)
-2:    popl     %edi
+      lock cmpxchg8b (%edi)
+      popl     %edi
       popl     %ebx
       .end
 
--- a/hotspot/src/os_cpu/solaris_x86/vm/solaris_x86_32.s	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/src/os_cpu/solaris_x86/vm/solaris_x86_32.s	Wed May 03 02:32:02 2017 +0000
@@ -1,5 +1,5 @@
 //
-// Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 2004, 2017, Oracle and/or its affiliates. All rights reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -643,8 +643,7 @@
 
         / Support for jlong Atomic::cmpxchg(jlong exchange_value,
         /                                   volatile jlong* dest,
-        /                                   jlong compare_value,
-        /                                   bool is_MP)
+        /                                   jlong compare_value)
         / Used only for Solaris/gcc builds
         .align 16
 _Atomic_cmpxchg_long_gcc:
@@ -656,10 +655,7 @@
         movl     24(%esp), %eax    / 24(%esp) : compare_value (low)
         movl     28(%esp), %edx    / 28(%esp) : compare_value (high)
         movl     20(%esp), %edi    / 20(%esp) : dest
-        cmpl     $0, 32(%esp)      / 32(%esp) : is_MP
-        je       1f
-        lock
-1:      cmpxchg8b (%edi)
+        lock cmpxchg8b (%edi)
         popl     %edi
         popl     %ebx
         ret
--- a/hotspot/src/os_cpu/windows_x86/vm/atomic_windows_x86.hpp	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/src/os_cpu/windows_x86/vm/atomic_windows_x86.hpp	Wed May 03 02:32:02 2017 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -57,15 +57,6 @@
 inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
 inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
 
-// Adding a lock prefix to an instruction on MP machine
-// VC++ doesn't like the lock prefix to be on a single line
-// so we can't insert a label after the lock prefix.
-// By emitting a lock prefix, we can define a label after it.
-#define LOCK_IF_MP(mp) __asm cmp mp, 0  \
-                       __asm je L0      \
-                       __asm _emit 0xF0 \
-                       __asm L0:
-
 #ifdef AMD64
 inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
 inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
@@ -144,13 +135,11 @@
 #else // !AMD64
 
 inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
-  int mp = os::is_MP();
   __asm {
     mov edx, dest;
     mov eax, add_value;
     mov ecx, eax;
-    LOCK_IF_MP(mp)
-    xadd dword ptr [edx], eax;
+    lock xadd dword ptr [edx], eax;
     add eax, ecx;
   }
 }
@@ -165,11 +154,9 @@
 
 inline void Atomic::inc    (volatile jint*     dest) {
   // alternative for InterlockedIncrement
-  int mp = os::is_MP();
   __asm {
     mov edx, dest;
-    LOCK_IF_MP(mp)
-    add dword ptr [edx], 1;
+    lock add dword ptr [edx], 1;
   }
 }
 
@@ -183,11 +170,9 @@
 
 inline void Atomic::dec    (volatile jint*     dest) {
   // alternative for InterlockedDecrement
-  int mp = os::is_MP();
   __asm {
     mov edx, dest;
-    LOCK_IF_MP(mp)
-    sub dword ptr [edx], 1;
+    lock sub dword ptr [edx], 1;
   }
 }
 
@@ -219,30 +204,25 @@
 #define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
 inline jbyte    Atomic::cmpxchg    (jbyte    exchange_value, volatile jbyte*    dest, jbyte    compare_value, cmpxchg_memory_order order) {
   // alternative for InterlockedCompareExchange
-  int mp = os::is_MP();
   __asm {
     mov edx, dest
     mov cl, exchange_value
     mov al, compare_value
-    LOCK_IF_MP(mp)
-    cmpxchg byte ptr [edx], cl
+    lock cmpxchg byte ptr [edx], cl
   }
 }
 
 inline jint     Atomic::cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value, cmpxchg_memory_order order) {
   // alternative for InterlockedCompareExchange
-  int mp = os::is_MP();
   __asm {
     mov edx, dest
     mov ecx, exchange_value
     mov eax, compare_value
-    LOCK_IF_MP(mp)
-    cmpxchg dword ptr [edx], ecx
+    lock cmpxchg dword ptr [edx], ecx
   }
 }
 
 inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
-  int mp = os::is_MP();
   jint ex_lo  = (jint)exchange_value;
   jint ex_hi  = *( ((jint*)&exchange_value) + 1 );
   jint cmp_lo = (jint)compare_value;
@@ -255,8 +235,7 @@
     mov edi, dest
     mov ebx, ex_lo
     mov ecx, ex_hi
-    LOCK_IF_MP(mp)
-    cmpxchg8b qword ptr [edi]
+    lock cmpxchg8b qword ptr [edi]
     pop edi
     pop ebx
   }
--- a/hotspot/src/share/vm/classfile/classLoaderData.cpp	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/src/share/vm/classfile/classLoaderData.cpp	Wed May 03 02:32:02 2017 +0000
@@ -102,6 +102,7 @@
   // ModuleEntryTable or PackageEntryTable created for it. The defining package
   // and module for an anonymous class will be found in its host class.
   if (!is_anonymous) {
+    _packages = new PackageEntryTable(PackageEntryTable::_packagetable_entry_size);
     if (h_class_loader.is_null()) {
       // Create unnamed module for boot loader
       _unnamed_module = ModuleEntry::create_boot_unnamed_module(this);
@@ -297,8 +298,8 @@
   if (_modules != NULL) {
     for (int i = 0; i < _modules->table_size(); i++) {
       for (ModuleEntry* entry = _modules->bucket(i);
-                              entry != NULL;
-                              entry = entry->next()) {
+           entry != NULL;
+           entry = entry->next()) {
         f(entry);
       }
     }
@@ -306,13 +307,12 @@
 }
 
 void ClassLoaderData::packages_do(void f(PackageEntry*)) {
-  // Lock-free access requires load_ptr_acquire
-  PackageEntryTable* packages = load_ptr_acquire(&_packages);
-  if (packages != NULL) {
-    for (int i = 0; i < packages->table_size(); i++) {
-      for (PackageEntry* entry = packages->bucket(i);
-                              entry != NULL;
-                              entry = entry->next()) {
+  assert_locked_or_safepoint(Module_lock);
+  if (_packages != NULL) {
+    for (int i = 0; i < _packages->table_size(); i++) {
+      for (PackageEntry* entry = _packages->bucket(i);
+           entry != NULL;
+           entry = entry->next()) {
         f(entry);
       }
     }
@@ -494,22 +494,6 @@
   free_deallocate_list();
 }
 
-PackageEntryTable* ClassLoaderData::packages() {
-  // Lazily create the package entry table at first request.
-  // Lock-free access requires load_ptr_acquire.
-  PackageEntryTable* packages = load_ptr_acquire(&_packages);
-  if (packages == NULL) {
-    MutexLockerEx m1(metaspace_lock(), Mutex::_no_safepoint_check_flag);
-    // Check if _packages got allocated while we were waiting for this lock.
-    if ((packages = _packages) == NULL) {
-      packages = new PackageEntryTable(PackageEntryTable::_packagetable_entry_size);
-      // Ensure _packages is stable, since it is examined without a lock
-      OrderAccess::release_store_ptr(&_packages, packages);
-    }
-  }
-  return packages;
-}
-
 ModuleEntryTable* ClassLoaderData::modules() {
   // Lazily create the module entry table at first request.
   // Lock-free access requires load_ptr_acquire.
@@ -1096,7 +1080,7 @@
     // occur after each class loader's aliveness is determined.
     data = _head;
     while (data != NULL) {
-      if (data->packages_defined()) {
+      if (data->packages() != NULL) {
         data->packages()->purge_all_package_exports();
       }
       if (data->modules_defined()) {
--- a/hotspot/src/share/vm/classfile/classLoaderData.hpp	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/src/share/vm/classfile/classLoaderData.hpp	Wed May 03 02:32:02 2017 +0000
@@ -347,8 +347,7 @@
   bool contains_klass(Klass* k);
   void record_dependency(const Klass* to, TRAPS);
   void init_dependencies(TRAPS);
-  PackageEntryTable* packages();
-  bool packages_defined() { return (_packages != NULL); }
+  PackageEntryTable* packages() { return _packages; }
   ModuleEntry* unnamed_module() { return _unnamed_module; }
   ModuleEntryTable* modules();
   bool modules_defined() { return (_modules != NULL); }
--- a/hotspot/src/share/vm/classfile/packageEntry.cpp	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/src/share/vm/classfile/packageEntry.cpp	Wed May 03 02:32:02 2017 +0000
@@ -174,8 +174,6 @@
 }
 
 PackageEntryTable::~PackageEntryTable() {
-  assert_locked_or_safepoint(Module_lock);
-
   // Walk through all buckets and all entries in each bucket,
   // freeing each entry.
   for (int i = 0; i < table_size(); ++i) {
@@ -271,6 +269,7 @@
 // Called when a define module for java.base is being processed.
 // Verify the packages loaded thus far are in java.base's package list.
 void PackageEntryTable::verify_javabase_packages(GrowableArray<Symbol*> *pkg_list) {
+  assert_lock_strong(Module_lock);
   for (int i = 0; i < table_size(); i++) {
     for (PackageEntry* entry = bucket(i);
                        entry != NULL;
--- a/hotspot/src/share/vm/code/vtableStubs.cpp	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/src/share/vm/code/vtableStubs.cpp	Wed May 03 02:32:02 2017 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -218,11 +218,11 @@
   HandleMark hm;
   Klass* klass = receiver->klass();
   InstanceKlass* ik = InstanceKlass::cast(klass);
-  klassVtable* vt = ik->vtable();
+  klassVtable vt = ik->vtable();
   ik->print();
   fatal("bad compiled vtable dispatch: receiver " INTPTR_FORMAT ", "
         "index %d (vtable length %d)",
-        p2i(receiver), index, vt->length());
+        p2i(receiver), index, vt.length());
 }
 
 #endif // PRODUCT
--- a/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.cpp	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.cpp	Wed May 03 02:32:02 2017 +0000
@@ -923,8 +923,6 @@
 
           double end_vtime_sec = os::elapsedVTime();
           double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
-          _cm->clear_has_overflown();
-
           _cm->do_yield_check();
 
           jlong sleep_time_ms;
--- a/hotspot/src/share/vm/gc/serial/markSweep.cpp	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/src/share/vm/gc/serial/markSweep.cpp	Wed May 03 02:32:02 2017 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -224,12 +224,12 @@
   mark_and_push_closure.set_ref_processor(_ref_processor);
 }
 
-MarkSweep::AdjustPointerClosure MarkSweep::adjust_pointer_closure;
+AdjustPointerClosure MarkSweep::adjust_pointer_closure;
 
 template <typename T>
-void MarkSweep::AdjustPointerClosure::do_oop_nv(T* p)      { adjust_pointer(p); }
-void MarkSweep::AdjustPointerClosure::do_oop(oop* p)       { do_oop_nv(p); }
-void MarkSweep::AdjustPointerClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
+void AdjustPointerClosure::do_oop_nv(T* p)      { MarkSweep::adjust_pointer(p); }
+void AdjustPointerClosure::do_oop(oop* p)       { do_oop_nv(p); }
+void AdjustPointerClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
 
 void MarkSweep::adjust_marks() {
   assert( _preserved_oop_stack.size() == _preserved_mark_stack.size(),
@@ -280,79 +280,5 @@
   MarkSweep::_gc_tracer = new (ResourceObj::C_HEAP, mtGC) SerialOldTracer();
 }
 
-int InstanceKlass::oop_ms_adjust_pointers(oop obj) {
-  int size = size_helper();
-  oop_oop_iterate_oop_maps<true>(obj, &MarkSweep::adjust_pointer_closure);
-  return size;
-}
-
-int InstanceMirrorKlass::oop_ms_adjust_pointers(oop obj) {
-  int size = oop_size(obj);
-  InstanceKlass::oop_ms_adjust_pointers(obj);
-
-  oop_oop_iterate_statics<true>(obj, &MarkSweep::adjust_pointer_closure);
-  return size;
-}
-
-int InstanceClassLoaderKlass::oop_ms_adjust_pointers(oop obj) {
-  return InstanceKlass::oop_ms_adjust_pointers(obj);
-}
-
-#ifdef ASSERT
-template <class T> static void trace_reference_gc(const char *s, oop obj,
-                                                  T* referent_addr,
-                                                  T* next_addr,
-                                                  T* discovered_addr) {
-  log_develop_trace(gc, ref)("%s obj " PTR_FORMAT, s, p2i(obj));
-  log_develop_trace(gc, ref)("     referent_addr/* " PTR_FORMAT " / " PTR_FORMAT,
-                             p2i(referent_addr), p2i(referent_addr ? (address)oopDesc::load_decode_heap_oop(referent_addr) : NULL));
-  log_develop_trace(gc, ref)("     next_addr/* " PTR_FORMAT " / " PTR_FORMAT,
-                             p2i(next_addr), p2i(next_addr ? (address)oopDesc::load_decode_heap_oop(next_addr) : NULL));
-  log_develop_trace(gc, ref)("     discovered_addr/* " PTR_FORMAT " / " PTR_FORMAT,
-                             p2i(discovered_addr), p2i(discovered_addr ? (address)oopDesc::load_decode_heap_oop(discovered_addr) : NULL));
-}
-#endif
-
-template <class T> void static adjust_object_specialized(oop obj) {
-  T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
-  MarkSweep::adjust_pointer(referent_addr);
-  T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
-  MarkSweep::adjust_pointer(next_addr);
-  T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
-  MarkSweep::adjust_pointer(discovered_addr);
-  debug_only(trace_reference_gc("InstanceRefKlass::oop_ms_adjust_pointers", obj,
-                                referent_addr, next_addr, discovered_addr);)
-}
-
-int InstanceRefKlass::oop_ms_adjust_pointers(oop obj) {
-  int size = size_helper();
-  InstanceKlass::oop_ms_adjust_pointers(obj);
-
-  if (UseCompressedOops) {
-    adjust_object_specialized<narrowOop>(obj);
-  } else {
-    adjust_object_specialized<oop>(obj);
-  }
-  return size;
-}
-
-int ObjArrayKlass::oop_ms_adjust_pointers(oop obj) {
-  assert(obj->is_objArray(), "obj must be obj array");
-  objArrayOop a = objArrayOop(obj);
-  // Get size before changing pointers.
-  // Don't call size() or oop_size() since that is a virtual call.
-  int size = a->object_size();
-  oop_oop_iterate_elements<true>(a, &MarkSweep::adjust_pointer_closure);
-  return size;
-}
-
-int TypeArrayKlass::oop_ms_adjust_pointers(oop obj) {
-  assert(obj->is_typeArray(), "must be a type array");
-  typeArrayOop t = typeArrayOop(obj);
-  // Performance tweak: We skip iterating over the klass pointer since we
-  // know that Universe::TypeArrayKlass never moves.
-  return t->object_size();
-}
-
 // Generate MS specialized oop_oop_iterate functions.
 SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_MS(ALL_KLASS_OOP_OOP_ITERATE_DEFN)
--- a/hotspot/src/share/vm/gc/serial/markSweep.hpp	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/src/share/vm/gc/serial/markSweep.hpp	Wed May 03 02:32:02 2017 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -50,6 +50,7 @@
 // declared at end
 class PreservedMark;
 class MarkAndPushClosure;
+class AdjustPointerClosure;
 
 class MarkSweep : AllStatic {
   //
@@ -66,16 +67,6 @@
     virtual void do_void();
   };
 
-  class AdjustPointerClosure: public OopsInGenClosure {
-   public:
-    template <typename T> void do_oop_nv(T* p);
-    virtual void do_oop(oop* p);
-    virtual void do_oop(narrowOop* p);
-
-    // This closure provides its own oop verification code.
-    debug_only(virtual bool should_verify_oops() { return false; })
-  };
-
   // Used for java/lang/ref handling
   class IsAliveClosure: public BoolObjectClosure {
    public:
@@ -201,6 +192,17 @@
   }
 };
 
+class AdjustPointerClosure: public OopsInGenClosure {
+ public:
+  template <typename T> void do_oop_nv(T* p);
+  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p);
+  virtual ReferenceIterationMode reference_iteration_mode() { return DO_FIELDS; }
+
+  // This closure provides its own oop verification code.
+  debug_only(virtual bool should_verify_oops() { return false; })
+};
+
 class PreservedMark VALUE_OBJ_CLASS_SPEC {
 private:
   oop _obj;
--- a/hotspot/src/share/vm/gc/serial/markSweep.inline.hpp	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/src/share/vm/gc/serial/markSweep.inline.hpp	Wed May 03 02:32:02 2017 +0000
@@ -42,7 +42,7 @@
 }
 
 inline int MarkSweep::adjust_pointers(oop obj) {
-  return obj->ms_adjust_pointers();
+  return obj->oop_iterate_size(&MarkSweep::adjust_pointer_closure);
 }
 
 template <class T> inline void MarkSweep::adjust_pointer(T* p) {
--- a/hotspot/src/share/vm/gc/shared/specialized_oop_closures.hpp	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/src/share/vm/gc/shared/specialized_oop_closures.hpp	Wed May 03 02:32:02 2017 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -44,6 +44,7 @@
 class FilteringClosure;
 // MarkSweep
 class MarkAndPushClosure;
+class AdjustPointerClosure;
 // ParNew
 class ParScanWithBarrierClosure;
 class ParScanWithoutBarrierClosure;
@@ -90,7 +91,8 @@
   SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_P(f)
 
 #define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_MS(f)      \
-  f(MarkAndPushClosure,_nv)
+  f(MarkAndPushClosure,_nv)                             \
+  f(AdjustPointerClosure,_nv)
 
 #if INCLUDE_ALL_GCS
 #define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_CMS(f)     \
--- a/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp	Wed May 03 02:32:02 2017 +0000
@@ -86,14 +86,15 @@
 // State accessors
 
 void InterpreterRuntime::set_bcp_and_mdp(address bcp, JavaThread *thread) {
-  last_frame(thread).interpreter_frame_set_bcp(bcp);
+  LastFrameAccessor last_frame(thread);
+  last_frame.set_bcp(bcp);
   if (ProfileInterpreter) {
     // ProfileTraps uses MDOs independently of ProfileInterpreter.
     // That is why we must check both ProfileInterpreter and mdo != NULL.
-    MethodData* mdo = last_frame(thread).interpreter_frame_method()->method_data();
+    MethodData* mdo = last_frame.method()->method_data();
     if (mdo != NULL) {
       NEEDS_CLEANUP;
-      last_frame(thread).interpreter_frame_set_mdp(mdo->bci_to_dp(last_frame(thread).interpreter_frame_bci()));
+      last_frame.set_mdp(mdo->bci_to_dp(last_frame.bci()));
     }
   }
 }
@@ -104,8 +105,9 @@
 
 IRT_ENTRY(void, InterpreterRuntime::ldc(JavaThread* thread, bool wide))
   // access constant pool
-  ConstantPool* pool = method(thread)->constants();
-  int index = wide ? get_index_u2(thread, Bytecodes::_ldc_w) : get_index_u1(thread, Bytecodes::_ldc);
+  LastFrameAccessor last_frame(thread);
+  ConstantPool* pool = last_frame.method()->constants();
+  int index = wide ? last_frame.get_index_u2(Bytecodes::_ldc_w) : last_frame.get_index_u1(Bytecodes::_ldc);
   constantTag tag = pool->tag_at(index);
 
   assert (tag.is_unresolved_klass() || tag.is_klass(), "wrong ldc call");
@@ -118,13 +120,14 @@
   assert(bytecode == Bytecodes::_fast_aldc ||
          bytecode == Bytecodes::_fast_aldc_w, "wrong bc");
   ResourceMark rm(thread);
-  methodHandle m (thread, method(thread));
-  Bytecode_loadconstant ldc(m, bci(thread));
+  LastFrameAccessor last_frame(thread);
+  methodHandle m (thread, last_frame.method());
+  Bytecode_loadconstant ldc(m, last_frame.bci());
   oop result = ldc.resolve_constant(CHECK);
 #ifdef ASSERT
   {
     // The bytecode wrappers aren't GC-safe so construct a new one
-    Bytecode_loadconstant ldc2(m, bci(thread));
+    Bytecode_loadconstant ldc2(m, last_frame.bci());
     oop coop = m->constants()->resolved_references()->obj_at(ldc2.cache_index());
     assert(result == coop, "expected result for assembly code");
   }
@@ -181,10 +184,11 @@
 
 IRT_ENTRY(void, InterpreterRuntime::multianewarray(JavaThread* thread, jint* first_size_address))
   // We may want to pass in more arguments - could make this slightly faster
-  ConstantPool* constants = method(thread)->constants();
-  int          i = get_index_u2(thread, Bytecodes::_multianewarray);
-  Klass* klass = constants->klass_at(i, CHECK);
-  int   nof_dims = number_of_dimensions(thread);
+  LastFrameAccessor last_frame(thread);
+  ConstantPool* constants = last_frame.method()->constants();
+  int          i = last_frame.get_index_u2(Bytecodes::_multianewarray);
+  Klass* klass   = constants->klass_at(i, CHECK);
+  int   nof_dims = last_frame.number_of_dimensions();
   assert(klass->is_klass(), "not a class");
   assert(nof_dims >= 1, "multianewarray rank must be nonzero");
 
@@ -216,8 +220,9 @@
 // Quicken instance-of and check-cast bytecodes
 IRT_ENTRY(void, InterpreterRuntime::quicken_io_cc(JavaThread* thread))
   // Force resolving; quicken the bytecode
-  int which = get_index_u2(thread, Bytecodes::_checkcast);
-  ConstantPool* cpool = method(thread)->constants();
+  LastFrameAccessor last_frame(thread);
+  int which = last_frame.get_index_u2(Bytecodes::_checkcast);
+  ConstantPool* cpool = last_frame.method()->constants();
   // We'd expect to assert that we're only here to quicken bytecodes, but in a multithreaded
   // program we might have seen an unquick'd bytecode in the interpreter but have another
   // thread quicken the bytecode before we get here.
@@ -256,8 +261,9 @@
 // If necessary, create an MDO to hold the information, and record it.
 void InterpreterRuntime::note_trap(JavaThread* thread, int reason, TRAPS) {
   assert(ProfileTraps, "call me only if profiling");
-  methodHandle trap_method(thread, method(thread));
-  int trap_bci = trap_method->bci_from(bcp(thread));
+  LastFrameAccessor last_frame(thread);
+  methodHandle trap_method(thread, last_frame.method());
+  int trap_bci = trap_method->bci_from(last_frame.bcp());
   note_trap_inner(thread, reason, trap_method, trap_bci, THREAD);
 }
 
@@ -390,12 +396,13 @@
 // invoke w/o arguments (i.e., as if one were inside the call).
 IRT_ENTRY(address, InterpreterRuntime::exception_handler_for_exception(JavaThread* thread, oopDesc* exception))
 
+  LastFrameAccessor last_frame(thread);
   Handle             h_exception(thread, exception);
-  methodHandle       h_method   (thread, method(thread));
+  methodHandle       h_method   (thread, last_frame.method());
   constantPoolHandle h_constants(thread, h_method->constants());
   bool               should_repeat;
   int                handler_bci;
-  int                current_bci = bci(thread);
+  int                current_bci = last_frame.bci();
 
   if (thread->frames_to_pop_failed_realloc() > 0) {
     // Allocation of scalar replaced object used in this frame
@@ -493,7 +500,7 @@
   // notify JVMTI of an exception throw; JVMTI will detect if this is a first
   // time throw or a stack unwinding throw and accordingly notify the debugger
   if (JvmtiExport::can_post_on_exceptions()) {
-    JvmtiExport::post_exception_throw(thread, h_method(), bcp(thread), h_exception());
+    JvmtiExport::post_exception_throw(thread, h_method(), last_frame.bcp(), h_exception());
   }
 
 #ifdef CC_INTERP
@@ -556,20 +563,21 @@
   Thread* THREAD = thread;
   // resolve field
   fieldDescriptor info;
-  constantPoolHandle pool(thread, method(thread)->constants());
-  methodHandle m(thread, method(thread));
+  LastFrameAccessor last_frame(thread);
+  constantPoolHandle pool(thread, last_frame.method()->constants());
+  methodHandle m(thread, last_frame.method());
   bool is_put    = (bytecode == Bytecodes::_putfield  || bytecode == Bytecodes::_nofast_putfield ||
                     bytecode == Bytecodes::_putstatic);
   bool is_static = (bytecode == Bytecodes::_getstatic || bytecode == Bytecodes::_putstatic);
 
   {
     JvmtiHideSingleStepping jhss(thread);
-    LinkResolver::resolve_field_access(info, pool, get_index_u2_cpcache(thread, bytecode),
+    LinkResolver::resolve_field_access(info, pool, last_frame.get_index_u2_cpcache(bytecode),
                                        m, bytecode, CHECK);
   } // end JvmtiHideSingleStepping
 
   // check if link resolution caused cpCache to be updated
-  ConstantPoolCacheEntry* cp_cache_entry = cache_entry(thread);
+  ConstantPoolCacheEntry* cp_cache_entry = last_frame.cache_entry();
   if (cp_cache_entry->is_resolved(bytecode)) return;
 
   // compute auxiliary field attributes
@@ -718,15 +726,15 @@
 
 void InterpreterRuntime::resolve_invoke(JavaThread* thread, Bytecodes::Code bytecode) {
   Thread* THREAD = thread;
+  LastFrameAccessor last_frame(thread);
   // extract receiver from the outgoing argument list if necessary
   Handle receiver(thread, NULL);
   if (bytecode == Bytecodes::_invokevirtual || bytecode == Bytecodes::_invokeinterface) {
     ResourceMark rm(thread);
-    methodHandle m (thread, method(thread));
-    Bytecode_invoke call(m, bci(thread));
+    methodHandle m (thread, last_frame.method());
+    Bytecode_invoke call(m, last_frame.bci());
     Symbol* signature = call.signature();
-    receiver = Handle(thread,
-                  thread->last_frame().interpreter_callee_receiver(signature));
+    receiver = Handle(thread, last_frame.callee_receiver(signature));
     assert(Universe::heap()->is_in_reserved_or_null(receiver()),
            "sanity check");
     assert(receiver.is_null() ||
@@ -736,12 +744,12 @@
 
   // resolve method
   CallInfo info;
-  constantPoolHandle pool(thread, method(thread)->constants());
+  constantPoolHandle pool(thread, last_frame.method()->constants());
 
   {
     JvmtiHideSingleStepping jhss(thread);
     LinkResolver::resolve_invoke(info, receiver, pool,
-                                 get_index_u2_cpcache(thread, bytecode), bytecode,
+                                 last_frame.get_index_u2_cpcache(bytecode), bytecode,
                                  CHECK);
     if (JvmtiExport::can_hotswap_or_post_breakpoint()) {
       int retry_count = 0;
@@ -753,14 +761,14 @@
                   "Could not resolve to latest version of redefined method");
         // method is redefined in the middle of resolve so re-try.
         LinkResolver::resolve_invoke(info, receiver, pool,
-                                     get_index_u2_cpcache(thread, bytecode), bytecode,
+                                     last_frame.get_index_u2_cpcache(bytecode), bytecode,
                                      CHECK);
       }
     }
   } // end JvmtiHideSingleStepping
 
   // check if link resolution caused cpCache to be updated
-  ConstantPoolCacheEntry* cp_cache_entry = cache_entry(thread);
+  ConstantPoolCacheEntry* cp_cache_entry = last_frame.cache_entry();
   if (cp_cache_entry->is_resolved(bytecode)) return;
 
 #ifdef ASSERT
@@ -815,33 +823,35 @@
 void InterpreterRuntime::resolve_invokehandle(JavaThread* thread) {
   Thread* THREAD = thread;
   const Bytecodes::Code bytecode = Bytecodes::_invokehandle;
+  LastFrameAccessor last_frame(thread);
 
   // resolve method
   CallInfo info;
-  constantPoolHandle pool(thread, method(thread)->constants());
+  constantPoolHandle pool(thread, last_frame.method()->constants());
   {
     JvmtiHideSingleStepping jhss(thread);
     LinkResolver::resolve_invoke(info, Handle(), pool,
-                                 get_index_u2_cpcache(thread, bytecode), bytecode,
+                                 last_frame.get_index_u2_cpcache(bytecode), bytecode,
                                  CHECK);
   } // end JvmtiHideSingleStepping
 
-  ConstantPoolCacheEntry* cp_cache_entry = cache_entry(thread);
+  ConstantPoolCacheEntry* cp_cache_entry = last_frame.cache_entry();
   cp_cache_entry->set_method_handle(pool, info);
 }
 
 // First time execution:  Resolve symbols, create a permanent CallSite object.
 void InterpreterRuntime::resolve_invokedynamic(JavaThread* thread) {
   Thread* THREAD = thread;
+  LastFrameAccessor last_frame(thread);
   const Bytecodes::Code bytecode = Bytecodes::_invokedynamic;
 
   //TO DO: consider passing BCI to Java.
-  //  int caller_bci = method(thread)->bci_from(bcp(thread));
+  //  int caller_bci = last_frame.method()->bci_from(last_frame.bcp());
 
   // resolve method
   CallInfo info;
-  constantPoolHandle pool(thread, method(thread)->constants());
-  int index = get_index_u4(thread, bytecode);
+  constantPoolHandle pool(thread, last_frame.method()->constants());
+  int index = last_frame.get_index_u4(bytecode);
   {
     JvmtiHideSingleStepping jhss(thread);
     LinkResolver::resolve_invoke(info, Handle(), pool,
@@ -895,9 +905,9 @@
     // nm could have been unloaded so look it up again.  It's unsafe
     // to examine nm directly since it might have been freed and used
     // for something else.
-    frame fr = thread->last_frame();
-    Method* method =  fr.interpreter_frame_method();
-    int bci = method->bci_from(fr.interpreter_frame_bcp());
+    LastFrameAccessor last_frame(thread);
+    Method* method =  last_frame.method();
+    int bci = method->bci_from(last_frame.bcp());
     nm = method->lookup_osr_nmethod_for(bci, CompLevel_none, false);
   }
 #ifndef PRODUCT
@@ -917,11 +927,11 @@
   // flag, in case this method triggers classloading which will call into Java.
   UnlockFlagSaver fs(thread);
 
-  frame fr = thread->last_frame();
-  assert(fr.is_interpreted_frame(), "must come from interpreter");
-  methodHandle method(thread, fr.interpreter_frame_method());
+  LastFrameAccessor last_frame(thread);
+  assert(last_frame.is_interpreted_frame(), "must come from interpreter");
+  methodHandle method(thread, last_frame.method());
   const int branch_bci = branch_bcp != NULL ? method->bci_from(branch_bcp) : InvocationEntryBci;
-  const int bci = branch_bcp != NULL ? method->bci_from(fr.interpreter_frame_bcp()) : InvocationEntryBci;
+  const int bci = branch_bcp != NULL ? method->bci_from(last_frame.bcp()) : InvocationEntryBci;
 
   assert(!HAS_PENDING_EXCEPTION, "Should not have any exceptions pending");
   nmethod* osr_nm = CompilationPolicy::policy()->event(method, method, branch_bci, bci, CompLevel_none, NULL, thread);
@@ -937,9 +947,9 @@
     if (UseBiasedLocking) {
       ResourceMark rm;
       GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();
-      for( BasicObjectLock *kptr = fr.interpreter_frame_monitor_end();
-           kptr < fr.interpreter_frame_monitor_begin();
-           kptr = fr.next_monitor_in_interpreter_frame(kptr) ) {
+      for( BasicObjectLock *kptr = last_frame.monitor_end();
+           kptr < last_frame.monitor_begin();
+           kptr = last_frame.next_monitor(kptr) ) {
         if( kptr->obj() != NULL ) {
           objects_to_revoke->append(Handle(THREAD, kptr->obj()));
         }
@@ -964,9 +974,9 @@
   UnlockFlagSaver fs(thread);
 
   assert(ProfileInterpreter, "must be profiling interpreter");
-  frame fr = thread->last_frame();
-  assert(fr.is_interpreted_frame(), "must come from interpreter");
-  methodHandle method(thread, fr.interpreter_frame_method());
+  LastFrameAccessor last_frame(thread);
+  assert(last_frame.is_interpreted_frame(), "must come from interpreter");
+  methodHandle method(thread, last_frame.method());
   Method::build_interpreter_method_data(method, THREAD);
   if (HAS_PENDING_EXCEPTION) {
     assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOM error here");
@@ -1011,9 +1021,9 @@
   assert(ProfileInterpreter, "must be profiling interpreter");
   ResourceMark rm(thread);
   HandleMark hm(thread);
-  frame fr = thread->last_frame();
-  assert(fr.is_interpreted_frame(), "must come from interpreter");
-  MethodData* h_mdo = fr.interpreter_frame_method()->method_data();
+  LastFrameAccessor last_frame(thread);
+  assert(last_frame.is_interpreted_frame(), "must come from interpreter");
+  MethodData* h_mdo = last_frame.method()->method_data();
 
   // Grab a lock to ensure atomic access to setting the return bci and
   // the displacement.  This can block and GC, invalidating all naked oops.
@@ -1021,10 +1031,10 @@
 
   // ProfileData is essentially a wrapper around a derived oop, so we
   // need to take the lock before making any ProfileData structures.
-  ProfileData* data = h_mdo->data_at(h_mdo->dp_to_di(fr.interpreter_frame_mdp()));
+  ProfileData* data = h_mdo->data_at(h_mdo->dp_to_di(last_frame.mdp()));
   RetData* rdata = data->as_RetData();
   address new_mdp = rdata->fixup_ret(return_bci, h_mdo);
-  fr.interpreter_frame_set_mdp(new_mdp);
+  last_frame.set_mdp(new_mdp);
 IRT_END
 
 IRT_ENTRY(MethodCounters*, InterpreterRuntime::build_method_counters(JavaThread* thread, Method* m))
@@ -1049,7 +1059,8 @@
     // We are called during regular safepoints and when the VM is
     // single stepping. If any thread is marked for single stepping,
     // then we may have JVMTI work to do.
-    JvmtiExport::at_single_stepping_point(thread, method(thread), bcp(thread));
+    LastFrameAccessor last_frame(thread);
+    JvmtiExport::at_single_stepping_point(thread, last_frame.method(), last_frame.bcp());
   }
 IRT_END
 
@@ -1072,7 +1083,8 @@
   }
   InstanceKlass* cp_entry_f1 = InstanceKlass::cast(cp_entry->f1_as_klass());
   jfieldID fid = jfieldIDWorkaround::to_jfieldID(cp_entry_f1, cp_entry->f2_as_index(), is_static);
-  JvmtiExport::post_field_access(thread, method(thread), bcp(thread), cp_entry_f1, h_obj, fid);
+  LastFrameAccessor last_frame(thread);
+  JvmtiExport::post_field_access(thread, last_frame.method(), last_frame.bcp(), cp_entry_f1, h_obj, fid);
 IRT_END
 
 IRT_ENTRY(void, InterpreterRuntime::post_field_modification(JavaThread *thread,
@@ -1127,17 +1139,20 @@
     h_obj = Handle(thread, obj);
   }
 
-  JvmtiExport::post_raw_field_modification(thread, method(thread), bcp(thread), ik, h_obj,
+  LastFrameAccessor last_frame(thread);
+  JvmtiExport::post_raw_field_modification(thread, last_frame.method(), last_frame.bcp(), ik, h_obj,
                                            fid, sig_type, &fvalue);
 IRT_END
 
 IRT_ENTRY(void, InterpreterRuntime::post_method_entry(JavaThread *thread))
-  JvmtiExport::post_method_entry(thread, InterpreterRuntime::method(thread), InterpreterRuntime::last_frame(thread));
+  LastFrameAccessor last_frame(thread);
+  JvmtiExport::post_method_entry(thread, last_frame.method(), last_frame.get_frame());
 IRT_END
 
 
 IRT_ENTRY(void, InterpreterRuntime::post_method_exit(JavaThread *thread))
-  JvmtiExport::post_method_exit(thread, InterpreterRuntime::method(thread), InterpreterRuntime::last_frame(thread));
+  LastFrameAccessor last_frame(thread);
+  JvmtiExport::post_method_exit(thread, last_frame.method(), last_frame.get_frame());
 IRT_END
 
 IRT_LEAF(int, InterpreterRuntime::interpreter_contains(address pc))
@@ -1362,10 +1377,10 @@
   ResetNoHandleMark rnm; // In a LEAF entry.
   HandleMark hm;
   ResourceMark rm;
-  frame fr = thread->last_frame();
-  assert(fr.is_interpreted_frame(), "");
-  jint bci = fr.interpreter_frame_bci();
-  methodHandle mh(thread, fr.interpreter_frame_method());
+  LastFrameAccessor last_frame(thread);
+  assert(last_frame.is_interpreted_frame(), "");
+  jint bci = last_frame.bci();
+  methodHandle mh(thread, last_frame.method());
   Bytecode_invoke invoke(mh, bci);
   ArgumentSizeComputer asc(invoke.signature());
   int size_of_arguments = (asc.size() + (invoke.has_receiver() ? 1 : 0)); // receiver
@@ -1411,10 +1426,10 @@
 // The generated code still uses call_VM because that will set up the frame pointer for
 // bcp and method.
 IRT_LEAF(intptr_t, InterpreterRuntime::trace_bytecode(JavaThread* thread, intptr_t preserve_this_value, intptr_t tos, intptr_t tos2))
-  const frame f = thread->last_frame();
-  assert(f.is_interpreted_frame(), "must be an interpreted frame");
-  methodHandle mh(thread, f.interpreter_frame_method());
-  BytecodeTracer::trace(mh, f.interpreter_frame_bcp(), tos, tos2);
+  LastFrameAccessor last_frame(thread);
+  assert(last_frame.is_interpreted_frame(), "must be an interpreted frame");
+  methodHandle mh(thread, last_frame.method());
+  BytecodeTracer::trace(mh, last_frame.bcp(), tos, tos2);
   return preserve_this_value;
 IRT_END
 #endif // !PRODUCT
--- a/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp	Wed May 03 02:32:02 2017 +0000
@@ -42,29 +42,54 @@
   friend class PrintingClosure; // for method and bcp
 
  private:
-  // Helper functions to access current interpreter state
-  static frame     last_frame(JavaThread *thread)    { return thread->last_frame(); }
-  static Method*   method(JavaThread *thread)        { return last_frame(thread).interpreter_frame_method(); }
-  static address   bcp(JavaThread *thread)           { return last_frame(thread).interpreter_frame_bcp(); }
-  static int       bci(JavaThread *thread)           { return last_frame(thread).interpreter_frame_bci(); }
-  static void      set_bcp_and_mdp(address bcp, JavaThread*thread);
-  static Bytecodes::Code code(JavaThread *thread)    {
+  // Helper class to access current interpreter state
+  class LastFrameAccessor : public StackObj {
+    frame _last_frame;
+  public:
+    LastFrameAccessor(JavaThread* thread) {
+      assert(thread == Thread::current(), "sanity");
+      _last_frame = thread->last_frame();
+    }
+    bool is_interpreted_frame() const              { return _last_frame.is_interpreted_frame(); }
+    Method*   method() const                       { return _last_frame.interpreter_frame_method(); }
+    address   bcp() const                          { return _last_frame.interpreter_frame_bcp(); }
+    int       bci() const                          { return _last_frame.interpreter_frame_bci(); }
+    address   mdp() const                          { return _last_frame.interpreter_frame_mdp(); }
+
+    void      set_bcp(address bcp)                 { _last_frame.interpreter_frame_set_bcp(bcp); }
+    void      set_mdp(address dp)                  { _last_frame.interpreter_frame_set_mdp(dp); }
+
     // pass method to avoid calling unsafe bcp_to_method (partial fix 4926272)
-    return Bytecodes::code_at(method(thread), bcp(thread));
-  }
-  static Bytecode  bytecode(JavaThread *thread)      { return Bytecode(method(thread), bcp(thread)); }
-  static int       get_index_u1(JavaThread *thread, Bytecodes::Code bc)
-                                                        { return bytecode(thread).get_index_u1(bc); }
-  static int       get_index_u2(JavaThread *thread, Bytecodes::Code bc)
-                                                        { return bytecode(thread).get_index_u2(bc); }
-  static int       get_index_u2_cpcache(JavaThread *thread, Bytecodes::Code bc)
-                                                        { return bytecode(thread).get_index_u2_cpcache(bc); }
-  static int       get_index_u4(JavaThread *thread, Bytecodes::Code bc)
-                                                        { return bytecode(thread).get_index_u4(bc); }
-  static int       number_of_dimensions(JavaThread *thread)  { return bcp(thread)[3]; }
+    Bytecodes::Code code() const                   { return Bytecodes::code_at(method(), bcp()); }
+
+    Bytecode  bytecode() const                     { return Bytecode(method(), bcp()); }
+    int get_index_u1(Bytecodes::Code bc) const     { return bytecode().get_index_u1(bc); }
+    int get_index_u2(Bytecodes::Code bc) const     { return bytecode().get_index_u2(bc); }
+    int get_index_u2_cpcache(Bytecodes::Code bc) const
+                                                   { return bytecode().get_index_u2_cpcache(bc); }
+    int get_index_u4(Bytecodes::Code bc) const     { return bytecode().get_index_u4(bc); }
+    int number_of_dimensions() const               { return bcp()[3]; }
+    ConstantPoolCacheEntry* cache_entry_at(int i) const
+                                                   { return method()->constants()->cache()->entry_at(i); }
+    ConstantPoolCacheEntry* cache_entry() const    { return cache_entry_at(Bytes::get_native_u2(bcp() + 1)); }
 
-  static ConstantPoolCacheEntry* cache_entry_at(JavaThread *thread, int i)  { return method(thread)->constants()->cache()->entry_at(i); }
-  static ConstantPoolCacheEntry* cache_entry(JavaThread *thread)            { return cache_entry_at(thread, Bytes::get_native_u2(bcp(thread) + 1)); }
+    oop callee_receiver(Symbol* signature) {
+      return _last_frame.interpreter_callee_receiver(signature);
+    }
+    BasicObjectLock* monitor_begin() const {
+      return _last_frame.interpreter_frame_monitor_begin();
+    }
+    BasicObjectLock* monitor_end() const {
+      return _last_frame.interpreter_frame_monitor_end();
+    }
+    BasicObjectLock* next_monitor(BasicObjectLock* current) const {
+      return _last_frame.next_monitor_in_interpreter_frame(current);
+    }
+
+    frame& get_frame()                             { return _last_frame; }
+  };
+
+  static void      set_bcp_and_mdp(address bcp, JavaThread*thread);
   static void      note_trap_inner(JavaThread* thread, int reason,
                                    methodHandle trap_method, int trap_bci, TRAPS);
   static void      note_trap(JavaThread *thread, int reason, TRAPS);
@@ -139,7 +164,7 @@
   static void _breakpoint(JavaThread* thread, Method* method, address bcp);
   static Bytecodes::Code get_original_bytecode_at(JavaThread* thread, Method* method, address bcp);
   static void            set_original_bytecode_at(JavaThread* thread, Method* method, address bcp, Bytecodes::Code new_code);
-  static bool is_breakpoint(JavaThread *thread) { return Bytecodes::code_or_bp_at(bcp(thread)) == Bytecodes::_breakpoint; }
+  static bool is_breakpoint(JavaThread *thread) { return Bytecodes::code_or_bp_at(LastFrameAccessor(thread).bcp()) == Bytecodes::_breakpoint; }
 
   // Safepoints
   static void    at_safepoint(JavaThread* thread);
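
The interpreterRuntime changes above replace the old per-call static helpers (method(thread), bcp(thread), bci(thread), ...) with a stack-allocated accessor that walks to the last interpreter frame once and answers all later queries from the cached frame. A minimal standalone sketch of that pattern, using hypothetical Frame/Thread stand-ins rather than the HotSpot types:

    #include <cassert>
    #include <cstdio>

    // Hypothetical stand-ins for HotSpot's frame and JavaThread; names are illustrative only.
    struct Frame {
      const unsigned char* bcp_;   // bytecode pointer of the interpreted frame
      int bci_;                    // bytecode index
      const unsigned char* bcp() const { return bcp_; }
      int bci() const { return bci_; }
    };

    struct Thread {
      Frame last;
      Frame last_frame() const { return last; }   // in HotSpot, walking to this frame has a cost
    };

    // Stack-object accessor: materialize the last frame once, then serve many queries from it.
    class LastFrameAccessor {
      Frame _last_frame;
     public:
      explicit LastFrameAccessor(const Thread* t) : _last_frame(t->last_frame()) {}
      const unsigned char* bcp() const { return _last_frame.bcp(); }
      int bci() const { return _last_frame.bci(); }
    };

    int main() {
      unsigned char code[4] = {0};
      Thread t{Frame{code, 7}};
      LastFrameAccessor last_frame(&t);   // one frame lookup, multiple reads below
      std::printf("bci=%d\n", last_frame.bci());
      assert(last_frame.bcp() == code);
      return 0;
    }
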
--- a/hotspot/src/share/vm/interpreter/linkResolver.cpp	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/src/share/vm/interpreter/linkResolver.cpp	Wed May 03 02:32:02 2017 +0000
@@ -150,8 +150,6 @@
     kind = CallInfo::vtable_call;
   } else if (!resolved_klass->is_interface()) {
     // A default or miranda method.  Compute the vtable index.
-    ResourceMark rm;
-    klassVtable* vt = resolved_klass->vtable();
     index = LinkResolver::vtable_index_of_interface_method(resolved_klass,
                            resolved_method);
     assert(index >= 0 , "we should have valid vtable index at this point");
@@ -163,7 +161,7 @@
 #ifdef ASSERT
     // Ensure that this is really the case.
     Klass* object_klass = SystemDictionary::Object_klass();
-    Method * object_resolved_method = object_klass->vtable()->method_at(index);
+    Method * object_resolved_method = object_klass->vtable().method_at(index);
     assert(object_resolved_method->name() == resolved_method->name(),
       "Object and interface method names should match at vtable index %d, %s != %s",
       index, object_resolved_method->name()->as_C_string(), resolved_method->name()->as_C_string());
@@ -400,9 +398,8 @@
   }
   if (vtable_index == Method::invalid_vtable_index) {
     // get vtable_index for miranda methods
-    ResourceMark rm;
-    klassVtable *vt = ik->vtable();
-    vtable_index = vt->index_of_miranda(name, signature);
+    klassVtable vt = ik->vtable();
+    vtable_index = vt.index_of_miranda(name, signature);
   }
   return vtable_index;
 }
--- a/hotspot/src/share/vm/memory/iterator.hpp	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/src/share/vm/memory/iterator.hpp	Wed May 03 02:32:02 2017 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -65,6 +65,16 @@
  public:
   ReferenceProcessor* ref_processor() const { return _ref_processor; }
 
+  // Iteration of InstanceRefKlasses differs depending on the closure;
+  // the enum below describes the different alternatives.
+  enum ReferenceIterationMode {
+    DO_DISCOVERY, // Apply closure and discover references
+    DO_FIELDS     // Apply closure to all fields
+  };
+
+  // The default iteration mode is to do discovery.
+  virtual ReferenceIterationMode reference_iteration_mode() { return DO_DISCOVERY; }
+
   // If the do_metadata functions return "true",
   // we invoke the following when running oop_iterate():
   //
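
The new ReferenceIterationMode hook lets each closure pick how java.lang.ref.Reference instances are walked; the switch on the mode appears later in the instanceRefKlass.inline.hpp hunk. A simplified, self-contained sketch of the virtual-hook-plus-dispatch idea; class and function names here are illustrative, not the HotSpot ones:

    #include <cstdio>

    class Closure {
     public:
      enum ReferenceIterationMode { DO_DISCOVERY, DO_FIELDS };
      // Default mode is discovery; subclasses override to get plain field iteration.
      virtual ReferenceIterationMode reference_iteration_mode() { return DO_DISCOVERY; }
      virtual void do_field(const char* name) = 0;
      virtual ~Closure() {}
    };

    class PrintFieldsClosure : public Closure {
     public:
      ReferenceIterationMode reference_iteration_mode() override { return DO_FIELDS; }
      void do_field(const char* name) override { std::printf("field %s\n", name); }
    };

    // The iteration code asks for the mode once and dispatches, instead of
    // testing closure-specific flags at every field.
    void iterate_reference(Closure* cl) {
      switch (cl->reference_iteration_mode()) {
        case Closure::DO_DISCOVERY:
          std::printf("attempt discovery, then visit fields as needed\n");
          break;
        case Closure::DO_FIELDS:
          cl->do_field("referent");
          cl->do_field("discovered");
          cl->do_field("next");
          break;
      }
    }

    int main() {
      PrintFieldsClosure cl;
      iterate_reference(&cl);
      return 0;
    }
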
--- a/hotspot/src/share/vm/memory/universe.cpp	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/src/share/vm/memory/universe.cpp	Wed May 03 02:32:02 2017 +0000
@@ -526,8 +526,7 @@
 // In case those ever change we use handles for oops
 void Universe::reinitialize_vtable_of(Klass* ko, TRAPS) {
   // init vtable of k and all subclasses
-  klassVtable* vt = ko->vtable();
-  if (vt) vt->initialize_vtable(false, CHECK);
+  ko->vtable().initialize_vtable(false, CHECK);
   if (ko->is_instance_klass()) {
     for (Klass* sk = ko->subklass();
          sk != NULL;
@@ -539,7 +538,7 @@
 
 
 void initialize_itable_for_klass(Klass* k, TRAPS) {
-  InstanceKlass::cast(k)->itable()->initialize_itable(false, CHECK);
+  InstanceKlass::cast(k)->itable().initialize_itable(false, CHECK);
 }
 
 
--- a/hotspot/src/share/vm/oops/arrayKlass.cpp	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/src/share/vm/oops/arrayKlass.cpp	Wed May 03 02:32:02 2017 +0000
@@ -99,7 +99,7 @@
 void ArrayKlass::complete_create_array_klass(ArrayKlass* k, Klass* super_klass, ModuleEntry* module_entry, TRAPS) {
   ResourceMark rm(THREAD);
   k->initialize_supers(super_klass, CHECK);
-  k->vtable()->initialize_vtable(false, CHECK);
+  k->vtable().initialize_vtable(false, CHECK);
 
   // During bootstrapping, before java.base is defined, the module_entry may not be present yet.
   // These classes will be put on a fixup list and their module fields will be patched once
--- a/hotspot/src/share/vm/oops/instanceClassLoaderKlass.hpp	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/src/share/vm/oops/instanceClassLoaderKlass.hpp	Wed May 03 02:32:02 2017 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -48,8 +48,6 @@
 
   // GC specific object visitors
   //
-  // Mark Sweep
-  int  oop_ms_adjust_pointers(oop obj);
 #if INCLUDE_ALL_GCS
   // Parallel Scavenge
   void oop_ps_push_contents(  oop obj, PSPromotionManager* pm);
--- a/hotspot/src/share/vm/oops/instanceKlass.cpp	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/src/share/vm/oops/instanceKlass.cpp	Wed May 03 02:32:02 2017 +0000
@@ -373,8 +373,8 @@
   return !is_initialized();
 }
 
-klassItable* InstanceKlass::itable() const {
-  return new klassItable(const_cast<InstanceKlass*>(this));
+klassItable InstanceKlass::itable() const {
+  return klassItable(const_cast<InstanceKlass*>(this));
 }
 
 void InstanceKlass::eager_initialize(Thread *thread) {
@@ -621,15 +621,14 @@
       if (!(is_shared() &&
             loader_data->is_the_null_class_loader_data())) {
         ResourceMark rm(THREAD);
-        vtable()->initialize_vtable(true, CHECK_false);
-        itable()->initialize_itable(true, CHECK_false);
+        vtable().initialize_vtable(true, CHECK_false);
+        itable().initialize_itable(true, CHECK_false);
       }
 #ifdef ASSERT
       else {
-        ResourceMark rm(THREAD);
-        vtable()->verify(tty, true);
+        vtable().verify(tty, true);
         // In case itable verification is ever added.
-        // itable()->verify(tty, true);
+        // itable().verify(tty, true);
       }
 #endif
       set_init_state(linked);
@@ -807,8 +806,8 @@
   // Step 9
   if (!HAS_PENDING_EXCEPTION) {
     set_initialization_state_and_notify(fully_initialized, CHECK);
-    { ResourceMark rm(THREAD);
-      debug_only(vtable()->verify(tty, true);)
+    {
+      debug_only(vtable().verify(tty, true);)
     }
   }
   else {
@@ -2041,8 +2040,8 @@
     // vtables in the shared system dictionary, only the main one.
     // It also redefines the itable too so fix that too.
     ResourceMark rm(THREAD);
-    vtable()->initialize_vtable(false, CHECK);
-    itable()->initialize_itable(false, CHECK);
+    vtable().initialize_vtable(false, CHECK);
+    itable().initialize_itable(false, CHECK);
   }
 
   // restore constant pool resolved references
@@ -3212,10 +3211,9 @@
 
   // Verify vtables
   if (is_linked()) {
-    ResourceMark rm;
     // $$$ This used to be done only for m/s collections.  Doing it
     // always seemed a valid generalization.  (DLD -- 6/00)
-    vtable()->verify(st);
+    vtable().verify(st);
   }
 
   // Verify first subklass
--- a/hotspot/src/share/vm/oops/instanceKlass.hpp	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/src/share/vm/oops/instanceKlass.hpp	Wed May 03 02:32:02 2017 +0000
@@ -1127,7 +1127,7 @@
   }
 
   // Java itable
-  klassItable* itable() const;        // return new klassItable wrapper
+  klassItable itable() const;        // return klassItable wrapper
   Method* method_at_itable(Klass* holder, int index, TRAPS);
 
 #if INCLUDE_JVMTI
@@ -1163,8 +1163,6 @@
 
   // GC specific object visitors
   //
-  // Mark Sweep
-  int  oop_ms_adjust_pointers(oop obj);
 #if INCLUDE_ALL_GCS
   // Parallel Scavenge
   void oop_ps_push_contents(  oop obj, PSPromotionManager* pm);
--- a/hotspot/src/share/vm/oops/instanceMirrorKlass.hpp	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/src/share/vm/oops/instanceMirrorKlass.hpp	Wed May 03 02:32:02 2017 +0000
@@ -89,8 +89,6 @@
 
   // GC specific object visitors
   //
-  // Mark Sweep
-  int  oop_ms_adjust_pointers(oop obj);
 #if INCLUDE_ALL_GCS
   // Parallel Scavenge
   void oop_ps_push_contents(  oop obj, PSPromotionManager* pm);
--- a/hotspot/src/share/vm/oops/instanceRefKlass.hpp	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/src/share/vm/oops/instanceRefKlass.hpp	Wed May 03 02:32:02 2017 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -58,8 +58,6 @@
 
   // GC specific object visitors
   //
-  // Mark Sweep
-  int  oop_ms_adjust_pointers(oop obj);
 #if INCLUDE_ALL_GCS
   // Parallel Scavenge
   void oop_ps_push_contents(  oop obj, PSPromotionManager* pm);
@@ -107,6 +105,30 @@
   template <bool nv, class OopClosureType>
   inline void oop_oop_iterate_ref_processing(oop obj, OopClosureType* closure);
 
+  // Building blocks for specialized handling.
+  template <bool nv, typename T, class OopClosureType, class Contains>
+  static void do_referent(oop obj, OopClosureType* closure, Contains& contains);
+
+  template <bool nv, typename T, class OopClosureType, class Contains>
+  static void do_next(oop obj, OopClosureType* closure, Contains& contains);
+
+  template <bool nv, typename T, class OopClosureType, class Contains>
+  static void do_discovered(oop obj, OopClosureType* closure, Contains& contains);
+
+  template <typename T, class OopClosureType>
+  static bool try_discover(oop obj, ReferenceType type, OopClosureType* closure);
+
+  // Do discovery while handling InstanceRefKlasses. Reference discovery
+  // is only done if the closure provides a ReferenceProcessor.
+  template <bool nv, typename T, class OopClosureType, class Contains>
+  static void oop_oop_iterate_discovery(oop obj, ReferenceType type, OopClosureType* closure, Contains& contains);
+
+  // Apply the closure to all fields. No reference discovery is done.
+  template <bool nv, typename T, class OopClosureType, class Contains>
+  static void oop_oop_iterate_fields(oop obj, OopClosureType* closure, Contains& contains);
+
+  template <typename T>
+  static void trace_reference_gc(const char *s, oop obj, T* referent_addr, T* next_addr, T* discovered_addr) NOT_DEBUG_RETURN;
 
  public:
 
--- a/hotspot/src/share/vm/oops/instanceRefKlass.inline.hpp	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/src/share/vm/oops/instanceRefKlass.inline.hpp	Wed May 03 02:32:02 2017 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -36,36 +36,96 @@
 #include "utilities/macros.hpp"
 
 template <bool nv, typename T, class OopClosureType, class Contains>
-void InstanceRefKlass::oop_oop_iterate_ref_processing_specialized(oop obj, OopClosureType* closure, Contains& contains) {
-  T* disc_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
+void InstanceRefKlass::do_referent(oop obj, OopClosureType* closure, Contains& contains) {
+  T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
+  if (contains(referent_addr)) {
+    Devirtualizer<nv>::do_oop(closure, referent_addr);
+  }
+}
+
+template <bool nv, typename T, class OopClosureType, class Contains>
+void InstanceRefKlass::do_next(oop obj, OopClosureType* closure, Contains& contains) {
+  T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
+  if (contains(next_addr)) {
+    Devirtualizer<nv>::do_oop(closure, next_addr);
+  }
+}
+
+template <bool nv, typename T, class OopClosureType, class Contains>
+void InstanceRefKlass::do_discovered(oop obj, OopClosureType* closure, Contains& contains) {
+  T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
+  if (contains(discovered_addr)) {
+    Devirtualizer<nv>::do_oop(closure, discovered_addr);
+  }
+}
+
+template <typename T, class OopClosureType>
+bool InstanceRefKlass::try_discover(oop obj, ReferenceType type, OopClosureType* closure) {
+  ReferenceProcessor* rp = closure->ref_processor();
+  if (rp != NULL) {
+    T referent_oop = oopDesc::load_heap_oop((T*)java_lang_ref_Reference::referent_addr(obj));
+    if (!oopDesc::is_null(referent_oop)) {
+      oop referent = oopDesc::decode_heap_oop_not_null(referent_oop);
+      if (!referent->is_gc_marked()) {
+        // Only try to discover if not yet marked.
+        return rp->discover_reference(obj, type);
+      }
+    }
+  }
+  return false;
+}
+
+template <bool nv, typename T, class OopClosureType, class Contains>
+void InstanceRefKlass::oop_oop_iterate_discovery(oop obj, ReferenceType type, OopClosureType* closure, Contains& contains) {
+  log_develop_trace(gc, ref)("Process reference with discovery " PTR_FORMAT, p2i(obj));
+
+  // Special case for some closures.
   if (closure->apply_to_weak_ref_discovered_field()) {
-    Devirtualizer<nv>::do_oop(closure, disc_addr);
+    do_discovered<nv, T>(obj, closure, contains);
   }
 
-  T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
-  T heap_oop = oopDesc::load_heap_oop(referent_addr);
-  ReferenceProcessor* rp = closure->ref_processor();
-  if (!oopDesc::is_null(heap_oop)) {
-    oop referent = oopDesc::decode_heap_oop_not_null(heap_oop);
-    if (!referent->is_gc_marked() && (rp != NULL) &&
-        rp->discover_reference(obj, reference_type())) {
-      return;
-    } else if (contains(referent_addr)) {
-      // treat referent as normal oop
-      Devirtualizer<nv>::do_oop(closure, referent_addr);
-    }
+  // Try to discover reference and return if it succeeds.
+  if (try_discover<T>(obj, type, closure)) {
+    return;
+  }
+
+  // Treat referent as normal oop.
+  do_referent<nv, T>(obj, closure, contains);
+
+  // Treat discovered as normal oop, if ref is not "active" (next non-NULL).
+  T next_oop  = oopDesc::load_heap_oop((T*)java_lang_ref_Reference::next_addr(obj));
+  if (!oopDesc::is_null(next_oop)) {
+    do_discovered<nv, T>(obj, closure, contains);
   }
-  T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
-  T next_oop  = oopDesc::load_heap_oop(next_addr);
-  // Treat discovered as normal oop, if ref is not "active" (next non-NULL)
-  if (!oopDesc::is_null(next_oop) && contains(disc_addr)) {
-    // i.e. ref is not "active"
-    log_develop_trace(gc, ref)("   Process discovered as normal " PTR_FORMAT, p2i(disc_addr));
-    Devirtualizer<nv>::do_oop(closure, disc_addr);
-  }
-  // treat next as normal oop
-  if (contains(next_addr)) {
-    Devirtualizer<nv>::do_oop(closure, next_addr);
+
+  // Treat next as normal oop.
+  do_next<nv, T>(obj, closure, contains);
+}
+
+template <bool nv, typename T, class OopClosureType, class Contains>
+void InstanceRefKlass::oop_oop_iterate_fields(oop obj, OopClosureType* closure, Contains& contains) {
+  do_referent<nv, T>(obj, closure, contains);
+  do_discovered<nv, T>(obj, closure, contains);
+  do_next<nv, T>(obj, closure, contains);
+
+  trace_reference_gc("InstanceRefKlass::oop_oop_iterate_fields()",
+                     obj,
+                     (T*)java_lang_ref_Reference::referent_addr(obj),
+                     (T*)java_lang_ref_Reference::next_addr(obj),
+                     (T*)java_lang_ref_Reference::discovered_addr(obj));
+}
+
+template <bool nv, typename T, class OopClosureType, class Contains>
+void InstanceRefKlass::oop_oop_iterate_ref_processing_specialized(oop obj, OopClosureType* closure, Contains& contains) {
+  switch (closure->reference_iteration_mode()) {
+    case ExtendedOopClosure::DO_DISCOVERY:
+      oop_oop_iterate_discovery<nv, T>(obj, reference_type(), closure, contains);
+      break;
+    case ExtendedOopClosure::DO_FIELDS:
+      oop_oop_iterate_fields<nv, T>(obj, closure, contains);
+      break;
+    default:
+      ShouldNotReachHere();
   }
 }
 
@@ -125,6 +185,19 @@
   oop_oop_iterate_ref_processing_bounded<nv>(obj, closure, mr);
 }
 
+#ifdef ASSERT
+template <typename T>
+void InstanceRefKlass::trace_reference_gc(const char *s, oop obj, T* referent_addr, T* next_addr, T* discovered_addr) {
+  log_develop_trace(gc, ref)("%s obj " PTR_FORMAT, s, p2i(obj));
+  log_develop_trace(gc, ref)("     referent_addr/* " PTR_FORMAT " / " PTR_FORMAT,
+      p2i(referent_addr), p2i(referent_addr ? (address)oopDesc::load_decode_heap_oop(referent_addr) : NULL));
+  log_develop_trace(gc, ref)("     next_addr/* " PTR_FORMAT " / " PTR_FORMAT,
+      p2i(next_addr), p2i(next_addr ? (address)oopDesc::load_decode_heap_oop(next_addr) : NULL));
+  log_develop_trace(gc, ref)("     discovered_addr/* " PTR_FORMAT " / " PTR_FORMAT,
+      p2i(discovered_addr), p2i(discovered_addr ? (address)oopDesc::load_decode_heap_oop(discovered_addr) : NULL));
+}
+#endif
+
 // Macro to define InstanceRefKlass::oop_oop_iterate for virtual/nonvirtual for
 // all closures.  Macros calling macros above for each oop size.
 #define ALL_INSTANCE_REF_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)  \
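
The DO_DISCOVERY path above follows a fixed order: visit the discovered field for closures that ask for it, attempt discovery and stop if it succeeds, otherwise visit referent, visit discovered only when the reference is inactive (next != NULL), and finally visit next. A compact standalone sketch of just that control flow, with stub functions standing in for the ReferenceProcessor and the field visits:

    #include <cstdio>

    // Stubs; in HotSpot these correspond to rp->discover_reference() and Devirtualizer::do_oop().
    static bool try_discover(bool referent_unmarked) { return referent_unmarked; }
    static void visit(const char* field) { std::printf("visit %s\n", field); }

    void iterate_discovery(bool apply_to_discovered_field,
                           bool referent_unmarked,
                           bool is_active) {
      if (apply_to_discovered_field) {
        visit("discovered (closure special case)");
      }
      if (try_discover(referent_unmarked)) {
        return;                          // reference handed to the processor; nothing more to do
      }
      visit("referent");                 // treat referent as a normal field
      if (!is_active) {
        visit("discovered");             // only when the reference is inactive (next != NULL)
      }
      visit("next");
    }

    int main() {
      iterate_discovery(/*apply_to_discovered_field=*/false,
                        /*referent_unmarked=*/false,
                        /*is_active=*/false);
      return 0;
    }
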
--- a/hotspot/src/share/vm/oops/klass.cpp	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/src/share/vm/oops/klass.cpp	Wed May 03 02:32:02 2017 +0000
@@ -497,10 +497,12 @@
 
   // Null out class_loader_data because we don't share that yet.
   set_class_loader_data(NULL);
+  set_is_shared();
 }
 
 void Klass::restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, TRAPS) {
   assert(is_klass(), "ensure C++ vtable is restored");
+  assert(is_shared(), "must be set");
   TRACE_RESTORE_ID(this);
 
   // If an exception happened during CDS restore, some of these fields may already be
@@ -696,8 +698,8 @@
   guarantee(obj->klass()->is_klass(), "klass field is not a klass");
 }
 
-klassVtable* Klass::vtable() const {
-  return new klassVtable(const_cast<Klass*>(this), start_of_vtable(), vtable_length() / vtableEntry::size());
+klassVtable Klass::vtable() const {
+  return klassVtable(const_cast<Klass*>(this), start_of_vtable(), vtable_length() / vtableEntry::size());
 }
 
 vtableEntry* Klass::start_of_vtable() const {
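
Above, Klass::vtable() stops handing out a heap-allocated ResourceObj wrapper and instead returns a small value object, which is why ResourceMarks disappear from several callers elsewhere in this changeset. A minimal sketch of the before/after shape, with a hypothetical Table wrapper over data owned by its holder:

    #include <cassert>

    // Cheap value wrapper over table data owned by someone else (here, Holder).
    class Table {
      const int* _base;
      int _length;
     public:
      Table(const int* base, int length) : _base(base), _length(length) {}
      int length() const { return _length; }
      int at(int i) const { assert(i >= 0 && i < _length); return _base[i]; }
    };

    class Holder {
      int _entries[3] = {10, 20, 30};
     public:
      // Returned by value: no allocation, no resource area, trivially copyable.
      Table table() const { return Table(_entries, 3); }
    };

    int main() {
      Holder h;
      Table t = h.table();   // previously the analogue was a new'd wrapper needing a ResourceMark
      assert(t.length() == 3 && t.at(1) == 20);
      return 0;
    }
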
--- a/hotspot/src/share/vm/oops/klass.hpp	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/src/share/vm/oops/klass.hpp	Wed May 03 02:32:02 2017 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -399,7 +399,7 @@
 #endif
 
   // vtables
-  klassVtable* vtable() const;
+  klassVtable vtable() const;
   int vtable_length() const { return _vtable_len; }
 
   // subclass check
@@ -563,6 +563,8 @@
   void set_has_vanilla_constructor()    { _access_flags.set_has_vanilla_constructor(); }
   bool has_miranda_methods () const     { return access_flags().has_miranda_methods(); }
   void set_has_miranda_methods()        { _access_flags.set_has_miranda_methods(); }
+  bool is_shared() const                { return access_flags().is_shared_class(); } // shadows MetaspaceObj::is_shared()
+  void set_is_shared()                  { _access_flags.set_is_shared_class(); }
 
   bool is_cloneable() const;
   void set_is_cloneable();
@@ -607,8 +609,6 @@
 
   // GC specific object visitors
   //
-  // Mark Sweep
-  virtual int  oop_ms_adjust_pointers(oop obj) = 0;
 #if INCLUDE_ALL_GCS
   // Parallel Scavenge
   virtual void oop_ps_push_contents(  oop obj, PSPromotionManager* pm)   = 0;
--- a/hotspot/src/share/vm/oops/klassVtable.cpp	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/src/share/vm/oops/klassVtable.cpp	Wed May 03 02:32:02 2017 +0000
@@ -136,22 +136,22 @@
     // methods from super class for shared class, as that was already done
     // during archiving time. However, if Jvmti has redefined a class,
     // copy super class's vtable in case the super class has changed.
-    return super->vtable()->length();
+    return super->vtable().length();
   } else {
     // copy methods from superKlass
-    klassVtable* superVtable = super->vtable();
-    assert(superVtable->length() <= _length, "vtable too short");
+    klassVtable superVtable = super->vtable();
+    assert(superVtable.length() <= _length, "vtable too short");
 #ifdef ASSERT
-    superVtable->verify(tty, true);
+    superVtable.verify(tty, true);
 #endif
-    superVtable->copy_vtable_to(table());
+    superVtable.copy_vtable_to(table());
     if (log_develop_is_enabled(Trace, vtables)) {
       ResourceMark rm;
       log_develop_trace(vtables)("copy vtable from %s to %s size %d",
                                  super->internal_name(), klass()->internal_name(),
                                  _length);
     }
-    return superVtable->length();
+    return superVtable.length();
   }
 }
 
@@ -290,9 +290,9 @@
   InstanceKlass* superk = initialsuper;
   while (superk != NULL && superk->super() != NULL) {
     InstanceKlass* supersuperklass = InstanceKlass::cast(superk->super());
-    klassVtable* ssVtable = supersuperklass->vtable();
-    if (vtable_index < ssVtable->length()) {
-      Method* super_method = ssVtable->method_at(vtable_index);
+    klassVtable ssVtable = supersuperklass->vtable();
+    if (vtable_index < ssVtable.length()) {
+      Method* super_method = ssVtable.method_at(vtable_index);
 #ifndef PRODUCT
       Symbol* name= target_method()->name();
       Symbol* signature = target_method()->signature();
@@ -445,8 +445,8 @@
     if (is_preinitialized_vtable()) {
       // If this is a shared class, the vtable is already in the final state (fully
       // initialized). Need to look at the super's vtable.
-      klassVtable* superVtable = super->vtable();
-      super_method = superVtable->method_at(i);
+      klassVtable superVtable = super->vtable();
+      super_method = superVtable.method_at(i);
     } else {
       super_method = method_at(i);
     }
@@ -1014,15 +1014,16 @@
 void itableMethodEntry::initialize(Method* m) {
   if (m == NULL) return;
 
+#ifdef ASSERT
   if (MetaspaceShared::is_in_shared_space((void*)&_method) &&
      !MetaspaceShared::remapped_readwrite()) {
     // At runtime initialize_itable is rerun as part of link_class_impl()
     // for a shared class loaded by the non-boot loader.
     // The dumptime itable method entry should be the same as the runtime entry.
     assert(_method == m, "sanity");
-  } else {
-    _method = m;
   }
+#endif
+  _method = m;
 }
 
 klassItable::klassItable(InstanceKlass* klass) {
@@ -1249,17 +1250,6 @@
   }
 }
 
-// Update entry for specific Method*
-void klassItable::initialize_with_method(Method* m) {
-  itableMethodEntry* ime = method_entry(0);
-  for(int i = 0; i < _size_method_table; i++) {
-    if (ime->method() == m) {
-      ime->initialize(m);
-    }
-    ime++;
-  }
-}
-
 #if INCLUDE_JVMTI
 // search the itable for uses of either obsolete or EMCP methods
 void klassItable::adjust_method_entries(InstanceKlass* holder, bool * trace_name_printed) {
@@ -1488,9 +1478,9 @@
   Klass* super = _klass->super();
   if (super != NULL) {
     InstanceKlass* sk = InstanceKlass::cast(super);
-    klassVtable* vt = sk->vtable();
-    for (int i = 0; i < vt->length(); i++) {
-      verify_against(st, vt, i);
+    klassVtable vt = sk->vtable();
+    for (int i = 0; i < vt.length(); i++) {
+      verify_against(st, &vt, i);
     }
   }
 }
@@ -1557,8 +1547,7 @@
 
   static void do_class(Klass* k) {
     Klass* kl = k;
-    klassVtable* vt = kl->vtable();
-    if (vt == NULL) return;
+    klassVtable vt = kl->vtable();
     no_klasses++;
     if (kl->is_instance_klass()) {
       no_instance_klasses++;
@@ -1566,9 +1555,9 @@
     }
     if (kl->is_array_klass()) {
       no_array_klasses++;
-      sum_of_array_vtable_len += vt->length();
+      sum_of_array_vtable_len += vt.length();
     }
-    sum_of_vtable_len += vt->length();
+    sum_of_vtable_len += vt.length();
   }
 
   static void compute() {
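
itableMethodEntry::initialize above turns a conditional store into a debug-only sanity check followed by an unconditional store, since writing the same value again is harmless. A trivial standalone sketch of that restructuring; the Entry type and expect_unchanged flag are illustrative only:

    #include <cassert>

    struct Entry {
      int _value = 0;

      void initialize(int v, bool expect_unchanged) {
    #ifdef ASSERT   // HotSpot's debug-build macro; define it when building a debug sketch
        if (expect_unchanged) {
          assert(_value == v && "entry should already hold the runtime value");
        }
    #endif
        _value = v;   // unconditional: correct whether or not the value was already set
      }
    };

    int main() {
      Entry e;
      e.initialize(42, /*expect_unchanged=*/false);
      assert(e._value == 42);
      return 0;
    }
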
--- a/hotspot/src/share/vm/oops/klassVtable.hpp	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/src/share/vm/oops/klassVtable.hpp	Wed May 03 02:32:02 2017 +0000
@@ -41,7 +41,7 @@
 
 class vtableEntry;
 
-class klassVtable : public ResourceObj {
+class klassVtable VALUE_OBJ_CLASS_SPEC {
   Klass*       _klass;            // my klass
   int          _tableOffset;      // offset of start of vtable data within klass
   int          _length;           // length of vtable (number of entries)
@@ -288,7 +288,7 @@
 //    -- vtable for interface 2 ---
 //    ...
 //
-class klassItable : public ResourceObj {
+class klassItable VALUE_OBJ_CLASS_SPEC {
  private:
   InstanceKlass*       _klass;             // my klass
   int                  _table_offset;      // offset of start of itable data within klass (in words)
@@ -310,9 +310,6 @@
   // Initialization
   void initialize_itable(bool checkconstraints, TRAPS);
 
-  // Updates
-  void initialize_with_method(Method* m);
-
 #if INCLUDE_JVMTI
   // RedefineClasses() API support:
   // if any entry of this itable points to any of old_methods,
--- a/hotspot/src/share/vm/oops/method.cpp	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/src/share/vm/oops/method.cpp	Wed May 03 02:32:02 2017 +0000
@@ -1191,7 +1191,6 @@
   }
 
   assert(ik->is_subclass_of(method_holder()), "should be subklass");
-  assert(ik->vtable() != NULL, "vtable should exist");
   if (!has_vtable_index()) {
     return false;
   } else {
--- a/hotspot/src/share/vm/oops/objArrayKlass.hpp	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/src/share/vm/oops/objArrayKlass.hpp	Wed May 03 02:32:02 2017 +0000
@@ -112,8 +112,6 @@
 
   // GC specific object visitors
   //
-  // Mark Sweep
-  int  oop_ms_adjust_pointers(oop obj);
 #if INCLUDE_ALL_GCS
   // Parallel Scavenge
   void oop_ps_push_contents(  oop obj, PSPromotionManager* pm);
--- a/hotspot/src/share/vm/oops/oop.hpp	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/src/share/vm/oops/oop.hpp	Wed May 03 02:32:02 2017 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -325,10 +325,6 @@
 
   // Garbage Collection support
 
-  // Mark Sweep
-  // Adjust all pointers in this object to point at it's forwarded location and
-  // return the size of this oop. This is used by the MarkSweep collector.
-  inline int  ms_adjust_pointers();
 #if INCLUDE_ALL_GCS
   // Parallel Compact
   inline void pc_follow_contents(ParCompactionManager* cm);
--- a/hotspot/src/share/vm/oops/oop.inline.hpp	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/src/share/vm/oops/oop.inline.hpp	Wed May 03 02:32:02 2017 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -664,13 +664,6 @@
   }
 }
 
-int oopDesc::ms_adjust_pointers() {
-  debug_only(int check_size = size());
-  int s = klass()->oop_ms_adjust_pointers(this);
-  assert(s == check_size, "should be the same");
-  return s;
-}
-
 #if INCLUDE_ALL_GCS
 void oopDesc::pc_follow_contents(ParCompactionManager* cm) {
   klass()->oop_pc_follow_contents(this, cm);
--- a/hotspot/src/share/vm/oops/typeArrayKlass.hpp	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/src/share/vm/oops/typeArrayKlass.hpp	Wed May 03 02:32:02 2017 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -74,8 +74,6 @@
 
   // GC specific object visitors
   //
-  // Mark Sweep
-  int  oop_ms_adjust_pointers(oop obj);
 #if INCLUDE_ALL_GCS
   // Parallel Scavenge
   void oop_ps_push_contents(  oop obj, PSPromotionManager* pm);
--- a/hotspot/src/share/vm/prims/jvmtiRedefineClasses.cpp	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/src/share/vm/prims/jvmtiRedefineClasses.cpp	Wed May 03 02:32:02 2017 +0000
@@ -3272,7 +3272,7 @@
   // If the class being redefined is java.lang.Object, we need to fix all
   // array class vtables also
   if (k->is_array_klass() && _the_class == SystemDictionary::Object_klass()) {
-    k->vtable()->adjust_method_entries(the_class, &trace_name_printed);
+    k->vtable().adjust_method_entries(the_class, &trace_name_printed);
 
   } else if (k->is_instance_klass()) {
     HandleMark hm(_thread);
@@ -3315,7 +3315,7 @@
       // ik->vtable() creates a wrapper object; rm cleans it up
       ResourceMark rm(_thread);
 
-      ik->vtable()->adjust_method_entries(the_class, &trace_name_printed);
+      ik->vtable().adjust_method_entries(the_class, &trace_name_printed);
       ik->adjust_default_methods(the_class, &trace_name_printed);
     }
 
@@ -3329,10 +3329,8 @@
     if (ik->itable_length() > 0 && (_the_class->is_interface()
         || _the_class == SystemDictionary::internal_Unsafe_klass()
         || ik->is_subclass_of(_the_class))) {
-      // ik->itable() creates a wrapper object; rm cleans it up
       ResourceMark rm(_thread);
-
-      ik->itable()->adjust_method_entries(the_class, &trace_name_printed);
+      ik->itable().adjust_method_entries(the_class, &trace_name_printed);
     }
 
     // The constant pools in other classes (other_cp) can refer to
@@ -3957,8 +3955,8 @@
     // compare_and_normalize_class_versions has already checked:
     //  - classloaders unchanged, signatures unchanged
     //  - all instanceKlasses for redefined classes reused & contents updated
-    the_class->vtable()->initialize_vtable(false, THREAD);
-    the_class->itable()->initialize_itable(false, THREAD);
+    the_class->vtable().initialize_vtable(false, THREAD);
+    the_class->itable().initialize_itable(false, THREAD);
     assert(!HAS_PENDING_EXCEPTION || (THREAD->pending_exception()->is_a(SystemDictionary::ThreadDeath_klass())), "redefine exception");
   }
 
@@ -4093,12 +4091,12 @@
   // a vtable should never contain old or obsolete methods
   ResourceMark rm(_thread);
   if (k->vtable_length() > 0 &&
-      !k->vtable()->check_no_old_or_obsolete_entries()) {
+      !k->vtable().check_no_old_or_obsolete_entries()) {
     if (log_is_enabled(Trace, redefine, class, obsolete, metadata)) {
       log_trace(redefine, class, obsolete, metadata)
         ("klassVtable::check_no_old_or_obsolete_entries failure -- OLD or OBSOLETE method found -- class: %s",
          k->signature_name());
-      k->vtable()->dump_vtable();
+      k->vtable().dump_vtable();
     }
     no_old_methods = false;
   }
@@ -4109,12 +4107,12 @@
 
     // an itable should never contain old or obsolete methods
     if (ik->itable_length() > 0 &&
-        !ik->itable()->check_no_old_or_obsolete_entries()) {
+        !ik->itable().check_no_old_or_obsolete_entries()) {
       if (log_is_enabled(Trace, redefine, class, obsolete, metadata)) {
         log_trace(redefine, class, obsolete, metadata)
           ("klassItable::check_no_old_or_obsolete_entries failure -- OLD or OBSOLETE method found -- class: %s",
            ik->signature_name());
-        ik->itable()->dump_itable();
+        ik->itable().dump_itable();
       }
       no_old_methods = false;
     }
--- a/hotspot/src/share/vm/prims/methodHandles.cpp	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/src/share/vm/prims/methodHandles.cpp	Wed May 03 02:32:02 2017 +0000
@@ -218,7 +218,7 @@
         m_klass_non_interface = SystemDictionary::Object_klass();
 #ifdef ASSERT
         { ResourceMark rm;
-          Method* m2 = m_klass_non_interface->vtable()->method_at(vmindex);
+          Method* m2 = m_klass_non_interface->vtable().method_at(vmindex);
           assert(m->name() == m2->name() && m->signature() == m2->signature(),
                  "at %d, %s != %s", vmindex,
                  m->name_and_sig_as_C_string(), m2->name_and_sig_as_C_string());
--- a/hotspot/src/share/vm/runtime/commandLineFlagConstraintsCompiler.cpp	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/src/share/vm/runtime/commandLineFlagConstraintsCompiler.cpp	Wed May 03 02:32:02 2017 +0000
@@ -289,10 +289,10 @@
 }
 
 Flag::Error ArraycopyDstPrefetchDistanceConstraintFunc(uintx value, bool verbose) {
-  if (value != 0) {
+  if (value >= 4032) {
     CommandLineError::print(verbose,
-                            "ArraycopyDstPrefetchDistance (" UINTX_FORMAT ") must be 0\n",
-                            value);
+                            "ArraycopyDstPrefetchDistance (" UINTX_FORMAT ") must be"
+                            "between 0 and 4031\n", value);
     return Flag::VIOLATES_CONSTRAINT;
   }
 
@@ -300,10 +300,10 @@
 }
 
 Flag::Error ArraycopySrcPrefetchDistanceConstraintFunc(uintx value, bool verbose) {
-  if (value != 0) {
+  if (value >= 4032) {
     CommandLineError::print(verbose,
-                            "ArraycopySrcPrefetchDistance (" UINTX_FORMAT ") must be 0\n",
-                            value);
+                            "ArraycopySrcPrefetchDistance (" UINTX_FORMAT ") must be"
+                            "between 0 and 4031\n", value);
     return Flag::VIOLATES_CONSTRAINT;
   }
 
--- a/hotspot/src/share/vm/runtime/globals.cpp	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/src/share/vm/runtime/globals.cpp	Wed May 03 02:32:02 2017 +0000
@@ -865,16 +865,15 @@
 Flag* Flag::flags = flagTable;
 size_t Flag::numFlags = (sizeof(flagTable) / sizeof(Flag));
 
-inline bool str_equal(const char* s, const char* q, size_t len) {
-  // s is null terminated, q is not!
-  if (strlen(s) != (unsigned int) len) return false;
-  return strncmp(s, q, len) == 0;
+inline bool str_equal(const char* s, size_t s_len, const char* q, size_t q_len) {
+  if (s_len != q_len) return false;
+  return memcmp(s, q, q_len) == 0;
 }
 
 // Search the flag table for a named flag
 Flag* Flag::find_flag(const char* name, size_t length, bool allow_locked, bool return_flag) {
   for (Flag* current = &flagTable[0]; current->_name != NULL; current++) {
-    if (str_equal(current->_name, name, length)) {
+    if (str_equal(current->_name, current->get_name_length(), name, length)) {
       // Found a matching entry.
       // Don't report notproduct and develop flags in product builds.
       if (current->is_constant_in_binary()) {
@@ -895,6 +894,14 @@
   return NULL;
 }
 
+// Get or compute the flag name length
+size_t Flag::get_name_length() {
+  if (_name_len == 0) {
+    _name_len = strlen(_name);
+  }
+  return _name_len;
+}
+
 // Compute string similarity based on Dice's coefficient
 static float str_similar(const char* str1, const char* str2, size_t len2) {
   int len1 = (int) strlen(str1);
--- a/hotspot/src/share/vm/runtime/globals.hpp	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Wed May 03 02:32:02 2017 +0000
@@ -186,6 +186,7 @@
   void* _addr;
   NOT_PRODUCT(const char* _doc;)
   Flags _flags;
+  size_t _name_len;
 
   // points to all Flags static array
   static Flag* flags;
@@ -247,6 +248,8 @@
   Flags get_origin();
   void set_origin(Flags origin);
 
+  size_t get_name_length();
+
   bool is_default();
   bool is_ergonomic();
   bool is_command_line();
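
The globals.cpp and globals.hpp changes above stop re-running strlen on every table entry during flag lookup: each Flag caches its name length lazily, and the comparison checks lengths before memcmp. A self-contained sketch of the same lookup idea, using a hypothetical Flag struct rather than HotSpot's:

    #include <cstdio>
    #include <cstring>

    struct Flag {
      const char* name;
      size_t name_len;      // 0 means "not computed yet"
      size_t length() {     // lazily compute and cache the name length
        if (name_len == 0) name_len = std::strlen(name);
        return name_len;
      }
    };

    static Flag flag_table[] = { {"PrintFlagsFinal", 0}, {"UseBiasedLocking", 0}, {nullptr, 0} };

    // 'name' need not be NUL-terminated; 'len' gives the number of characters to match.
    Flag* find_flag(const char* name, size_t len) {
      for (Flag* f = flag_table; f->name != nullptr; f++) {
        if (f->length() == len && std::memcmp(f->name, name, len) == 0) {
          return f;   // length check first avoids memcmp on most entries
        }
      }
      return nullptr;
    }

    int main() {
      const char* arg = "UseBiasedLocking=false";
      Flag* f = find_flag(arg, 16);   // compare only the name part before '='
      std::printf("%s\n", f != nullptr ? f->name : "not found");
      return 0;
    }
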
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp	Wed May 03 02:32:02 2017 +0000
@@ -199,6 +199,7 @@
 typedef HashtableEntry<InstanceKlass*, mtClass>  KlassHashtableEntry;
 typedef TwoOopHashtable<Symbol*, mtClass>     SymbolTwoOopHashtable;
 typedef CompactHashtable<Symbol*, char>       SymbolCompactHashTable;
+typedef RehashableHashtable<Symbol*, mtSymbol>   RehashableSymbolHashtable;
 
 //--------------------------------------------------------------------------------
 // VM_STRUCTS
@@ -584,6 +585,7 @@
                                                                                                                                      \
      static_field(SymbolTable,                 _the_table,                                    SymbolTable*)                          \
      static_field(SymbolTable,                 _shared_table,                                 SymbolCompactHashTable)                \
+     static_field(RehashableSymbolHashtable,   _seed,                                         juint)                                 \
                                                                                                                                      \
   /***************/                                                                                                                  \
   /* StringTable */                                                                                                                  \
@@ -1602,6 +1604,8 @@
                                                                           \
   declare_toplevel_type(BasicHashtable<mtInternal>)                       \
     declare_type(IntptrHashtable, BasicHashtable<mtInternal>)             \
+  declare_toplevel_type(BasicHashtable<mtSymbol>)                         \
+    declare_type(RehashableSymbolHashtable, BasicHashtable<mtSymbol>)     \
   declare_type(SymbolTable, SymbolHashtable)                              \
   declare_type(StringTable, StringHashtable)                              \
     declare_type(LoaderConstraintTable, KlassHashtable)                   \
--- a/hotspot/src/share/vm/utilities/accessFlags.hpp	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/src/share/vm/utilities/accessFlags.hpp	Wed May 03 02:32:02 2017 +0000
@@ -64,6 +64,7 @@
   JVM_ACC_HAS_FINALIZER           = 0x40000000,     // True if klass has a non-empty finalize() method
   JVM_ACC_IS_CLONEABLE_FAST       = (int)0x80000000,// True if klass implements the Cloneable interface and can be optimized in generated code
   JVM_ACC_HAS_FINAL_METHOD        = 0x01000000,     // True if klass has final method
+  JVM_ACC_IS_SHARED_CLASS         = 0x02000000,     // True if klass is shared
 
   // Klass* and Method* flags
   JVM_ACC_HAS_LOCAL_VARIABLE_TABLE= 0x00200000,
@@ -146,6 +147,8 @@
   bool has_finalizer           () const { return (_flags & JVM_ACC_HAS_FINALIZER          ) != 0; }
   bool has_final_method        () const { return (_flags & JVM_ACC_HAS_FINAL_METHOD       ) != 0; }
   bool is_cloneable_fast       () const { return (_flags & JVM_ACC_IS_CLONEABLE_FAST      ) != 0; }
+  bool is_shared_class         () const { return (_flags & JVM_ACC_IS_SHARED_CLASS        ) != 0; }
+
   // Klass* and Method* flags
   bool has_localvariable_table () const { return (_flags & JVM_ACC_HAS_LOCAL_VARIABLE_TABLE) != 0; }
   void set_has_localvariable_table()    { atomic_set_bits(JVM_ACC_HAS_LOCAL_VARIABLE_TABLE); }
@@ -216,6 +219,7 @@
   void set_has_final_method()          { atomic_set_bits(JVM_ACC_HAS_FINAL_METHOD);        }
   void set_is_cloneable_fast()         { atomic_set_bits(JVM_ACC_IS_CLONEABLE_FAST);       }
   void set_has_miranda_methods()       { atomic_set_bits(JVM_ACC_HAS_MIRANDA_METHODS);     }
+  void set_is_shared_class()           { atomic_set_bits(JVM_ACC_IS_SHARED_CLASS);         }
 
  public:
   // field flags
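
The accessFlags.hpp hunk adds one more class-level bit, JVM_ACC_IS_SHARED_CLASS, wired up as the usual mask-test getter and bit-set setter (atomic in HotSpot). A trivial standalone sketch of the pattern, with a plain field update in place of the atomic one:

    #include <cassert>

    enum { ACC_IS_SHARED_CLASS = 0x02000000 };   // same bit value as the hunk above

    struct AccessFlags {
      int _flags = 0;
      bool is_shared_class() const { return (_flags & ACC_IS_SHARED_CLASS) != 0; }
      void set_is_shared_class()   { _flags |= ACC_IS_SHARED_CLASS; }   // HotSpot uses atomic_set_bits()
    };

    int main() {
      AccessFlags f;
      assert(!f.is_shared_class());
      f.set_is_shared_class();
      assert(f.is_shared_class());
      return 0;
    }
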
--- a/hotspot/src/share/vm/utilities/debug.cpp	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/src/share/vm/utilities/debug.cpp	Wed May 03 02:32:02 2017 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -490,7 +490,7 @@
 extern "C" void dump_vtable(address p) {
   Command c("dump_vtable");
   Klass* k = (Klass*)p;
-  k->vtable()->print();
+  k->vtable().print();
 }
 
 
--- a/hotspot/src/share/vm/utilities/hashtable.hpp	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/src/share/vm/utilities/hashtable.hpp	Wed May 03 02:32:02 2017 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -294,6 +294,7 @@
 };
 
 template <class T, MEMFLAGS F> class RehashableHashtable : public Hashtable<T, F> {
+ friend class VMStructs;
  protected:
 
   enum {
--- a/hotspot/test/ProblemList.txt	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/test/ProblemList.txt	Wed May 03 02:32:02 2017 +0000
@@ -113,6 +113,7 @@
 gc/g1/humongousObjects/objectGraphTest/TestObjectGraphAfterGC.java 8156755 generic-all
 gc/survivorAlignment/TestPromotionToSurvivor.java 8129886 generic-all
 gc/g1/logging/TestG1LoggingFailure.java 8169634 generic-all
+gc/g1/humongousObjects/TestHeapCounters.java 8178918 generic-all
 
 #############################################################################
 
@@ -121,7 +122,6 @@
 runtime/CompressedOops/UseCompressedOops.java 8079353 generic-all
 # This test is disabled since it will stress NMT and timeout during normal testing
 runtime/NMT/MallocStressTest.java 8166548 generic-all
-runtime/SharedArchiveFile/BootAppendTests.java 8150683 generic-all
 runtime/SharedArchiveFile/DefaultUseWithClient.java 8154204 generic-all
 
 #############################################################################
--- a/hotspot/test/runtime/SharedArchiveFile/ArchiveDoesNotExist.java	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/test/runtime/SharedArchiveFile/ArchiveDoesNotExist.java	Wed May 03 02:32:02 2017 +0000
@@ -27,6 +27,7 @@
  *          attempting to use CDS archive. JVM should exit gracefully
  *          when sharing mode is ON, and continue w/o sharing if sharing
  *          mode is AUTO.
+ * @requires (vm.opt.UseCompressedOops == null) | (vm.opt.UseCompressedOops == true)
  * @library /test/lib
  * @modules java.base/jdk.internal.misc
  *          java.management
--- a/hotspot/test/runtime/SharedArchiveFile/BootAppendTests.java	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/test/runtime/SharedArchiveFile/BootAppendTests.java	Wed May 03 02:32:02 2017 +0000
@@ -24,6 +24,7 @@
 /**
  * @test
  * @summary Testing -Xbootclasspath/a support for CDS
+ * @requires (vm.opt.UseCompressedOops == null) | (vm.opt.UseCompressedOops == true)
  * @library /test/lib
  * @modules java.base/jdk.internal.misc
  *          java.management
@@ -73,7 +74,7 @@
         logTestCase("1");
         testBootAppendModuleClass();
 
-        log("TESTCASE: 2");
+        logTestCase("2");
         testBootAppendDuplicateModuleClass();
 
         logTestCase("3");
@@ -123,11 +124,11 @@
         for (String mode : modes) {
             CDSOptions opts = (new CDSOptions())
                 .setXShareMode(mode).setUseVersion(false)
-                .addPrefix("-Xbootclasspath/a:" + bootAppendJar, "-cp", appJar)
+                .addPrefix("-Xbootclasspath/a:" + bootAppendJar, "-cp", appJar, "-showversion")
                 .addSuffix(APP_CLASS, BOOT_APPEND_MODULE_CLASS_NAME);
 
-            CDSTestUtils.runWithArchive(opts)
-                .shouldContain("java.lang.ClassNotFoundException: javax.sound.sampled.MyClass");
+            OutputAnalyzer out = CDSTestUtils.runWithArchive(opts);
+            CDSTestUtils.checkExec(out, opts, "java.lang.ClassNotFoundException: javax.sound.sampled.MyClass");
         }
     }
 
@@ -144,11 +145,13 @@
         for (String mode : modes) {
             CDSOptions opts = (new CDSOptions())
                 .setXShareMode(mode).setUseVersion(false)
-                .addPrefix("-Xbootclasspath/a:" + bootAppendJar, "-cp", appJar)
-                .addSuffix(APP_CLASS, BOOT_APPEND_DUPLICATE_MODULE_CLASS_NAME);
+                .addPrefix("--add-modules", "java.corba", "-showversion",
+                           "-Xbootclasspath/a:" + bootAppendJar, "-cp", appJar)
+                .addSuffix("-Xlog:class+load=info",
+                           APP_CLASS, BOOT_APPEND_DUPLICATE_MODULE_CLASS_NAME);
 
-            CDSTestUtils.runWithArchive(opts)
-                .shouldContain("[class,load] org.omg.CORBA.Context source: jrt:/java.corba");
+            OutputAnalyzer out = CDSTestUtils.runWithArchive(opts);
+            CDSTestUtils.checkExec(out, opts, "[class,load] org.omg.CORBA.Context source: jrt:/java.corba");
         }
     }
 
@@ -164,17 +167,17 @@
         for (String mode : modes) {
             CDSOptions opts = (new CDSOptions())
                 .setXShareMode(mode).setUseVersion(false)
-                .addPrefix("-Xbootclasspath/a:" + bootAppendJar,
+                .addPrefix("-Xbootclasspath/a:" + bootAppendJar, "-showversion",
                            "--limit-modules=java.base", "-cp", appJar)
                 .addSuffix("-Xlog:class+load=info",
                            APP_CLASS, BOOT_APPEND_MODULE_CLASS_NAME);
 
-            OutputAnalyzer out = CDSTestUtils.runWithArchive(opts)
-                .shouldContain("[class,load] javax.sound.sampled.MyClass");
+            OutputAnalyzer out = CDSTestUtils.runWithArchive(opts);
+            CDSTestUtils.checkExec(out, opts, "[class,load] javax.sound.sampled.MyClass");
 
             // When CDS is enabled, the shared class should be loaded from the archive.
             if (mode.equals("on")) {
-                out.shouldContain("[class,load] javax.sound.sampled.MyClass source: shared objects file");
+                CDSTestUtils.checkExec(out, opts, "[class,load] javax.sound.sampled.MyClass source: shared objects file");
             }
         }
     }
@@ -193,14 +196,16 @@
         for (String mode : modes) {
             CDSOptions opts = (new CDSOptions())
                 .setXShareMode(mode).setUseVersion(false)
-                .addPrefix("-Xbootclasspath/a:" + bootAppendJar,
+                .addPrefix("-Xbootclasspath/a:" + bootAppendJar, "-showversion",
                            "--limit-modules=java.base", "-cp", appJar)
                 .addSuffix("-Xlog:class+load=info",
                            APP_CLASS, BOOT_APPEND_DUPLICATE_MODULE_CLASS_NAME);
 
-            CDSTestUtils.runWithArchive(opts)
-                .shouldContain("[class,load] org.omg.CORBA.Context")
-                .shouldMatch(".*\\[class,load\\] org.omg.CORBA.Context source:.*bootAppend.jar");
+            OutputAnalyzer out = CDSTestUtils.runWithArchive(opts);
+            CDSTestUtils.checkExec(out, opts, "[class,load] org.omg.CORBA.Context");
+            if (!CDSTestUtils.isUnableToMap(out)) {
+                out.shouldMatch(".*\\[class,load\\] org.omg.CORBA.Context source:.*bootAppend.jar");
+            }
         }
     }
 
@@ -215,18 +220,18 @@
         for (String mode : modes) {
             CDSOptions opts = (new CDSOptions())
                 .setXShareMode(mode).setUseVersion(false)
-                .addPrefix("-Xbootclasspath/a:" + bootAppendJar,
+                .addPrefix("-Xbootclasspath/a:" + bootAppendJar, "-showversion",
                            "--limit-modules=java.base", "-cp", appJar)
                 .addSuffix("-Xlog:class+load=info",
                            APP_CLASS, BOOT_APPEND_CLASS_NAME);
 
-            OutputAnalyzer out = CDSTestUtils.runWithArchive(opts)
-                .shouldContain("[class,load] nonjdk.myPackage.MyClass");
+            OutputAnalyzer out = CDSTestUtils.runWithArchive(opts);
+            CDSTestUtils.checkExec(out, opts, "[class,load] nonjdk.myPackage.MyClass");
 
             // If CDS is enabled, the nonjdk.myPackage.MyClass should be loaded
             // from the shared archive.
             if (mode.equals("on")) {
-                out.shouldContain(
+                CDSTestUtils.checkExec(out, opts,
                     "[class,load] nonjdk.myPackage.MyClass source: shared objects file");
             }
         }
--- a/hotspot/test/runtime/SharedArchiveFile/CdsDifferentCompactStrings.java	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/test/runtime/SharedArchiveFile/CdsDifferentCompactStrings.java	Wed May 03 02:32:02 2017 +0000
@@ -25,6 +25,7 @@
  * @test CdsDifferentCompactStrings
  * @summary CDS (class data sharing) requires the same -XX:[+-]CompactStrings
  *          setting between archive creation time and load time.
+ * @requires (vm.opt.UseCompressedOops == null) | (vm.opt.UseCompressedOops == true)
  * @library /test/lib
  * @modules java.base/jdk.internal.misc
  *          java.management
--- a/hotspot/test/runtime/SharedArchiveFile/CdsDifferentObjectAlignment.java	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/test/runtime/SharedArchiveFile/CdsDifferentObjectAlignment.java	Wed May 03 02:32:02 2017 +0000
@@ -28,6 +28,7 @@
  *          This is a negative test; using  object alignment for loading that
  *          is different from object alignment for creating a CDS file
  *          should fail when loading.
+ * @requires (vm.opt.UseCompressedOops == null) | (vm.opt.UseCompressedOops == true)
  * @library /test/lib
  * @bug 8025642
  * @modules java.base/jdk.internal.misc
--- a/hotspot/test/runtime/SharedArchiveFile/CdsSameObjectAlignment.java	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/test/runtime/SharedArchiveFile/CdsSameObjectAlignment.java	Wed May 03 02:32:02 2017 +0000
@@ -25,6 +25,7 @@
  * @test CdsSameObjectAlignment
  * @summary Testing CDS (class data sharing) using varying object alignment.
  *          Using same object alignment for each dump/load pair
+ * @requires (vm.opt.UseCompressedOops == null) | (vm.opt.UseCompressedOops == true)
  * @library /test/lib
  * @modules java.base/jdk.internal.misc
  *          java.management
--- a/hotspot/test/runtime/SharedArchiveFile/DefaultUseWithClient.java	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/test/runtime/SharedArchiveFile/DefaultUseWithClient.java	Wed May 03 02:32:02 2017 +0000
@@ -24,6 +24,7 @@
 /*
  * @test DefaultUseWithClient
  * @summary Test default behavior of sharing with -client
+ * @requires (vm.opt.UseCompressedOops == null) | (vm.opt.UseCompressedOops == true)
  * @library /test/lib
  * @modules java.base/jdk.internal.misc
  *          java.management
--- a/hotspot/test/runtime/SharedArchiveFile/LargeSharedSpace.java	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/test/runtime/SharedArchiveFile/LargeSharedSpace.java	Wed May 03 02:32:02 2017 +0000
@@ -26,6 +26,7 @@
  * @bug 8168790 8169870
  * @summary Test CDS dumping using specific space size without crashing.
  * The space size used in the test might not be suitable on windows.
+ * @requires (vm.opt.UseCompressedOops == null) | (vm.opt.UseCompressedOops == true)
  * @requires (os.family != "windows")
  * @library /test/lib
  * @modules java.base/jdk.internal.misc
--- a/hotspot/test/runtime/SharedArchiveFile/LimitSharedSizes.java	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/test/runtime/SharedArchiveFile/LimitSharedSizes.java	Wed May 03 02:32:02 2017 +0000
@@ -23,6 +23,7 @@
 
 /* @test LimitSharedSizes
  * @summary Test handling of limits on shared space size
+ * @requires (vm.opt.UseCompressedOops == null) | (vm.opt.UseCompressedOops == true)
  * @library /test/lib /runtime/CommandLine/OptionsValidation/common
  * @modules java.base/jdk.internal.misc
  *          java.management
--- a/hotspot/test/runtime/SharedArchiveFile/PrintSharedArchiveAndExit.java	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/test/runtime/SharedArchiveFile/PrintSharedArchiveAndExit.java	Wed May 03 02:32:02 2017 +0000
@@ -25,6 +25,7 @@
  * @test
  * @bug 8066670
  * @summary Testing -XX:+PrintSharedArchiveAndExit option
+ * @requires (vm.opt.UseCompressedOops == null) | (vm.opt.UseCompressedOops == true)
  * @library /test/lib
  * @modules java.base/jdk.internal.misc
  *          java.management
--- a/hotspot/test/runtime/SharedArchiveFile/SASymbolTableTest.java	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/test/runtime/SharedArchiveFile/SASymbolTableTest.java	Wed May 03 02:32:02 2017 +0000
@@ -24,6 +24,7 @@
 /*
  * @test SASymbolTableTest
  * @summary Walk symbol table using SA, with and without CDS.
+ * @requires (vm.opt.UseCompressedOops == null) | (vm.opt.UseCompressedOops == true)
  * @library /test/lib
  * @modules java.base/jdk.internal.misc
  *          jdk.hotspot.agent/sun.jvm.hotspot.oops
--- a/hotspot/test/runtime/SharedArchiveFile/SharedArchiveFile.java	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/test/runtime/SharedArchiveFile/SharedArchiveFile.java	Wed May 03 02:32:02 2017 +0000
@@ -25,6 +25,7 @@
  * @test
  * @bug 8014138
  * @summary Testing new -XX:SharedArchiveFile=<file-name> option
+ * @requires (vm.opt.UseCompressedOops == null) | (vm.opt.UseCompressedOops == true)
  * @library /test/lib
  * @modules java.base/jdk.internal.misc
  *          java.management
@@ -40,14 +41,14 @@
 // methods to form command line to create/use shared archive.
 public class SharedArchiveFile {
     public static void main(String[] args) throws Exception {
-        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(true,
                                 "-XX:+UnlockDiagnosticVMOptions",
                                 "-XX:SharedArchiveFile=./SharedArchiveFile.jsa",
                                 "-Xshare:dump");
         OutputAnalyzer out = CDSTestUtils.executeAndLog(pb, "SharedArchiveFile");
         CDSTestUtils.checkDump(out);
 
-        pb = ProcessTools.createJavaProcessBuilder(
+        pb = ProcessTools.createJavaProcessBuilder(true,
                               "-XX:+UnlockDiagnosticVMOptions",
                               "-XX:SharedArchiveFile=./SharedArchiveFile.jsa",
                               "-Xshare:on", "-version");
--- a/hotspot/test/runtime/SharedArchiveFile/SharedBaseAddress.java	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/test/runtime/SharedArchiveFile/SharedBaseAddress.java	Wed May 03 02:32:02 2017 +0000
@@ -25,6 +25,7 @@
  * @test SharedBaseAddress
  * @summary Test variety of values for SharedBaseAddress, making sure
  *          VM handles normal values as well as edge values w/o a crash.
+ * @requires (vm.opt.UseCompressedOops == null) | (vm.opt.UseCompressedOops == true)
  * @library /test/lib
  * @modules java.base/jdk.internal.misc
  *          java.management
--- a/hotspot/test/runtime/SharedArchiveFile/SharedSymbolTableBucketSize.java	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/test/runtime/SharedArchiveFile/SharedSymbolTableBucketSize.java	Wed May 03 02:32:02 2017 +0000
@@ -25,6 +25,7 @@
  * @test
  * @bug 8059510
  * @summary Test SharedSymbolTableBucketSize option
+ * @requires (vm.opt.UseCompressedOops == null) | (vm.opt.UseCompressedOops == true)
  * @library /test/lib
  * @modules java.base/jdk.internal.misc
  *          java.management
--- a/hotspot/test/runtime/SharedArchiveFile/SpaceUtilizationCheck.java	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/test/runtime/SharedArchiveFile/SpaceUtilizationCheck.java	Wed May 03 02:32:02 2017 +0000
@@ -24,6 +24,7 @@
 /*
  * @test SpaceUtilizationCheck
  * @summary Check if the space utilization for shared spaces is adequate
+ * @requires (vm.opt.UseCompressedOops == null) | (vm.opt.UseCompressedOops == true)
  * @library /test/lib
  * @modules java.base/jdk.internal.misc
  *          java.management
--- a/hotspot/test/runtime/SharedArchiveFile/TestInterpreterMethodEntries.java	Tue May 02 19:27:26 2017 -0700
+++ b/hotspot/test/runtime/SharedArchiveFile/TestInterpreterMethodEntries.java	Wed May 03 02:32:02 2017 +0000
@@ -26,6 +26,7 @@
  * @bug 8169711
  * @summary Test interpreter method entries for intrinsics with CDS (class data sharing)
  *          and different settings of the intrinsic flag during dump/use of the archive.
+ * @requires (vm.opt.UseCompressedOops == null) | (vm.opt.UseCompressedOops == true)
  * @library /test/lib
  * @modules java.base/jdk.internal.misc
  *          java.management