8199781: Don't use naked == for comparing oops
author rkennke
Tue, 03 Apr 2018 13:15:27 +0200
changeset 49658 8237a91c1cca
parent 49657 45071514f87a
child 49659 0ed1370f52bb
child 49667 2fef34f04314
8199781: Don't use naked == for comparing oops Reviewed-by: coleenp, eosterlund, jrose
src/hotspot/share/ci/ciEnv.cpp
src/hotspot/share/ci/ciObjectFactory.cpp
src/hotspot/share/classfile/classLoaderData.cpp
src/hotspot/share/classfile/dictionary.cpp
src/hotspot/share/classfile/javaClasses.cpp
src/hotspot/share/classfile/protectionDomainCache.cpp
src/hotspot/share/classfile/systemDictionary.cpp
src/hotspot/share/code/dependencies.cpp
src/hotspot/share/gc/shared/barrierSet.hpp
src/hotspot/share/interpreter/bytecodeInterpreter.cpp
src/hotspot/share/interpreter/interpreterRuntime.cpp
src/hotspot/share/memory/universe.cpp
src/hotspot/share/oops/access.cpp
src/hotspot/share/oops/access.hpp
src/hotspot/share/oops/access.inline.hpp
src/hotspot/share/oops/accessBackend.hpp
src/hotspot/share/oops/accessDecorators.hpp
src/hotspot/share/oops/constantPool.cpp
src/hotspot/share/oops/instanceKlass.cpp
src/hotspot/share/oops/klassVtable.cpp
src/hotspot/share/oops/objArrayKlass.cpp
src/hotspot/share/oops/oop.hpp
src/hotspot/share/prims/jni.cpp
src/hotspot/share/prims/jvm.cpp
src/hotspot/share/prims/methodHandles.cpp
src/hotspot/share/prims/stackwalk.cpp
src/hotspot/share/prims/unsafe.cpp
src/hotspot/share/runtime/biasedLocking.cpp
src/hotspot/share/runtime/handles.hpp
src/hotspot/share/runtime/reflection.cpp
src/hotspot/share/runtime/synchronizer.cpp
src/hotspot/share/services/memoryManager.hpp
src/hotspot/share/services/memoryPool.hpp
src/hotspot/share/services/threadService.cpp
src/hotspot/share/utilities/exceptions.cpp
src/hotspot/share/utilities/growableArray.hpp
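The pattern throughout this change is mechanical: every naked == (or !=) between two oops becomes a call to oopDesc::equals(), which routes the comparison through the Access API so that a garbage collector can intercept it. A minimal sketch of the assumed wrapper follows; the real definition lives in src/hotspot/share/oops/oop.hpp (listed above, but its hunk falls outside this excerpt):

    // Sketch only, assuming the wrapper forwards to the new Access<>::equals()
    // entry point added in this changeset. Routing the compare through the
    // Access API matters for collectors where from-space and to-space copies
    // of the same object can both be visible.
    bool oopDesc::equals(oop o1, oop o2) {
      return Access<>::equals(o1, o2);
    }

    // Call sites then change mechanically:
    //   before: if (k->class_loader() == class_loader()) { ... }
    //   after:  if (oopDesc::equals(k->class_loader(), class_loader())) { ... }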
--- a/src/hotspot/share/ci/ciEnv.cpp	Tue Apr 03 10:27:46 2018 +0200
+++ b/src/hotspot/share/ci/ciEnv.cpp	Tue Apr 03 13:15:27 2018 +0200
@@ -541,7 +541,7 @@
     // Calculate accessibility the hard way.
     if (!k->is_loaded()) {
       is_accessible = false;
-    } else if (k->loader() != accessor->loader() &&
+    } else if (!oopDesc::equals(k->loader(), accessor->loader()) &&
                get_klass_by_name_impl(accessor, cpool, k->name(), true) == NULL) {
       // Loaded only remotely.  Not linked yet.
       is_accessible = false;
@@ -592,7 +592,7 @@
     index = cpool->object_to_cp_index(cache_index);
     oop obj = cpool->resolved_references()->obj_at(cache_index);
     if (obj != NULL) {
-      if (obj == Universe::the_null_sentinel()) {
+      if (oopDesc::equals(obj, Universe::the_null_sentinel())) {
         return ciConstant(T_OBJECT, get_object(NULL));
       }
       BasicType bt = T_OBJECT;
--- a/src/hotspot/share/ci/ciObjectFactory.cpp	Tue Apr 03 10:27:46 2018 +0200
+++ b/src/hotspot/share/ci/ciObjectFactory.cpp	Tue Apr 03 13:15:27 2018 +0200
@@ -249,7 +249,7 @@
   // into the cache.
   Handle keyHandle(Thread::current(), key);
   ciObject* new_object = create_new_object(keyHandle());
-  assert(keyHandle() == new_object->get_oop(), "must be properly recorded");
+  assert(oopDesc::equals(keyHandle(), new_object->get_oop()), "must be properly recorded");
   init_ident_of(new_object);
   assert(Universe::heap()->is_in_reserved(new_object->get_oop()), "must be");
 
@@ -450,8 +450,8 @@
   for (int i=0; i<_unloaded_klasses->length(); i++) {
     ciKlass* entry = _unloaded_klasses->at(i);
     if (entry->name()->equals(name) &&
-        entry->loader() == loader &&
-        entry->protection_domain() == domain) {
+        oopDesc::equals(entry->loader(), loader) &&
+        oopDesc::equals(entry->protection_domain(), domain)) {
       // We've found a match.
       return entry;
     }
--- a/src/hotspot/share/classfile/classLoaderData.cpp	Tue Apr 03 10:27:46 2018 +0200
+++ b/src/hotspot/share/classfile/classLoaderData.cpp	Tue Apr 03 13:15:27 2018 +0200
@@ -201,7 +201,7 @@
   VerifyContainsOopClosure(oop target) : _target(target), _found(false) {}
 
   void do_oop(oop* p) {
-    if (p != NULL && *p == _target) {
+    if (p != NULL && oopDesc::equals(RawAccess<>::oop_load(p), _target)) {
       _found = true;
     }
   }
@@ -380,7 +380,7 @@
 
     // Just return if this dependency is to a class with the same or a parent
     // class_loader.
-    if (from == to || java_lang_ClassLoader::isAncestor(from, to)) {
+    if (oopDesc::equals(from, to) || java_lang_ClassLoader::isAncestor(from, to)) {
       return; // this class loader is in the parent list, no need to add it.
     }
   }
--- a/src/hotspot/share/classfile/dictionary.cpp	Tue Apr 03 10:27:46 2018 +0200
+++ b/src/hotspot/share/classfile/dictionary.cpp	Tue Apr 03 13:15:27 2018 +0200
@@ -161,13 +161,13 @@
 
 bool DictionaryEntry::contains_protection_domain(oop protection_domain) const {
 #ifdef ASSERT
-  if (protection_domain == instance_klass()->protection_domain()) {
+  if (oopDesc::equals(protection_domain, instance_klass()->protection_domain())) {
     // Ensure this doesn't show up in the pd_set (invariant)
     bool in_pd_set = false;
     for (ProtectionDomainEntry* current = pd_set_acquire();
                                 current != NULL;
                                 current = current->next()) {
-      if (current->object_no_keepalive() == protection_domain) {
+      if (oopDesc::equals(current->object_no_keepalive(), protection_domain)) {
         in_pd_set = true;
         break;
       }
@@ -179,7 +179,7 @@
   }
 #endif /* ASSERT */
 
-  if (protection_domain == instance_klass()->protection_domain()) {
+  if (oopDesc::equals(protection_domain, instance_klass()->protection_domain())) {
     // Succeeds trivially
     return true;
   }
@@ -187,7 +187,7 @@
   for (ProtectionDomainEntry* current = pd_set_acquire();
                               current != NULL;
                               current = current->next()) {
-    if (current->object_no_keepalive() == protection_domain) return true;
+    if (oopDesc::equals(current->object_no_keepalive(), protection_domain)) return true;
   }
   return false;
 }
--- a/src/hotspot/share/classfile/javaClasses.cpp	Tue Apr 03 10:27:46 2018 +0200
+++ b/src/hotspot/share/classfile/javaClasses.cpp	Tue Apr 03 13:15:27 2018 +0200
@@ -872,7 +872,7 @@
   } else {
     assert(Universe::is_module_initialized() ||
            (ModuleEntryTable::javabase_defined() &&
-            (module() == ModuleEntryTable::javabase_moduleEntry()->module())),
+            (oopDesc::equals(module(), ModuleEntryTable::javabase_moduleEntry()->module()))),
            "Incorrect java.lang.Module specification while creating mirror");
     set_module(mirror(), module());
   }
@@ -949,7 +949,7 @@
     }
 
     // set the classLoader field in the java_lang_Class instance
-    assert(class_loader() == k->class_loader(), "should be same");
+    assert(oopDesc::equals(class_loader(), k->class_loader()), "should be same");
     set_class_loader(mirror(), class_loader());
 
     // Setup indirection from klass->mirror
@@ -1463,9 +1463,9 @@
     // Note: create_basic_type_mirror above initializes ak to a non-null value.
     type = ArrayKlass::cast(ak)->element_type();
   } else {
-    assert(java_class == Universe::void_mirror(), "only valid non-array primitive");
+    assert(oopDesc::equals(java_class, Universe::void_mirror()), "only valid non-array primitive");
   }
-  assert(Universe::java_mirror(type) == java_class, "must be consistent");
+  assert(oopDesc::equals(Universe::java_mirror(type), java_class), "must be consistent");
   return type;
 }
 
@@ -3838,14 +3838,14 @@
 }
 
 bool java_lang_invoke_MethodType::equals(oop mt1, oop mt2) {
-  if (mt1 == mt2)
+  if (oopDesc::equals(mt1, mt2))
     return true;
-  if (rtype(mt1) != rtype(mt2))
+  if (!oopDesc::equals(rtype(mt1), rtype(mt2)))
     return false;
   if (ptype_count(mt1) != ptype_count(mt2))
     return false;
   for (int i = ptype_count(mt1) - 1; i >= 0; i--) {
-    if (ptype(mt1, i) != ptype(mt2, i))
+    if (!oopDesc::equals(ptype(mt1, i), ptype(mt2, i)))
       return false;
   }
   return true;
@@ -4043,7 +4043,7 @@
  // This loop is taken verbatim from ClassLoader.java:
   do {
     acl = parent(acl);
-    if (cl == acl) {
+    if (oopDesc::equals(cl, acl)) {
       return true;
     }
     assert(++loop_count > 0, "loop_count overflow");
@@ -4073,7 +4073,7 @@
 
   oop cl = SystemDictionary::java_system_loader();
   while(cl != NULL) {
-    if (cl == loader) return true;
+    if (oopDesc::equals(cl, loader)) return true;
     cl = parent(cl);
   }
   return false;
--- a/src/hotspot/share/classfile/protectionDomainCache.cpp	Tue Apr 03 10:27:46 2018 +0200
+++ b/src/hotspot/share/classfile/protectionDomainCache.cpp	Tue Apr 03 13:15:27 2018 +0200
@@ -132,7 +132,7 @@
 
 ProtectionDomainCacheEntry* ProtectionDomainCacheTable::find_entry(int index, Handle protection_domain) {
   for (ProtectionDomainCacheEntry* e = bucket(index); e != NULL; e = e->next()) {
-    if (e->object_no_keepalive() == protection_domain()) {
+    if (oopDesc::equals(e->object_no_keepalive(), protection_domain())) {
       return e;
     }
   }
--- a/src/hotspot/share/classfile/systemDictionary.cpp	Tue Apr 03 10:27:46 2018 +0200
+++ b/src/hotspot/share/classfile/systemDictionary.cpp	Tue Apr 03 13:15:27 2018 +0200
@@ -182,7 +182,7 @@
     return false;
   }
   return (class_loader->klass() == SystemDictionary::jdk_internal_loader_ClassLoaders_AppClassLoader_klass() ||
-       class_loader == _java_system_loader);
+         oopDesc::equals(class_loader, _java_system_loader));
 }
 
 // Returns true if the passed class loader is the platform class loader.
@@ -391,7 +391,7 @@
        ((quicksuperk = childk->super()) != NULL) &&
 
          ((quicksuperk->name() == class_name) &&
-            (quicksuperk->class_loader()  == class_loader()))) {
+            (oopDesc::equals(quicksuperk->class_loader(), class_loader())))) {
            return quicksuperk;
     } else {
       PlaceholderEntry* probe = placeholders()->get_entry(p_index, p_hash, child_name, loader_data);
@@ -525,7 +525,7 @@
   bool calledholdinglock
       = ObjectSynchronizer::current_thread_holds_lock((JavaThread*)THREAD, lockObject);
   assert(calledholdinglock,"must hold lock for notify");
-  assert((!(lockObject() == _system_loader_lock_obj) && !is_parallelCapable(lockObject)), "unexpected double_lock_wait");
+  assert((!oopDesc::equals(lockObject(), _system_loader_lock_obj) && !is_parallelCapable(lockObject)), "unexpected double_lock_wait");
   ObjectSynchronizer::notifyall(lockObject, THREAD);
   intptr_t recursions =  ObjectSynchronizer::complete_exit(lockObject, THREAD);
   SystemDictionary_lock->wait();
@@ -843,7 +843,7 @@
       // If everything was OK (no exceptions, no null return value), and
       // class_loader is NOT the defining loader, do a little more bookkeeping.
       if (!HAS_PENDING_EXCEPTION && k != NULL &&
-        k->class_loader() != class_loader()) {
+        !oopDesc::equals(k->class_loader(), class_loader())) {
 
         check_constraints(d_hash, k, class_loader, false, THREAD);
 
@@ -989,7 +989,7 @@
   if (host_klass != NULL) {
     // Create a new CLD for anonymous class, that uses the same class loader
     // as the host_klass
-    guarantee(host_klass->class_loader() == class_loader(), "should be the same");
+    guarantee(oopDesc::equals(host_klass->class_loader(), class_loader()), "should be the same");
     loader_data = ClassLoaderData::anonymous_class_loader_data(class_loader);
   } else {
     loader_data = ClassLoaderData::class_loader_data(class_loader());
@@ -1747,7 +1747,7 @@
       == ObjectSynchronizer::owner_other) {
     // contention will likely happen, so increment the corresponding
     // contention counter.
-    if (loader_lock() == _system_loader_lock_obj) {
+    if (oopDesc::equals(loader_lock(), _system_loader_lock_obj)) {
       ClassLoader::sync_systemLoaderLockContentionRate()->inc();
     } else {
       ClassLoader::sync_nonSystemLoaderLockContentionRate()->inc();
@@ -2229,7 +2229,7 @@
       // cleared if revocation occurs too often for this type
      // NOTE that we must only do this when the class is initially
       // defined, not each time it is referenced from a new class loader
-      if (k->class_loader() == class_loader()) {
+      if (oopDesc::equals(k->class_loader(), class_loader())) {
         k->set_prototype_header(markOopDesc::biased_locking_prototype());
       }
     }
@@ -2421,7 +2421,7 @@
                                                Handle loader1, Handle loader2,
                                                bool is_method, TRAPS)  {
   // Nothing to do if loaders are the same.
-  if (loader1() == loader2()) {
+  if (oopDesc::equals(loader1(), loader2())) {
     return NULL;
   }
 
--- a/src/hotspot/share/code/dependencies.cpp	Tue Apr 03 10:27:46 2018 +0200
+++ b/src/hotspot/share/code/dependencies.cpp	Tue Apr 03 13:15:27 2018 +0200
@@ -1818,12 +1818,12 @@
 
   if (changes == NULL) {
     // Validate all CallSites
-    if (java_lang_invoke_CallSite::target(call_site) != method_handle)
+    if (!oopDesc::equals(java_lang_invoke_CallSite::target(call_site), method_handle))
       return call_site->klass();  // assertion failed
   } else {
     // Validate the given CallSite
-    if (call_site == changes->call_site() && java_lang_invoke_CallSite::target(call_site) != changes->method_handle()) {
-      assert(method_handle != changes->method_handle(), "must be");
+    if (oopDesc::equals(call_site, changes->call_site()) && !oopDesc::equals(java_lang_invoke_CallSite::target(call_site), changes->method_handle())) {
+      assert(!oopDesc::equals(method_handle, changes->method_handle()), "must be");
       return call_site->klass();  // assertion failed
     }
   }
--- a/src/hotspot/share/gc/shared/barrierSet.hpp	Tue Apr 03 10:27:46 2018 +0200
+++ b/src/hotspot/share/gc/shared/barrierSet.hpp	Tue Apr 03 13:15:27 2018 +0200
@@ -262,6 +262,10 @@
     static oop resolve(oop obj) {
       return Raw::resolve(obj);
     }
+
+    static bool equals(oop o1, oop o2) {
+      return Raw::equals(o1, o2);
+    }
   };
 };
 
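The default AccessBarrier above simply forwards equals() to the raw pointer compare. A concrete collector would override it in its own barrier set; the following is a hypothetical sketch (not part of this changeset), where ExampleAccessBarrier and resolve_forwarded() are illustrative names only:

    // Hypothetical: a GC that can expose two live copies of one object
    // canonicalizes both operands before comparing them.
    template <DecoratorSet decorators, typename BarrierSetT>
    class ExampleAccessBarrier: public BarrierSet::AccessBarrier<decorators, BarrierSetT> {
    public:
      static bool equals(oop o1, oop o2) {
        return resolve_forwarded(o1) == resolve_forwarded(o2);  // assumed helper
      }
    };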
--- a/src/hotspot/share/interpreter/bytecodeInterpreter.cpp	Tue Apr 03 10:27:46 2018 +0200
+++ b/src/hotspot/share/interpreter/bytecodeInterpreter.cpp	Tue Apr 03 13:15:27 2018 +0200
@@ -2435,7 +2435,7 @@
                   handle_exception);
           result = THREAD->vm_result();
         }
-        if (result == Universe::the_null_sentinel())
+        if (oopDesc::equals(result, Universe::the_null_sentinel()))
           result = NULL;
 
         VERIFY_OOP(result);
--- a/src/hotspot/share/interpreter/interpreterRuntime.cpp	Tue Apr 03 10:27:46 2018 +0200
+++ b/src/hotspot/share/interpreter/interpreterRuntime.cpp	Tue Apr 03 13:15:27 2018 +0200
@@ -208,7 +208,7 @@
     if (rindex >= 0) {
       oop coop = m->constants()->resolved_references()->obj_at(rindex);
       oop roop = (result == NULL ? Universe::the_null_sentinel() : result);
-      assert(roop == coop, "expected result for assembly code");
+      assert(oopDesc::equals(roop, coop), "expected result for assembly code");
     }
   }
 #endif
--- a/src/hotspot/share/memory/universe.cpp	Tue Apr 03 10:27:46 2018 +0200
+++ b/src/hotspot/share/memory/universe.cpp	Tue Apr 03 13:15:27 2018 +0200
@@ -603,12 +603,12 @@
   // preallocated errors with backtrace have been consumed. Also need to avoid
   // a potential loop which could happen if an out of memory occurs when attempting
   // to allocate the backtrace.
-  return ((throwable() != Universe::_out_of_memory_error_java_heap) &&
-          (throwable() != Universe::_out_of_memory_error_metaspace)  &&
-          (throwable() != Universe::_out_of_memory_error_class_metaspace)  &&
-          (throwable() != Universe::_out_of_memory_error_array_size) &&
-          (throwable() != Universe::_out_of_memory_error_gc_overhead_limit) &&
-          (throwable() != Universe::_out_of_memory_error_realloc_objects));
+  return ((!oopDesc::equals(throwable(), Universe::_out_of_memory_error_java_heap)) &&
+          (!oopDesc::equals(throwable(), Universe::_out_of_memory_error_metaspace))  &&
+          (!oopDesc::equals(throwable(), Universe::_out_of_memory_error_class_metaspace))  &&
+          (!oopDesc::equals(throwable(), Universe::_out_of_memory_error_array_size)) &&
+          (!oopDesc::equals(throwable(), Universe::_out_of_memory_error_gc_overhead_limit)) &&
+          (!oopDesc::equals(throwable(), Universe::_out_of_memory_error_realloc_objects)));
 }
 
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/oops/access.cpp	Tue Apr 03 13:15:27 2018 +0200
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/accessDecorators.hpp"
+
+// This macro instantiates selected accesses so that they are usable from the
+// access.hpp file, breaking the dependency on the access.inline.hpp file.
+#define INSTANTIATE_HPP_ACCESS(decorators, T, barrier_type)  \
+  template struct RuntimeDispatch<DecoratorFixup<decorators>::value, T, barrier_type>
+
+namespace AccessInternal {
+  INSTANTIATE_HPP_ACCESS(INTERNAL_EMPTY, oop, BARRIER_EQUALS);
+}
--- a/src/hotspot/share/oops/access.hpp	Tue Apr 03 10:27:46 2018 +0200
+++ b/src/hotspot/share/oops/access.hpp	Tue Apr 03 13:15:27 2018 +0200
@@ -22,16 +22,17 @@
  *
  */
 
-#ifndef SHARE_VM_RUNTIME_ACCESS_HPP
-#define SHARE_VM_RUNTIME_ACCESS_HPP
+#ifndef SHARE_OOPS_ACCESS_HPP
+#define SHARE_OOPS_ACCESS_HPP
 
 #include "memory/allocation.hpp"
-#include "metaprogramming/decay.hpp"
-#include "metaprogramming/integralConstant.hpp"
+#include "oops/accessBackend.hpp"
+#include "oops/accessDecorators.hpp"
 #include "oops/oopsHierarchy.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/globalDefinitions.hpp"
 
+
 // = GENERAL =
 // Access is an API for performing accesses with declarative semantics. Each access can have a number of "decorators".
 // A decorator is an attribute or property that affects the way a memory access is performed in some way.
@@ -39,11 +40,12 @@
 // e.g. strength of references, strength of GC barriers, or whether compression should be applied or not.
 // Some decorators are set at buildtime, such as whether primitives require GC barriers or not, others
 // at callsites such as whether an access is in the heap or not, and others are resolved at runtime
-// such as GC-specific barriers and encoding/decoding compressed oops.
+// such as GC-specific barriers and encoding/decoding compressed oops. For more information about what
+// decorators are available, cf. oops/accessDecorators.hpp.
 // By pipelining handling of these decorators, the design of the Access API allows separation of concern
 // over the different orthogonal concerns of decorators, while providing a powerful way of
 // expressing these orthogonal semantic properties in a unified way.
-
+//
 // == OPERATIONS ==
 // * load: Load a value from an address.
 // * load_at: Load a value from an internal pointer relative to a base object.
@@ -56,329 +58,39 @@
 // * arraycopy: Copy data from one heap array to another heap array.
 // * clone: Clone the contents of an object to a newly allocated object.
 // * resolve: Resolve a stable to-space invariant oop that is guaranteed not to relocate its payload until a subsequent thread transition.
-
-typedef uint64_t DecoratorSet;
-
-// == Internal Decorators - do not use ==
-// * INTERNAL_EMPTY: This is the name for the empty decorator set (in absence of other decorators).
-// * INTERNAL_CONVERT_COMPRESSED_OOPS: This is an oop access that will require converting an oop
-//   to a narrowOop or vice versa, if UseCompressedOops is known to be set.
-// * INTERNAL_VALUE_IS_OOP: Remember that the involved access is on oop rather than primitive.
-const DecoratorSet INTERNAL_EMPTY                    = UCONST64(0);
-const DecoratorSet INTERNAL_CONVERT_COMPRESSED_OOP   = UCONST64(1) << 1;
-const DecoratorSet INTERNAL_VALUE_IS_OOP             = UCONST64(1) << 2;
-
-// == Internal build-time Decorators ==
-// * INTERNAL_BT_BARRIER_ON_PRIMITIVES: This is set in the barrierSetConfig.hpp file.
-// * INTERNAL_BT_TO_SPACE_INVARIANT: This is set in the barrierSetConfig.hpp file iff
-//   no GC is bundled in the build that is to-space invariant.
-const DecoratorSet INTERNAL_BT_BARRIER_ON_PRIMITIVES = UCONST64(1) << 3;
-const DecoratorSet INTERNAL_BT_TO_SPACE_INVARIANT    = UCONST64(1) << 4;
-
-// == Internal run-time Decorators ==
-// * INTERNAL_RT_USE_COMPRESSED_OOPS: This decorator will be set in runtime resolved
-//   access backends iff UseCompressedOops is true.
-const DecoratorSet INTERNAL_RT_USE_COMPRESSED_OOPS   = UCONST64(1) << 5;
-
-const DecoratorSet INTERNAL_DECORATOR_MASK           = INTERNAL_CONVERT_COMPRESSED_OOP | INTERNAL_VALUE_IS_OOP |
-                                                       INTERNAL_BT_BARRIER_ON_PRIMITIVES | INTERNAL_RT_USE_COMPRESSED_OOPS;
-
-// == Memory Ordering Decorators ==
-// The memory ordering decorators can be described in the following way:
-// === Decorator Rules ===
-// The different types of memory ordering guarantees have a strict order of strength.
-// Explicitly specifying the stronger ordering implies that the guarantees of the weaker
-// property holds too. The names come from the C++11 atomic operations, and typically
-// have a JMM equivalent property.
-// The equivalence may be viewed like this:
-// MO_UNORDERED is equivalent to JMM plain.
-// MO_VOLATILE has no equivalence in JMM, because it's a C++ thing.
-// MO_RELAXED is equivalent to JMM opaque.
-// MO_ACQUIRE is equivalent to JMM acquire.
-// MO_RELEASE is equivalent to JMM release.
-// MO_SEQ_CST is equivalent to JMM volatile.
+// * equals: Object equality, e.g. when different copies of the same object are in use (from-space vs. to-space)
 //
-// === Stores ===
-//  * MO_UNORDERED (Default): No guarantees.
-//    - The compiler and hardware are free to reorder aggressively. And they will.
-//  * MO_VOLATILE: Volatile stores (in the C++ sense).
-//    - The stores are not reordered by the compiler (but possibly the HW) w.r.t. other
-//      volatile accesses in program order (but possibly non-volatile accesses).
-//  * MO_RELAXED: Relaxed atomic stores.
-//    - The stores are atomic.
-//    - Guarantees from volatile stores hold.
-//  * MO_RELEASE: Releasing stores.
-//    - The releasing store will make its preceding memory accesses observable to memory accesses
-//      subsequent to an acquiring load observing this releasing store.
-//    - Guarantees from relaxed stores hold.
-//  * MO_SEQ_CST: Sequentially consistent stores.
-//    - The stores are observed in the same order by MO_SEQ_CST loads on other processors
-//    - Preceding loads and stores in program order are not reordered with subsequent loads and stores in program order.
-//    - Guarantees from releasing stores hold.
-// === Loads ===
-//  * MO_UNORDERED (Default): No guarantees
-//    - The compiler and hardware are free to reorder aggressively. And they will.
-//  * MO_VOLATILE: Volatile loads (in the C++ sense).
-//    - The loads are not reordered by the compiler (but possibly the HW) w.r.t. other
-//      volatile accesses in program order (but possibly non-volatile accesses).
-//  * MO_RELAXED: Relaxed atomic loads.
-//    - The stores are atomic.
-//    - Guarantees from volatile loads hold.
-//  * MO_ACQUIRE: Acquiring loads.
-//    - An acquiring load will make subsequent memory accesses observe the memory accesses
-//      preceding the releasing store that the acquiring load observed.
-//    - Guarantees from relaxed loads hold.
-//  * MO_SEQ_CST: Sequentially consistent loads.
-//    - These loads observe MO_SEQ_CST stores in the same order on other processors
-//    - Preceding loads and stores in program order are not reordered with subsequent loads and stores in program order.
-//    - Guarantees from acquiring loads hold.
-// === Atomic Cmpxchg ===
-//  * MO_RELAXED: Atomic but relaxed cmpxchg.
-//    - Guarantees from MO_RELAXED loads and MO_RELAXED stores hold unconditionally.
-//  * MO_SEQ_CST: Sequentially consistent cmpxchg.
-//    - Guarantees from MO_SEQ_CST loads and MO_SEQ_CST stores hold unconditionally.
-// === Atomic Xchg ===
-//  * MO_RELAXED: Atomic but relaxed atomic xchg.
-//    - Guarantees from MO_RELAXED loads and MO_RELAXED stores hold.
-//  * MO_SEQ_CST: Sequentially consistent xchg.
-//    - Guarantees from MO_SEQ_CST loads and MO_SEQ_CST stores hold.
-const DecoratorSet MO_UNORDERED      = UCONST64(1) << 6;
-const DecoratorSet MO_VOLATILE       = UCONST64(1) << 7;
-const DecoratorSet MO_RELAXED        = UCONST64(1) << 8;
-const DecoratorSet MO_ACQUIRE        = UCONST64(1) << 9;
-const DecoratorSet MO_RELEASE        = UCONST64(1) << 10;
-const DecoratorSet MO_SEQ_CST        = UCONST64(1) << 11;
-const DecoratorSet MO_DECORATOR_MASK = MO_UNORDERED | MO_VOLATILE | MO_RELAXED |
-                                       MO_ACQUIRE | MO_RELEASE | MO_SEQ_CST;
-
-// === Barrier Strength Decorators ===
-// * AS_RAW: The access will translate into a raw memory access, hence ignoring all semantic concerns
-//   except memory ordering and compressed oops. This will bypass runtime function pointer dispatching
-//   in the pipeline and hardwire to raw accesses without going trough the GC access barriers.
-//  - Accesses on oop* translate to raw memory accesses without runtime checks
-//  - Accesses on narrowOop* translate to encoded/decoded memory accesses without runtime checks
-//  - Accesses on HeapWord* translate to a runtime check choosing one of the above
-//  - Accesses on other types translate to raw memory accesses without runtime checks
-// * AS_DEST_NOT_INITIALIZED: This property can be important to e.g. SATB barriers by
-//   marking that the previous value is uninitialized nonsense rather than a real value.
-// * AS_NO_KEEPALIVE: The barrier is used only on oop references and will not keep any involved objects
-//   alive, regardless of the type of reference being accessed. It will however perform the memory access
-//   in a consistent way w.r.t. e.g. concurrent compaction, so that the right field is being accessed,
-//   or maintain, e.g. intergenerational or interregional pointers if applicable. This should be used with
-//   extreme caution in isolated scopes.
-// * AS_NORMAL: The accesses will be resolved to an accessor on the BarrierSet class, giving the
-//   responsibility of performing the access and what barriers to be performed to the GC. This is the default.
-//   Note that primitive accesses will only be resolved on the barrier set if the appropriate build-time
-//   decorator for enabling primitive barriers is enabled for the build.
-const DecoratorSet AS_RAW                  = UCONST64(1) << 12;
-const DecoratorSet AS_DEST_NOT_INITIALIZED = UCONST64(1) << 13;
-const DecoratorSet AS_NO_KEEPALIVE         = UCONST64(1) << 14;
-const DecoratorSet AS_NORMAL               = UCONST64(1) << 15;
-const DecoratorSet AS_DECORATOR_MASK       = AS_RAW | AS_DEST_NOT_INITIALIZED |
-                                             AS_NO_KEEPALIVE | AS_NORMAL;
-
-// === Reference Strength Decorators ===
-// These decorators only apply to accesses on oop-like types (oop/narrowOop).
-// * ON_STRONG_OOP_REF: Memory access is performed on a strongly reachable reference.
-// * ON_WEAK_OOP_REF: The memory access is performed on a weakly reachable reference.
-// * ON_PHANTOM_OOP_REF: The memory access is performed on a phantomly reachable reference.
-//   This is the same ring of strength as jweak and weak oops in the VM.
-// * ON_UNKNOWN_OOP_REF: The memory access is performed on a reference of unknown strength.
-//   This could for example come from the unsafe API.
-// * Default (no explicit reference strength specified): ON_STRONG_OOP_REF
-const DecoratorSet ON_STRONG_OOP_REF  = UCONST64(1) << 16;
-const DecoratorSet ON_WEAK_OOP_REF    = UCONST64(1) << 17;
-const DecoratorSet ON_PHANTOM_OOP_REF = UCONST64(1) << 18;
-const DecoratorSet ON_UNKNOWN_OOP_REF = UCONST64(1) << 19;
-const DecoratorSet ON_DECORATOR_MASK  = ON_STRONG_OOP_REF | ON_WEAK_OOP_REF |
-                                        ON_PHANTOM_OOP_REF | ON_UNKNOWN_OOP_REF;
-
-// === Access Location ===
-// Accesses can take place in, e.g. the heap, old or young generation and different native roots.
-// The location is important to the GC as it may imply different actions. The following decorators are used:
-// * IN_HEAP: The access is performed in the heap. Many barriers such as card marking will
-//   be omitted if this decorator is not set.
-// * IN_HEAP_ARRAY: The access is performed on a heap allocated array. This is sometimes a special case
-//   for some GCs, and implies that it is an IN_HEAP.
-// * IN_ROOT: The access is performed in an off-heap data structure pointing into the Java heap.
-// * IN_CONCURRENT_ROOT: The access is performed in an off-heap data structure pointing into the Java heap,
-//   but is notably not scanned during safepoints. This is sometimes a special case for some GCs and
-//   implies that it is also an IN_ROOT.
-const DecoratorSet IN_HEAP            = UCONST64(1) << 20;
-const DecoratorSet IN_HEAP_ARRAY      = UCONST64(1) << 21;
-const DecoratorSet IN_ROOT            = UCONST64(1) << 22;
-const DecoratorSet IN_CONCURRENT_ROOT = UCONST64(1) << 23;
-const DecoratorSet IN_ARCHIVE_ROOT    = UCONST64(1) << 24;
-const DecoratorSet IN_DECORATOR_MASK  = IN_HEAP | IN_HEAP_ARRAY |
-                                        IN_ROOT | IN_CONCURRENT_ROOT |
-                                        IN_ARCHIVE_ROOT;
-
-// == Value Decorators ==
-// * OOP_NOT_NULL: This property can make certain barriers faster such as compressing oops.
-const DecoratorSet OOP_NOT_NULL       = UCONST64(1) << 25;
-const DecoratorSet OOP_DECORATOR_MASK = OOP_NOT_NULL;
-
-// == Arraycopy Decorators ==
-// * ARRAYCOPY_CHECKCAST: This property means that the class of the objects in source
-//   are not guaranteed to be subclasses of the class of the destination array. This requires
-//   a check-cast barrier during the copying operation. If this is not set, it is assumed
-//   that the array is covariant: (the source array type is-a destination array type)
-// * ARRAYCOPY_DISJOINT: This property means that it is known that the two array ranges
-//   are disjoint.
-// * ARRAYCOPY_ARRAYOF: The copy is in the arrayof form.
-// * ARRAYCOPY_ATOMIC: The accesses have to be atomic over the size of its elements.
-// * ARRAYCOPY_ALIGNED: The accesses have to be aligned on a HeapWord.
-const DecoratorSet ARRAYCOPY_CHECKCAST            = UCONST64(1) << 26;
-const DecoratorSet ARRAYCOPY_DISJOINT             = UCONST64(1) << 27;
-const DecoratorSet ARRAYCOPY_ARRAYOF              = UCONST64(1) << 28;
-const DecoratorSet ARRAYCOPY_ATOMIC               = UCONST64(1) << 29;
-const DecoratorSet ARRAYCOPY_ALIGNED              = UCONST64(1) << 30;
-const DecoratorSet ARRAYCOPY_DECORATOR_MASK       = ARRAYCOPY_CHECKCAST | ARRAYCOPY_DISJOINT |
-                                                    ARRAYCOPY_DISJOINT | ARRAYCOPY_ARRAYOF |
-                                                    ARRAYCOPY_ATOMIC | ARRAYCOPY_ALIGNED;
-
-// The HasDecorator trait can help at compile-time determining whether a decorator set
-// has an intersection with a certain other decorator set
-template <DecoratorSet decorators, DecoratorSet decorator>
-struct HasDecorator: public IntegralConstant<bool, (decorators & decorator) != 0> {};
-
-namespace AccessInternal {
-  template <typename T>
-  struct OopOrNarrowOopInternal: AllStatic {
-    typedef oop type;
-  };
-
-  template <>
-  struct OopOrNarrowOopInternal<narrowOop>: AllStatic {
-    typedef narrowOop type;
-  };
-
-  // This metafunction returns a canonicalized oop/narrowOop type for a passed
-  // in oop-like types passed in from oop_* overloads where the user has sworn
-  // that the passed in values should be oop-like (e.g. oop, oopDesc*, arrayOop,
-  // narrowOoop, instanceOopDesc*, and random other things).
-  // In the oop_* overloads, it must hold that if the passed in type T is not
-  // narrowOop, then it by contract has to be one of many oop-like types implicitly
-  // convertible to oop, and hence returns oop as the canonical oop type.
-  // If it turns out it was not, then the implicit conversion to oop will fail
-  // to compile, as desired.
-  template <typename T>
-  struct OopOrNarrowOop: AllStatic {
-    typedef typename OopOrNarrowOopInternal<typename Decay<T>::type>::type type;
-  };
-
-  inline void* field_addr(oop base, ptrdiff_t byte_offset) {
-    return reinterpret_cast<void*>(reinterpret_cast<intptr_t>((void*)base) + byte_offset);
-  }
-
-  template <DecoratorSet decorators, typename T>
-  void store_at(oop base, ptrdiff_t offset, T value);
-
-  template <DecoratorSet decorators, typename T>
-  T load_at(oop base, ptrdiff_t offset);
-
-  template <DecoratorSet decorators, typename T>
-  T atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value);
-
-  template <DecoratorSet decorators, typename T>
-  T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset);
-
-  template <DecoratorSet decorators, typename P, typename T>
-  void store(P* addr, T value);
-
-  template <DecoratorSet decorators, typename P, typename T>
-  T load(P* addr);
-
-  template <DecoratorSet decorators, typename P, typename T>
-  T atomic_cmpxchg(T new_value, P* addr, T compare_value);
-
-  template <DecoratorSet decorators, typename P, typename T>
-  T atomic_xchg(T new_value, P* addr);
-
-  template <DecoratorSet decorators, typename T>
-  bool arraycopy(arrayOop src_obj, arrayOop dst_obj, T *src, T *dst, size_t length);
-
-  template <DecoratorSet decorators>
-  void clone(oop src, oop dst, size_t size);
-
-  template <DecoratorSet decorators>
-  oop resolve(oop src);
-
-  // Infer the type that should be returned from a load.
-  template <typename P, DecoratorSet decorators>
-  class OopLoadProxy: public StackObj {
-  private:
-    P *const _addr;
-  public:
-    OopLoadProxy(P* addr) : _addr(addr) {}
-
-    inline operator oop() {
-      return load<decorators | INTERNAL_VALUE_IS_OOP, P, oop>(_addr);
-    }
-
-    inline operator narrowOop() {
-      return load<decorators | INTERNAL_VALUE_IS_OOP, P, narrowOop>(_addr);
-    }
-
-    template <typename T>
-    inline bool operator ==(const T& other) const {
-      return load<decorators | INTERNAL_VALUE_IS_OOP, P, T>(_addr) == other;
-    }
-
-    template <typename T>
-    inline bool operator !=(const T& other) const {
-      return load<decorators | INTERNAL_VALUE_IS_OOP, P, T>(_addr) != other;
-    }
-  };
-
-  // Infer the type that should be returned from a load_at.
-  template <DecoratorSet decorators>
-  class LoadAtProxy: public StackObj {
-  private:
-    const oop _base;
-    const ptrdiff_t _offset;
-  public:
-    LoadAtProxy(oop base, ptrdiff_t offset) : _base(base), _offset(offset) {}
-
-    template <typename T>
-    inline operator T() const {
-      return load_at<decorators, T>(_base, _offset);
-    }
-
-    template <typename T>
-    inline bool operator ==(const T& other) const { return load_at<decorators, T>(_base, _offset) == other; }
-
-    template <typename T>
-    inline bool operator !=(const T& other) const { return load_at<decorators, T>(_base, _offset) != other; }
-  };
-
-  template <DecoratorSet decorators>
-  class OopLoadAtProxy: public StackObj {
-  private:
-    const oop _base;
-    const ptrdiff_t _offset;
-  public:
-    OopLoadAtProxy(oop base, ptrdiff_t offset) : _base(base), _offset(offset) {}
-
-    inline operator oop() const {
-      return load_at<decorators | INTERNAL_VALUE_IS_OOP, oop>(_base, _offset);
-    }
-
-    inline operator narrowOop() const {
-      return load_at<decorators | INTERNAL_VALUE_IS_OOP, narrowOop>(_base, _offset);
-    }
-
-    template <typename T>
-    inline bool operator ==(const T& other) const {
-      return load_at<decorators | INTERNAL_VALUE_IS_OOP, T>(_base, _offset) == other;
-    }
-
-    template <typename T>
-    inline bool operator !=(const T& other) const {
-      return load_at<decorators | INTERNAL_VALUE_IS_OOP, T>(_base, _offset) != other;
-    }
-  };
-}
+// == IMPLEMENTATION ==
+// Each access goes through the following steps in a template pipeline.
+// There are essentially 5 steps for each access:
+// * Step 1:   Set default decorators and decay types. This step gets rid of CV qualifiers
+//             and sets default decorators to sensible values.
+// * Step 2:   Reduce types. This step makes sure there is only a single T type and not
+//             multiple types. The P type of the address and T type of the value must
+//             match.
+// * Step 3:   Pre-runtime dispatch. This step checks whether a runtime call can be
+//             avoided, and in that case avoids it (calling raw accesses or
+//             primitive accesses in a build that does not require primitive GC barriers)
+// * Step 4:   Runtime-dispatch. This step performs a runtime dispatch to the corresponding
+//             BarrierSet::AccessBarrier accessor that attaches GC-required barriers
+//             to the access.
+// * Step 5.a: Barrier resolution. This step is invoked the first time a runtime-dispatch
+//             happens for an access. The appropriate BarrierSet::AccessBarrier accessor
+//             is resolved, then the function pointer is updated to that accessor for
+//             future invocations.
+// * Step 5.b: Post-runtime dispatch. This step now casts previously unknown types such
+//             as the address type of an oop on the heap (is it oop* or narrowOop*) to
+//             the appropriate type. It also splits sufficiently orthogonal accesses into
+//             different functions, such as whether the access involves oops or primitives
+//             and whether the access is performed on the heap or outside. Then the
+//             appropriate BarrierSet::AccessBarrier is called to perform the access.
+//
+// The implementation of steps 1-4 resides in accessBackend.hpp, to allow selected
+// accesses to be accessible from only access.hpp, as opposed to access.inline.hpp.
+// Steps 5.a and 5.b require knowledge about the GC backends, and therefore need to
+// include the various GC backend .inline.hpp headers. Their implementation resides in
+// access.inline.hpp. The accesses that are allowed through the access.hpp file
+// must be instantiated in access.cpp using the INSTANTIATE_HPP_ACCESS macro.
 
 template <DecoratorSet decorators = INTERNAL_EMPTY>
 class Access: public AllStatic {
@@ -554,6 +266,11 @@
     verify_decorators<INTERNAL_EMPTY>();
     return AccessInternal::resolve<decorators>(obj);
   }
+
+  static bool equals(oop o1, oop o2) {
+    verify_decorators<INTERNAL_EMPTY>();
+    return AccessInternal::equals<decorators>(o1, o2);
+  }
 };
 
 // Helper for performing raw accesses (knows only of memory ordering
@@ -571,4 +288,41 @@
 template <DecoratorSet decorators = INTERNAL_EMPTY>
 class RootAccess: public Access<IN_ROOT | decorators> {};
 
-#endif // SHARE_VM_RUNTIME_ACCESS_HPP
+template <DecoratorSet decorators>
+template <DecoratorSet expected_decorators>
+void Access<decorators>::verify_decorators() {
+  STATIC_ASSERT((~expected_decorators & decorators) == 0); // unexpected decorator used
+  const DecoratorSet barrier_strength_decorators = decorators & AS_DECORATOR_MASK;
+  STATIC_ASSERT(barrier_strength_decorators == 0 || ( // make sure barrier strength decorators are disjoint if set
+    (barrier_strength_decorators ^ AS_NO_KEEPALIVE) == 0 ||
+    (barrier_strength_decorators ^ AS_DEST_NOT_INITIALIZED) == 0 ||
+    (barrier_strength_decorators ^ AS_RAW) == 0 ||
+    (barrier_strength_decorators ^ AS_NORMAL) == 0
+  ));
+  const DecoratorSet ref_strength_decorators = decorators & ON_DECORATOR_MASK;
+  STATIC_ASSERT(ref_strength_decorators == 0 || ( // make sure ref strength decorators are disjoint if set
+    (ref_strength_decorators ^ ON_STRONG_OOP_REF) == 0 ||
+    (ref_strength_decorators ^ ON_WEAK_OOP_REF) == 0 ||
+    (ref_strength_decorators ^ ON_PHANTOM_OOP_REF) == 0 ||
+    (ref_strength_decorators ^ ON_UNKNOWN_OOP_REF) == 0
+  ));
+  const DecoratorSet memory_ordering_decorators = decorators & MO_DECORATOR_MASK;
+  STATIC_ASSERT(memory_ordering_decorators == 0 || ( // make sure memory ordering decorators are disjoint if set
+    (memory_ordering_decorators ^ MO_UNORDERED) == 0 ||
+    (memory_ordering_decorators ^ MO_VOLATILE) == 0 ||
+    (memory_ordering_decorators ^ MO_RELAXED) == 0 ||
+    (memory_ordering_decorators ^ MO_ACQUIRE) == 0 ||
+    (memory_ordering_decorators ^ MO_RELEASE) == 0 ||
+    (memory_ordering_decorators ^ MO_SEQ_CST) == 0
+  ));
+  const DecoratorSet location_decorators = decorators & IN_DECORATOR_MASK;
+  STATIC_ASSERT(location_decorators == 0 || ( // make sure location decorators are disjoint if set
+    (location_decorators ^ IN_ROOT) == 0 ||
+    (location_decorators ^ IN_HEAP) == 0 ||
+    (location_decorators ^ (IN_HEAP | IN_HEAP_ARRAY)) == 0 ||
+    (location_decorators ^ (IN_ROOT | IN_CONCURRENT_ROOT)) == 0 ||
+    (location_decorators ^ (IN_ROOT | IN_ARCHIVE_ROOT)) == 0
+  ));
+}
+
+#endif // SHARE_OOPS_ACCESS_HPP
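Putting the pieces together, a comparison flows through the new entry point roughly as sketched below. Note that equals() verifies its decorators against INTERNAL_EMPTY, so only the undecorated Access<> form is legal; the equals_init resolver named here is assumed by analogy with the store_init/load_init resolvers shown in access.inline.hpp below:

    // Sketch of the dispatch chain for the new equals operation:
    //   oopDesc::equals(o1, o2)                        wrapper in oop.hpp
    //     -> Access<>::equals(o1, o2)                  verifies INTERNAL_EMPTY decorators
    //       -> AccessInternal::equals<decorators>()    RuntimeDispatch, BARRIER_EQUALS
    //         -> first call resolves the active BarrierSet's AccessBarrier::equals
    //            (step 5.a) and patches the function pointer; later calls go
    //            straight through the cached pointer (step 5.b).
    bool same = Access<>::equals(o1, o2);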
--- a/src/hotspot/share/oops/access.inline.hpp	Tue Apr 03 10:27:46 2018 +0200
+++ b/src/hotspot/share/oops/access.inline.hpp	Tue Apr 03 13:15:27 2018 +0200
@@ -22,43 +22,28 @@
  *
  */
 
-#ifndef SHARE_VM_RUNTIME_ACCESS_INLINE_HPP
-#define SHARE_VM_RUNTIME_ACCESS_INLINE_HPP
+#ifndef SHARE_OOPS_ACCESS_INLINE_HPP
+#define SHARE_OOPS_ACCESS_INLINE_HPP
 
 #include "gc/shared/barrierSetConfig.inline.hpp"
-#include "metaprogramming/conditional.hpp"
-#include "metaprogramming/isFloatingPoint.hpp"
-#include "metaprogramming/isIntegral.hpp"
-#include "metaprogramming/isPointer.hpp"
-#include "metaprogramming/isVolatile.hpp"
 #include "oops/access.hpp"
 #include "oops/accessBackend.inline.hpp"
-#include "runtime/atomic.hpp"
-#include "runtime/orderAccess.inline.hpp"
 
-// This file outlines the template pipeline of accesses going through the Access
-// API. There are essentially 5 steps for each access.
-// * Step 1: Set default decorators and decay types. This step gets rid of CV qualifiers
-//           and sets default decorators to sensible values.
-// * Step 2: Reduce types. This step makes sure there is only a single T type and not
-//           multiple types. The P type of the address and T type of the value must
-//           match.
-// * Step 3: Pre-runtime dispatch. This step checks whether a runtime call can be
-//           avoided, and in that case avoids it (calling raw accesses or
-//           primitive accesses in a build that does not require primitive GC barriers)
-// * Step 4: Runtime-dispatch. This step performs a runtime dispatch to the corresponding
-//           BarrierSet::AccessBarrier accessor that attaches GC-required barriers
-//           to the access.
-// * Step 5: Post-runtime dispatch. This step now casts previously unknown types such
-//           as the address type of an oop on the heap (is it oop* or narrowOop*) to
-//           the appropriate type. It also splits sufficiently orthogonal accesses into
-//           different functions, such as whether the access involves oops or primitives
-//           and whether the access is performed on the heap or outside. Then the
-//           appropriate BarrierSet::AccessBarrier is called to perform the access.
+// This file outlines the last 2 steps of the template pipeline of accesses going through
+// the Access API.
+// * Step 5.a: Barrier resolution. This step is invoked the first time a runtime-dispatch
+//             happens for an access. The appropriate BarrierSet::AccessBarrier accessor
+//             is resolved, then the function pointer is updated to that accessor for
+//             future invocations.
+// * Step 5.b: Post-runtime dispatch. This step now casts previously unknown types such
+//             as the address type of an oop on the heap (is it oop* or narrowOop*) to
+//             the appropriate type. It also splits sufficiently orthogonal accesses into
+//             different functions, such as whether the access involves oops or primitives
+//             and whether the access is performed on the heap or outside. Then the
+//             appropriate BarrierSet::AccessBarrier is called to perform the access.
 
 namespace AccessInternal {
-
-  // Step 5: Post-runtime dispatch.
+  // Step 5.b: Post-runtime dispatch.
   // This class is the last step before calling the BarrierSet::AccessBarrier.
   // Here we make sure to figure out types that were not known prior to the
   // runtime dispatch, such as whether an oop on the heap is oop or narrowOop.
@@ -214,6 +199,13 @@
     }
   };
 
+  template <class GCBarrierType, DecoratorSet decorators>
+  struct PostRuntimeDispatch<GCBarrierType, BARRIER_EQUALS, decorators>: public AllStatic {
+    static bool access_barrier(oop o1, oop o2) {
+      return GCBarrierType::equals(o1, o2);
+    }
+  };
+
   // Resolving accessors with barriers from the barrier set happens in two steps.
   // 1. Expand paths with runtime-decorators, e.g. is UseCompressedOops on or off.
   // 2. Expand paths for each BarrierSet available in the system.
@@ -279,7 +271,7 @@
     }
   };
 
-  // Step 4: Runtime dispatch
+  // Step 5.a: Barrier resolution
   // The RuntimeDispatch class is responsible for performing a runtime dispatch of the
   // accessor. This is required when the access either depends on whether compressed oops
   // is being used, or it depends on which GC implementation was chosen (e.g. requires GC
@@ -288,888 +280,89 @@
   // it resolves which accessor to be used in future invocations and patches the
   // function pointer to this new accessor.
 
-  template <DecoratorSet decorators, typename T, BarrierType type>
-  struct RuntimeDispatch: AllStatic {};
-
   template <DecoratorSet decorators, typename T>
-  struct RuntimeDispatch<decorators, T, BARRIER_STORE>: AllStatic {
-    typedef typename AccessFunction<decorators, T, BARRIER_STORE>::type func_t;
-    static func_t _store_func;
-
-    static void store_init(void* addr, T value) {
-      func_t function = BarrierResolver<decorators, func_t, BARRIER_STORE>::resolve_barrier();
-      _store_func = function;
-      function(addr, value);
-    }
-
-    static inline void store(void* addr, T value) {
-      _store_func(addr, value);
-    }
-  };
-
-  template <DecoratorSet decorators, typename T>
-  struct RuntimeDispatch<decorators, T, BARRIER_STORE_AT>: AllStatic {
-    typedef typename AccessFunction<decorators, T, BARRIER_STORE_AT>::type func_t;
-    static func_t _store_at_func;
-
-    static void store_at_init(oop base, ptrdiff_t offset, T value) {
-      func_t function = BarrierResolver<decorators, func_t, BARRIER_STORE_AT>::resolve_barrier();
-      _store_at_func = function;
-      function(base, offset, value);
-    }
-
-    static inline void store_at(oop base, ptrdiff_t offset, T value) {
-      _store_at_func(base, offset, value);
-    }
-  };
-
-  template <DecoratorSet decorators, typename T>
-  struct RuntimeDispatch<decorators, T, BARRIER_LOAD>: AllStatic {
-    typedef typename AccessFunction<decorators, T, BARRIER_LOAD>::type func_t;
-    static func_t _load_func;
-
-    static T load_init(void* addr) {
-      func_t function = BarrierResolver<decorators, func_t, BARRIER_LOAD>::resolve_barrier();
-      _load_func = function;
-      return function(addr);
-    }
-
-    static inline T load(void* addr) {
-      return _load_func(addr);
-    }
-  };
-
-  template <DecoratorSet decorators, typename T>
-  struct RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>: AllStatic {
-    typedef typename AccessFunction<decorators, T, BARRIER_LOAD_AT>::type func_t;
-    static func_t _load_at_func;
-
-    static T load_at_init(oop base, ptrdiff_t offset) {
-      func_t function = BarrierResolver<decorators, func_t, BARRIER_LOAD_AT>::resolve_barrier();
-      _load_at_func = function;
-      return function(base, offset);
-    }
-
-    static inline T load_at(oop base, ptrdiff_t offset) {
-      return _load_at_func(base, offset);
-    }
-  };
-
-  template <DecoratorSet decorators, typename T>
-  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>: AllStatic {
-    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG>::type func_t;
-    static func_t _atomic_cmpxchg_func;
-
-    static T atomic_cmpxchg_init(T new_value, void* addr, T compare_value) {
-      func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_CMPXCHG>::resolve_barrier();
-      _atomic_cmpxchg_func = function;
-      return function(new_value, addr, compare_value);
-    }
-
-    static inline T atomic_cmpxchg(T new_value, void* addr, T compare_value) {
-      return _atomic_cmpxchg_func(new_value, addr, compare_value);
-    }
-  };
-
-  template <DecoratorSet decorators, typename T>
-  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>: AllStatic {
-    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::type func_t;
-    static func_t _atomic_cmpxchg_at_func;
-
-    static T atomic_cmpxchg_at_init(T new_value, oop base, ptrdiff_t offset, T compare_value) {
-      func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_CMPXCHG_AT>::resolve_barrier();
-      _atomic_cmpxchg_at_func = function;
-      return function(new_value, base, offset, compare_value);
-    }
-
-    static inline T atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
-      return _atomic_cmpxchg_at_func(new_value, base, offset, compare_value);
-    }
-  };
+  void RuntimeDispatch<decorators, T, BARRIER_STORE>::store_init(void* addr, T value) {
+    func_t function = BarrierResolver<decorators, func_t, BARRIER_STORE>::resolve_barrier();
+    _store_func = function;
+    function(addr, value);
+  }
 
   template <DecoratorSet decorators, typename T>
-  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>: AllStatic {
-    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG>::type func_t;
-    static func_t _atomic_xchg_func;
-
-    static T atomic_xchg_init(T new_value, void* addr) {
-      func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_XCHG>::resolve_barrier();
-      _atomic_xchg_func = function;
-      return function(new_value, addr);
-    }
-
-    static inline T atomic_xchg(T new_value, void* addr) {
-      return _atomic_xchg_func(new_value, addr);
-    }
-  };
-
-  template <DecoratorSet decorators, typename T>
-  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>: AllStatic {
-    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type func_t;
-    static func_t _atomic_xchg_at_func;
-
-    static T atomic_xchg_at_init(T new_value, oop base, ptrdiff_t offset) {
-      func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_XCHG_AT>::resolve_barrier();
-      _atomic_xchg_at_func = function;
-      return function(new_value, base, offset);
-    }
-
-    static inline T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
-      return _atomic_xchg_at_func(new_value, base, offset);
-    }
-  };
-
-  template <DecoratorSet decorators, typename T>
-  struct RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>: AllStatic {
-    typedef typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type func_t;
-    static func_t _arraycopy_func;
-
-    static bool arraycopy_init(arrayOop src_obj, arrayOop dst_obj, T *src, T* dst, size_t length) {
-      func_t function = BarrierResolver<decorators, func_t, BARRIER_ARRAYCOPY>::resolve_barrier();
-      _arraycopy_func = function;
-      return function(src_obj, dst_obj, src, dst, length);
-    }
-
-    static inline bool arraycopy(arrayOop src_obj, arrayOop dst_obj, T *src, T* dst, size_t length) {
-      return _arraycopy_func(src_obj, dst_obj, src, dst, length);
-    }
-  };
-
-  template <DecoratorSet decorators, typename T>
-  struct RuntimeDispatch<decorators, T, BARRIER_CLONE>: AllStatic {
-    typedef typename AccessFunction<decorators, T, BARRIER_CLONE>::type func_t;
-    static func_t _clone_func;
-
-    static void clone_init(oop src, oop dst, size_t size) {
-      func_t function = BarrierResolver<decorators, func_t, BARRIER_CLONE>::resolve_barrier();
-      _clone_func = function;
-      function(src, dst, size);
-    }
-
-    static inline void clone(oop src, oop dst, size_t size) {
-      _clone_func(src, dst, size);
-    }
-  };
-
-  template <DecoratorSet decorators, typename T>
-  struct RuntimeDispatch<decorators, T, BARRIER_RESOLVE>: AllStatic {
-    typedef typename AccessFunction<decorators, T, BARRIER_RESOLVE>::type func_t;
-    static func_t _resolve_func;
-
-    static oop resolve_init(oop obj) {
-      func_t function = BarrierResolver<decorators, func_t, BARRIER_RESOLVE>::resolve_barrier();
-      _resolve_func = function;
-      return function(obj);
-    }
-
-    static inline oop resolve(oop obj) {
-      return _resolve_func(obj);
-    }
-  };
-
-  // Initialize the function pointers to point to the resolving function.
-  template <DecoratorSet decorators, typename T>
-  typename AccessFunction<decorators, T, BARRIER_STORE>::type
-  RuntimeDispatch<decorators, T, BARRIER_STORE>::_store_func = &store_init;
-
-  template <DecoratorSet decorators, typename T>
-  typename AccessFunction<decorators, T, BARRIER_STORE_AT>::type
-  RuntimeDispatch<decorators, T, BARRIER_STORE_AT>::_store_at_func = &store_at_init;
-
-  template <DecoratorSet decorators, typename T>
-  typename AccessFunction<decorators, T, BARRIER_LOAD>::type
-  RuntimeDispatch<decorators, T, BARRIER_LOAD>::_load_func = &load_init;
-
-  template <DecoratorSet decorators, typename T>
-  typename AccessFunction<decorators, T, BARRIER_LOAD_AT>::type
-  RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::_load_at_func = &load_at_init;
-
-  template <DecoratorSet decorators, typename T>
-  typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG>::type
-  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::_atomic_cmpxchg_func = &atomic_cmpxchg_init;
-
-  template <DecoratorSet decorators, typename T>
-  typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::type
-  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::_atomic_cmpxchg_at_func = &atomic_cmpxchg_at_init;
-
-  template <DecoratorSet decorators, typename T>
-  typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG>::type
-  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::_atomic_xchg_func = &atomic_xchg_init;
-
-  template <DecoratorSet decorators, typename T>
-  typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type
-  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::_atomic_xchg_at_func = &atomic_xchg_at_init;
-
-  template <DecoratorSet decorators, typename T>
-  typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type
-  RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::_arraycopy_func = &arraycopy_init;
-
-  template <DecoratorSet decorators, typename T>
-  typename AccessFunction<decorators, T, BARRIER_CLONE>::type
-  RuntimeDispatch<decorators, T, BARRIER_CLONE>::_clone_func = &clone_init;
+  void RuntimeDispatch<decorators, T, BARRIER_STORE_AT>::store_at_init(oop base, ptrdiff_t offset, T value) {
+    func_t function = BarrierResolver<decorators, func_t, BARRIER_STORE_AT>::resolve_barrier();
+    _store_at_func = function;
+    function(base, offset, value);
+  }
 
   template <DecoratorSet decorators, typename T>
-  typename AccessFunction<decorators, T, BARRIER_RESOLVE>::type
-  RuntimeDispatch<decorators, T, BARRIER_RESOLVE>::_resolve_func = &resolve_init;
-
-  // Step 3: Pre-runtime dispatching.
-  // The PreRuntimeDispatch class is responsible for filtering the barrier strength
-  // decorators. That is, for AS_RAW, it hardwires the accesses without a runtime
-  // dispatch point. Otherwise it goes through a runtime check if hardwiring was
-  // not possible.
-  struct PreRuntimeDispatch: AllStatic {
-    template<DecoratorSet decorators>
-    struct CanHardwireRaw: public IntegralConstant<
-      bool,
-      !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // primitive access
-      !HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value || // don't care about compressed oops (oop* address)
-      HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value> // we can infer we use compressed oops (narrowOop* address)
-    {};
-
-    static const DecoratorSet convert_compressed_oops = INTERNAL_RT_USE_COMPRESSED_OOPS | INTERNAL_CONVERT_COMPRESSED_OOP;
-
-    template<DecoratorSet decorators>
-    static bool is_hardwired_primitive() {
-      return !HasDecorator<decorators, INTERNAL_BT_BARRIER_ON_PRIMITIVES>::value &&
-             !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value;
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value>::type
-    store(void* addr, T value) {
-      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
-      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
-        Raw::oop_store(addr, value);
-      } else {
-        Raw::store(addr, value);
-      }
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value>::type
-    store(void* addr, T value) {
-      if (UseCompressedOops) {
-        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
-        PreRuntimeDispatch::store<expanded_decorators>(addr, value);
-      } else {
-        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
-        PreRuntimeDispatch::store<expanded_decorators>(addr, value);
-      }
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      !HasDecorator<decorators, AS_RAW>::value>::type
-    store(void* addr, T value) {
-      if (is_hardwired_primitive<decorators>()) {
-        const DecoratorSet expanded_decorators = decorators | AS_RAW;
-        PreRuntimeDispatch::store<expanded_decorators>(addr, value);
-      } else {
-        RuntimeDispatch<decorators, T, BARRIER_STORE>::store(addr, value);
-      }
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      HasDecorator<decorators, AS_RAW>::value>::type
-    store_at(oop base, ptrdiff_t offset, T value) {
-      store<decorators>(field_addr(base, offset), value);
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      !HasDecorator<decorators, AS_RAW>::value>::type
-    store_at(oop base, ptrdiff_t offset, T value) {
-      if (is_hardwired_primitive<decorators>()) {
-        const DecoratorSet expanded_decorators = decorators | AS_RAW;
-        PreRuntimeDispatch::store_at<expanded_decorators>(base, offset, value);
-      } else {
-        RuntimeDispatch<decorators, T, BARRIER_STORE_AT>::store_at(base, offset, value);
-      }
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
-    load(void* addr) {
-      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
-      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
-        return Raw::template oop_load<T>(addr);
-      } else {
-        return Raw::template load<T>(addr);
-      }
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
-    load(void* addr) {
-      if (UseCompressedOops) {
-        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
-        return PreRuntimeDispatch::load<expanded_decorators, T>(addr);
-      } else {
-        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
-        return PreRuntimeDispatch::load<expanded_decorators, T>(addr);
-      }
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      !HasDecorator<decorators, AS_RAW>::value, T>::type
-    load(void* addr) {
-      if (is_hardwired_primitive<decorators>()) {
-        const DecoratorSet expanded_decorators = decorators | AS_RAW;
-        return PreRuntimeDispatch::load<expanded_decorators, T>(addr);
-      } else {
-        return RuntimeDispatch<decorators, T, BARRIER_LOAD>::load(addr);
-      }
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      HasDecorator<decorators, AS_RAW>::value, T>::type
-    load_at(oop base, ptrdiff_t offset) {
-      return load<decorators, T>(field_addr(base, offset));
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      !HasDecorator<decorators, AS_RAW>::value, T>::type
-    load_at(oop base, ptrdiff_t offset) {
-      if (is_hardwired_primitive<decorators>()) {
-        const DecoratorSet expanded_decorators = decorators | AS_RAW;
-        return PreRuntimeDispatch::load_at<expanded_decorators, T>(base, offset);
-      } else {
-        return RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::load_at(base, offset);
-      }
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
-    atomic_cmpxchg(T new_value, void* addr, T compare_value) {
-      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
-      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
-        return Raw::oop_atomic_cmpxchg(new_value, addr, compare_value);
-      } else {
-        return Raw::atomic_cmpxchg(new_value, addr, compare_value);
-      }
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
-    atomic_cmpxchg(T new_value, void* addr, T compare_value) {
-      if (UseCompressedOops) {
-        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
-        return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
-      } else {
-        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
-        return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
-      }
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      !HasDecorator<decorators, AS_RAW>::value, T>::type
-    atomic_cmpxchg(T new_value, void* addr, T compare_value) {
-      if (is_hardwired_primitive<decorators>()) {
-        const DecoratorSet expanded_decorators = decorators | AS_RAW;
-        return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
-      } else {
-        return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::atomic_cmpxchg(new_value, addr, compare_value);
-      }
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      HasDecorator<decorators, AS_RAW>::value, T>::type
-    atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
-      return atomic_cmpxchg<decorators>(new_value, field_addr(base, offset), compare_value);
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      !HasDecorator<decorators, AS_RAW>::value, T>::type
-    atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
-      if (is_hardwired_primitive<decorators>()) {
-        const DecoratorSet expanded_decorators = decorators | AS_RAW;
-        return PreRuntimeDispatch::atomic_cmpxchg_at<expanded_decorators>(new_value, base, offset, compare_value);
-      } else {
-        return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::atomic_cmpxchg_at(new_value, base, offset, compare_value);
-      }
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
-    atomic_xchg(T new_value, void* addr) {
-      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
-      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
-        return Raw::oop_atomic_xchg(new_value, addr);
-      } else {
-        return Raw::atomic_xchg(new_value, addr);
-      }
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
-    atomic_xchg(T new_value, void* addr) {
-      if (UseCompressedOops) {
-        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
-        return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
-      } else {
-        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
-        return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
-      }
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      !HasDecorator<decorators, AS_RAW>::value, T>::type
-    atomic_xchg(T new_value, void* addr) {
-      if (is_hardwired_primitive<decorators>()) {
-        const DecoratorSet expanded_decorators = decorators | AS_RAW;
-        return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
-      } else {
-        return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::atomic_xchg(new_value, addr);
-      }
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      HasDecorator<decorators, AS_RAW>::value, T>::type
-    atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
-      return atomic_xchg<decorators>(new_value, field_addr(base, offset));
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      !HasDecorator<decorators, AS_RAW>::value, T>::type
-    atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
-      if (is_hardwired_primitive<decorators>()) {
-        const DecoratorSet expanded_decorators = decorators | AS_RAW;
-        return PreRuntimeDispatch::atomic_xchg_at<expanded_decorators>(new_value, base, offset);
-      } else {
-        return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::atomic_xchg_at(new_value, base, offset);
-      }
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, bool>::type
-    arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
-      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
-      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
-        return Raw::oop_arraycopy(src_obj, dst_obj, src, dst, length);
-      } else {
-        return Raw::arraycopy(src_obj, dst_obj, src, dst, length);
-      }
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, bool>::type
-    arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
-      if (UseCompressedOops) {
-        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
-        return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, dst_obj, src, dst, length);
-      } else {
-        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
-        return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, dst_obj, src, dst, length);
-      }
-    }
-
-    template <DecoratorSet decorators, typename T>
-    inline static typename EnableIf<
-      !HasDecorator<decorators, AS_RAW>::value, bool>::type
-    arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
-      if (is_hardwired_primitive<decorators>()) {
-        const DecoratorSet expanded_decorators = decorators | AS_RAW;
-        return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, dst_obj, src, dst, length);
-      } else {
-        return RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::arraycopy(src_obj, dst_obj, src, dst, length);
-      }
-    }
-
-    template <DecoratorSet decorators>
-    inline static typename EnableIf<
-      HasDecorator<decorators, AS_RAW>::value>::type
-    clone(oop src, oop dst, size_t size) {
-      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
-      Raw::clone(src, dst, size);
-    }
-
-    template <DecoratorSet decorators>
-    inline static typename EnableIf<
-      !HasDecorator<decorators, AS_RAW>::value>::type
-    clone(oop src, oop dst, size_t size) {
-      RuntimeDispatch<decorators, oop, BARRIER_CLONE>::clone(src, dst, size);
-    }
-
-    template <DecoratorSet decorators>
-    inline static typename EnableIf<
-      HasDecorator<decorators, INTERNAL_BT_TO_SPACE_INVARIANT>::value, oop>::type
-    resolve(oop obj) {
-      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
-      return Raw::resolve(obj);
-    }
-
-    template <DecoratorSet decorators>
-    inline static typename EnableIf<
-      !HasDecorator<decorators, INTERNAL_BT_TO_SPACE_INVARIANT>::value, oop>::type
-    resolve(oop obj) {
-      return RuntimeDispatch<decorators, oop, BARRIER_RESOLVE>::resolve(obj);
-    }
-  };
-
-  // This class adds implied decorators that follow according to decorator rules.
-  // For example adding default reference strength and default memory ordering
-  // semantics.
-  template <DecoratorSet input_decorators>
-  struct DecoratorFixup: AllStatic {
-    // If no reference strength has been picked, then strong will be picked
-    static const DecoratorSet ref_strength_default = input_decorators |
-      (((ON_DECORATOR_MASK & input_decorators) == 0 && (INTERNAL_VALUE_IS_OOP & input_decorators) != 0) ?
-       ON_STRONG_OOP_REF : INTERNAL_EMPTY);
-    // If no memory ordering has been picked, unordered will be picked
-    static const DecoratorSet memory_ordering_default = ref_strength_default |
-      ((MO_DECORATOR_MASK & ref_strength_default) == 0 ? MO_UNORDERED : INTERNAL_EMPTY);
-    // If no barrier strength has been picked, normal will be used
-    static const DecoratorSet barrier_strength_default = memory_ordering_default |
-      ((AS_DECORATOR_MASK & memory_ordering_default) == 0 ? AS_NORMAL : INTERNAL_EMPTY);
-    // Heap array accesses imply it is a heap access
-    static const DecoratorSet heap_array_is_in_heap = barrier_strength_default |
-      ((IN_HEAP_ARRAY & barrier_strength_default) != 0 ? IN_HEAP : INTERNAL_EMPTY);
-    static const DecoratorSet conc_root_is_root = heap_array_is_in_heap |
-      ((IN_CONCURRENT_ROOT & heap_array_is_in_heap) != 0 ? IN_ROOT : INTERNAL_EMPTY);
-    static const DecoratorSet archive_root_is_root = conc_root_is_root |
-      ((IN_ARCHIVE_ROOT & conc_root_is_root) != 0 ? IN_ROOT : INTERNAL_EMPTY);
-    static const DecoratorSet value = archive_root_is_root | BT_BUILDTIME_DECORATORS;
-  };
-
-  // Step 2: Reduce types.
-  // Enforce that for non-oop types, T and P have to be strictly the same.
-  // P is the type of the address and T is the type of the values.
-  // As for oop types, it is allowed to pass T in {narrowOop, oop} and
-  // P in {narrowOop, oop, HeapWord*}. The following rules apply according to
-  // the subsequent table. (columns are P, rows are T)
-  // |           | HeapWord  |   oop   | narrowOop |
-  // |   oop     |  rt-comp  | hw-none |  hw-comp  |
-  // | narrowOop |     x     |    x    |  hw-none  |
-  //
-  // x means not allowed
-  // rt-comp means it must be checked at runtime whether the oop is compressed.
-  // hw-none means it is statically known the oop will not be compressed.
-  // hw-comp means it is statically known the oop will be compressed.
+  T RuntimeDispatch<decorators, T, BARRIER_LOAD>::load_init(void* addr) {
+    func_t function = BarrierResolver<decorators, func_t, BARRIER_LOAD>::resolve_barrier();
+    _load_func = function;
+    return function(addr);
+  }
 
   template <DecoratorSet decorators, typename T>
-  inline void store_reduce_types(T* addr, T value) {
-    PreRuntimeDispatch::store<decorators>(addr, value);
-  }
-
-  template <DecoratorSet decorators>
-  inline void store_reduce_types(narrowOop* addr, oop value) {
-    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
-                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
-    PreRuntimeDispatch::store<expanded_decorators>(addr, value);
+  T RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::load_at_init(oop base, ptrdiff_t offset) {
+    func_t function = BarrierResolver<decorators, func_t, BARRIER_LOAD_AT>::resolve_barrier();
+    _load_at_func = function;
+    return function(base, offset);
   }
 
-  template <DecoratorSet decorators>
-  inline void store_reduce_types(narrowOop* addr, narrowOop value) {
-    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
-                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
-    PreRuntimeDispatch::store<expanded_decorators>(addr, value);
+  template <DecoratorSet decorators, typename T>
+  T RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::atomic_cmpxchg_init(T new_value, void* addr, T compare_value) {
+    func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_CMPXCHG>::resolve_barrier();
+    _atomic_cmpxchg_func = function;
+    return function(new_value, addr, compare_value);
   }
 
-  template <DecoratorSet decorators>
-  inline void store_reduce_types(HeapWord* addr, oop value) {
-    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
-    PreRuntimeDispatch::store<expanded_decorators>(addr, value);
+  template <DecoratorSet decorators, typename T>
+  T RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::atomic_cmpxchg_at_init(T new_value, oop base, ptrdiff_t offset, T compare_value) {
+    func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_CMPXCHG_AT>::resolve_barrier();
+    _atomic_cmpxchg_at_func = function;
+    return function(new_value, base, offset, compare_value);
   }
 
   template <DecoratorSet decorators, typename T>
-  inline T atomic_cmpxchg_reduce_types(T new_value, T* addr, T compare_value) {
-    return PreRuntimeDispatch::atomic_cmpxchg<decorators>(new_value, addr, compare_value);
-  }
-
-  template <DecoratorSet decorators>
-  inline oop atomic_cmpxchg_reduce_types(oop new_value, narrowOop* addr, oop compare_value) {
-    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
-                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
-    return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
-  }
-
-  template <DecoratorSet decorators>
-  inline narrowOop atomic_cmpxchg_reduce_types(narrowOop new_value, narrowOop* addr, narrowOop compare_value) {
-    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
-                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
-    return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
-  }
-
-  template <DecoratorSet decorators>
-  inline oop atomic_cmpxchg_reduce_types(oop new_value,
-                                         HeapWord* addr,
-                                         oop compare_value) {
-    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
-    return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
-  }
-
-  template <DecoratorSet decorators, typename T>
-  inline T atomic_xchg_reduce_types(T new_value, T* addr) {
-    const DecoratorSet expanded_decorators = decorators;
-    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
-  }
-
-  template <DecoratorSet decorators>
-  inline oop atomic_xchg_reduce_types(oop new_value, narrowOop* addr) {
-    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
-                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
-    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
-  }
-
-  template <DecoratorSet decorators>
-  inline narrowOop atomic_xchg_reduce_types(narrowOop new_value, narrowOop* addr) {
-    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
-                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
-    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
-  }
-
-  template <DecoratorSet decorators>
-  inline oop atomic_xchg_reduce_types(oop new_value, HeapWord* addr) {
-    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
-    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
-  }
-
-  template <DecoratorSet decorators, typename T>
-  inline T load_reduce_types(T* addr) {
-    return PreRuntimeDispatch::load<decorators, T>(addr);
+  T RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::atomic_xchg_init(T new_value, void* addr) {
+    func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_XCHG>::resolve_barrier();
+    _atomic_xchg_func = function;
+    return function(new_value, addr);
   }
 
   template <DecoratorSet decorators, typename T>
-  inline typename OopOrNarrowOop<T>::type load_reduce_types(narrowOop* addr) {
-    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
-                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
-    return PreRuntimeDispatch::load<expanded_decorators, typename OopOrNarrowOop<T>::type>(addr);
-  }
-
-  template <DecoratorSet decorators, typename T>
-  inline oop load_reduce_types(HeapWord* addr) {
-    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
-    return PreRuntimeDispatch::load<expanded_decorators, oop>(addr);
+  T RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::atomic_xchg_at_init(T new_value, oop base, ptrdiff_t offset) {
+    func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_XCHG_AT>::resolve_barrier();
+    _atomic_xchg_at_func = function;
+    return function(new_value, base, offset);
   }
 
   template <DecoratorSet decorators, typename T>
-  inline bool arraycopy_reduce_types(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
-    return PreRuntimeDispatch::arraycopy<decorators>(src_obj, dst_obj, src, dst, length);
-  }
-
-  template <DecoratorSet decorators>
-  inline bool arraycopy_reduce_types(arrayOop src_obj, arrayOop dst_obj, HeapWord* src, HeapWord* dst, size_t length) {
-    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
-    return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, dst_obj, src, dst, length);
-  }
-
-  template <DecoratorSet decorators>
-  inline bool arraycopy_reduce_types(arrayOop src_obj, arrayOop dst_obj, narrowOop* src, narrowOop* dst, size_t length) {
-    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
-                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
-    return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, dst_obj, src, dst, length);
-  }
-
-  // Step 1: Set default decorators. This step remembers if a type was volatile
-  // and then sets the MO_VOLATILE decorator by default. Otherwise, a default
-  // memory ordering is set for the access, and the implied decorator rules
-  // are applied to select sensible defaults for decorators that have not been
-  // explicitly set. For example, default object referent strength is set to strong.
-  // This step also decays the types passed in (e.g. getting rid of CV qualifiers
-  // and references from the types). This step also performs some type verification
-  // that the passed in types make sense.
-
-  template <DecoratorSet decorators, typename T>
-  static void verify_types(){
-    // If this fails to compile, then you have sent in something that is
-    // not recognized as a valid primitive type to a primitive Access function.
-    STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // oops have already been validated
-                   (IsPointer<T>::value || IsIntegral<T>::value) ||
-                    IsFloatingPoint<T>::value)); // not allowed primitive type
-  }
-
-  template <DecoratorSet decorators, typename P, typename T>
-  inline void store(P* addr, T value) {
-    verify_types<decorators, T>();
-    typedef typename Decay<P>::type DecayedP;
-    typedef typename Decay<T>::type DecayedT;
-    DecayedT decayed_value = value;
-    // If a volatile address is passed in but no memory ordering decorator,
-    // set the memory ordering to MO_VOLATILE by default.
-    const DecoratorSet expanded_decorators = DecoratorFixup<
-      (IsVolatile<P>::value && !HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
-      (MO_VOLATILE | decorators) : decorators>::value;
-    store_reduce_types<expanded_decorators>(const_cast<DecayedP*>(addr), decayed_value);
+  bool RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::arraycopy_init(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
+    func_t function = BarrierResolver<decorators, func_t, BARRIER_ARRAYCOPY>::resolve_barrier();
+    _arraycopy_func = function;
+    return function(src_obj, dst_obj, src, dst, length);
   }
 
   template <DecoratorSet decorators, typename T>
-  inline void store_at(oop base, ptrdiff_t offset, T value) {
-    verify_types<decorators, T>();
-    typedef typename Decay<T>::type DecayedT;
-    DecayedT decayed_value = value;
-    const DecoratorSet expanded_decorators = DecoratorFixup<decorators |
-                                             (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
-                                              INTERNAL_CONVERT_COMPRESSED_OOP : INTERNAL_EMPTY)>::value;
-    PreRuntimeDispatch::store_at<expanded_decorators>(base, offset, decayed_value);
-  }
-
-  template <DecoratorSet decorators, typename P, typename T>
-  inline T load(P* addr) {
-    verify_types<decorators, T>();
-    typedef typename Decay<P>::type DecayedP;
-    typedef typename Conditional<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
-                                 typename OopOrNarrowOop<T>::type,
-                                 typename Decay<T>::type>::type DecayedT;
-    // If a volatile address is passed in but no memory ordering decorator,
-    // set the memory ordering to MO_VOLATILE by default.
-    const DecoratorSet expanded_decorators = DecoratorFixup<
-      (IsVolatile<P>::value && !HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
-      (MO_VOLATILE | decorators) : decorators>::value;
-    return load_reduce_types<expanded_decorators, DecayedT>(const_cast<DecayedP*>(addr));
-  }
-
-  template <DecoratorSet decorators, typename T>
-  inline T load_at(oop base, ptrdiff_t offset) {
-    verify_types<decorators, T>();
-    typedef typename Conditional<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
-                                 typename OopOrNarrowOop<T>::type,
-                                 typename Decay<T>::type>::type DecayedT;
-    // Expand the decorators (figure out sensible defaults)
-    // Potentially remember if we need compressed oop awareness
-    const DecoratorSet expanded_decorators = DecoratorFixup<decorators |
-                                             (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
-                                              INTERNAL_CONVERT_COMPRESSED_OOP : INTERNAL_EMPTY)>::value;
-    return PreRuntimeDispatch::load_at<expanded_decorators, DecayedT>(base, offset);
-  }
-
-  template <DecoratorSet decorators, typename P, typename T>
-  inline T atomic_cmpxchg(T new_value, P* addr, T compare_value) {
-    verify_types<decorators, T>();
-    typedef typename Decay<P>::type DecayedP;
-    typedef typename Decay<T>::type DecayedT;
-    DecayedT new_decayed_value = new_value;
-    DecayedT compare_decayed_value = compare_value;
-    const DecoratorSet expanded_decorators = DecoratorFixup<
-      (!HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
-      (MO_SEQ_CST | decorators) : decorators>::value;
-    return atomic_cmpxchg_reduce_types<expanded_decorators>(new_decayed_value,
-                                                            const_cast<DecayedP*>(addr),
-                                                            compare_decayed_value);
+  void RuntimeDispatch<decorators, T, BARRIER_CLONE>::clone_init(oop src, oop dst, size_t size) {
+    func_t function = BarrierResolver<decorators, func_t, BARRIER_CLONE>::resolve_barrier();
+    _clone_func = function;
+    function(src, dst, size);
   }
 
   template <DecoratorSet decorators, typename T>
-  inline T atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
-    verify_types<decorators, T>();
-    typedef typename Decay<T>::type DecayedT;
-    DecayedT new_decayed_value = new_value;
-    DecayedT compare_decayed_value = compare_value;
-    // Determine default memory ordering
-    const DecoratorSet expanded_decorators = DecoratorFixup<
-      (!HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
-      (MO_SEQ_CST | decorators) : decorators>::value;
-    // Potentially remember that we need compressed oop awareness
-    const DecoratorSet final_decorators = expanded_decorators |
-                                          (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
-                                           INTERNAL_CONVERT_COMPRESSED_OOP : INTERNAL_EMPTY);
-    return PreRuntimeDispatch::atomic_cmpxchg_at<final_decorators>(new_decayed_value, base,
-                                                                   offset, compare_decayed_value);
-  }
-
-  template <DecoratorSet decorators, typename P, typename T>
-  inline T atomic_xchg(T new_value, P* addr) {
-    verify_types<decorators, T>();
-    typedef typename Decay<P>::type DecayedP;
-    typedef typename Decay<T>::type DecayedT;
-    DecayedT new_decayed_value = new_value;
-    // atomic_xchg is only available in SEQ_CST flavour.
-    const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST>::value;
-    return atomic_xchg_reduce_types<expanded_decorators>(new_decayed_value,
-                                                         const_cast<DecayedP*>(addr));
-  }
-
-  template <DecoratorSet decorators, typename T>
-  inline T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
-    verify_types<decorators, T>();
-    typedef typename Decay<T>::type DecayedT;
-    DecayedT new_decayed_value = new_value;
-    // atomic_xchg is only available in SEQ_CST flavour.
-    const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST |
-                                             (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
-                                              INTERNAL_CONVERT_COMPRESSED_OOP : INTERNAL_EMPTY)>::value;
-    return PreRuntimeDispatch::atomic_xchg_at<expanded_decorators>(new_decayed_value, base, offset);
+  oop RuntimeDispatch<decorators, T, BARRIER_RESOLVE>::resolve_init(oop obj) {
+    func_t function = BarrierResolver<decorators, func_t, BARRIER_RESOLVE>::resolve_barrier();
+    _resolve_func = function;
+    return function(obj);
   }
 
   template <DecoratorSet decorators, typename T>
-  inline bool arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
-    STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ||
-                   (IsSame<T, void>::value || IsIntegral<T>::value) ||
-                    IsFloatingPoint<T>::value)); // arraycopy allows type erased void elements
-    typedef typename Decay<T>::type DecayedT;
-    const DecoratorSet expanded_decorators = DecoratorFixup<decorators | IN_HEAP_ARRAY | IN_HEAP>::value;
-    return arraycopy_reduce_types<expanded_decorators>(src_obj, dst_obj,
-                                                       const_cast<DecayedT*>(src),
-                                                       const_cast<DecayedT*>(dst),
-                                                       length);
-  }
-
-  template <DecoratorSet decorators>
-  inline void clone(oop src, oop dst, size_t size) {
-    const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
-    PreRuntimeDispatch::clone<expanded_decorators>(src, dst, size);
-  }
-
-  template <DecoratorSet decorators>
-  inline oop resolve(oop obj) {
-    const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
-    return PreRuntimeDispatch::resolve<expanded_decorators>(obj);
+  bool RuntimeDispatch<decorators, T, BARRIER_EQUALS>::equals_init(oop o1, oop o2) {
+    func_t function = BarrierResolver<decorators, func_t, BARRIER_EQUALS>::resolve_barrier();
+    _equals_func = function;
+    return function(o1, o2);
   }
 }
 
-template <DecoratorSet decorators>
-template <DecoratorSet expected_decorators>
-void Access<decorators>::verify_decorators() {
-  STATIC_ASSERT((~expected_decorators & decorators) == 0); // unexpected decorator used
-  const DecoratorSet barrier_strength_decorators = decorators & AS_DECORATOR_MASK;
-  STATIC_ASSERT(barrier_strength_decorators == 0 || ( // make sure barrier strength decorators are disjoint if set
-    (barrier_strength_decorators ^ AS_NO_KEEPALIVE) == 0 ||
-    (barrier_strength_decorators ^ AS_DEST_NOT_INITIALIZED) == 0 ||
-    (barrier_strength_decorators ^ AS_RAW) == 0 ||
-    (barrier_strength_decorators ^ AS_NORMAL) == 0
-  ));
-  const DecoratorSet ref_strength_decorators = decorators & ON_DECORATOR_MASK;
-  STATIC_ASSERT(ref_strength_decorators == 0 || ( // make sure ref strength decorators are disjoint if set
-    (ref_strength_decorators ^ ON_STRONG_OOP_REF) == 0 ||
-    (ref_strength_decorators ^ ON_WEAK_OOP_REF) == 0 ||
-    (ref_strength_decorators ^ ON_PHANTOM_OOP_REF) == 0 ||
-    (ref_strength_decorators ^ ON_UNKNOWN_OOP_REF) == 0
-  ));
-  const DecoratorSet memory_ordering_decorators = decorators & MO_DECORATOR_MASK;
-  STATIC_ASSERT(memory_ordering_decorators == 0 || ( // make sure memory ordering decorators are disjoint if set
-    (memory_ordering_decorators ^ MO_UNORDERED) == 0 ||
-    (memory_ordering_decorators ^ MO_VOLATILE) == 0 ||
-    (memory_ordering_decorators ^ MO_RELAXED) == 0 ||
-    (memory_ordering_decorators ^ MO_ACQUIRE) == 0 ||
-    (memory_ordering_decorators ^ MO_RELEASE) == 0 ||
-    (memory_ordering_decorators ^ MO_SEQ_CST) == 0
-  ));
-  const DecoratorSet location_decorators = decorators & IN_DECORATOR_MASK;
-  STATIC_ASSERT(location_decorators == 0 || ( // make sure location decorators are disjoint if set
-    (location_decorators ^ IN_ROOT) == 0 ||
-    (location_decorators ^ IN_HEAP) == 0 ||
-    (location_decorators ^ (IN_HEAP | IN_HEAP_ARRAY)) == 0 ||
-    (location_decorators ^ (IN_ROOT | IN_CONCURRENT_ROOT)) == 0 ||
-    (location_decorators ^ (IN_ROOT | IN_ARCHIVE_ROOT)) == 0
-  ));
-}
-
-#endif // SHARE_VM_RUNTIME_ACCESS_INLINE_HPP
+#endif // SHARE_OOPS_ACCESS_INLINE_HPP
--- a/src/hotspot/share/oops/accessBackend.hpp	Tue Apr 03 10:27:46 2018 +0200
+++ b/src/hotspot/share/oops/accessBackend.hpp	Tue Apr 03 13:15:27 2018 +0200
@@ -22,16 +22,26 @@
  *
  */
 
-#ifndef SHARE_VM_RUNTIME_ACCESSBACKEND_HPP
-#define SHARE_VM_RUNTIME_ACCESSBACKEND_HPP
+#ifndef SHARE_OOPS_ACCESSBACKEND_HPP
+#define SHARE_OOPS_ACCESSBACKEND_HPP
 
+#include "gc/shared/barrierSetConfig.hpp"
+#include "memory/allocation.hpp"
 #include "metaprogramming/conditional.hpp"
+#include "metaprogramming/decay.hpp"
 #include "metaprogramming/enableIf.hpp"
 #include "metaprogramming/integralConstant.hpp"
+#include "metaprogramming/isFloatingPoint.hpp"
+#include "metaprogramming/isIntegral.hpp"
+#include "metaprogramming/isPointer.hpp"
 #include "metaprogramming/isSame.hpp"
+#include "metaprogramming/isVolatile.hpp"
+#include "oops/accessDecorators.hpp"
+#include "oops/oopsHierarchy.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/globalDefinitions.hpp"
 
+
 // This metafunction returns either oop or narrowOop depending on whether
 // an access needs to use compressed oops or not.
 template <DecoratorSet decorators>
@@ -53,7 +63,8 @@
     BARRIER_ATOMIC_XCHG_AT,
     BARRIER_ARRAYCOPY,
     BARRIER_CLONE,
-    BARRIER_RESOLVE
+    BARRIER_RESOLVE,
+    BARRIER_EQUALS
   };
 
   template <DecoratorSet decorators, typename T>
@@ -102,6 +113,7 @@
     typedef bool (*arraycopy_func_t)(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length);
     typedef void (*clone_func_t)(oop src, oop dst, size_t size);
     typedef oop (*resolve_func_t)(oop obj);
+    typedef bool (*equals_func_t)(oop o1, oop o2);
   };
 
   template <DecoratorSet decorators>
@@ -127,6 +139,7 @@
   ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ARRAYCOPY, arraycopy_func_t);
   ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_CLONE, clone_func_t);
   ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_RESOLVE, resolve_func_t);
+  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_EQUALS, equals_func_t);
 #undef ACCESS_GENERATE_ACCESS_FUNCTION
 
   template <DecoratorSet decorators, typename T, BarrierType barrier_type>
@@ -388,6 +401,974 @@
   static void clone(oop src, oop dst, size_t size);
 
   static oop resolve(oop obj) { return obj; }
+
+  static bool equals(oop o1, oop o2) { return o1 == o2; }
 };
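The raw equals() just added above compiles down to the naked pointer compare this changeset removes from call sites; the point of routing it through the Access machinery is that a GC can override it. As a standalone sketch (hypothetical types, not HotSpot code), here is why a collector that forwards objects cannot rely on ==: two physical copies of one logical object may be live at once, so both sides must be resolved before comparing.

    // Illustrative only: a forwarding-pointer collector's view of equality.
    #include <cstddef>
    struct Obj {
      Obj* fwd;                                  // current copy, or self
    };
    static Obj* resolve(Obj* o) {
      return o == NULL ? NULL : o->fwd;          // follow the forwarding pointer
    }
    static bool oops_equal(Obj* a, Obj* b) {
      return resolve(a) == resolve(b);           // compare canonical copies
    }
    int main() {
      Obj a = { &a };                            // up-to-date copy
      Obj b = { &a };                            // stale copy, forwarded to a
      return oops_equal(&a, &b) ? 0 : 1;         // distinct addresses, equal oops
    }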
 
-#endif // SHARE_VM_RUNTIME_ACCESSBACKEND_HPP
+// Below is the implementation of the first 4 steps of the template pipeline:
+// * Step 1: Set default decorators and decay types. This step gets rid of CV qualifiers
+//           and sets default decorators to sensible values.
+// * Step 2: Reduce types. This step makes sure there is only a single T type and not
+//           multiple types. The P type of the address and T type of the value must
+//           match.
+// * Step 3: Pre-runtime dispatch. This step checks whether a runtime call can be
+//           avoided, and in that case avoids it (calling raw accesses or
+//           primitive accesses in a build that does not require primitive GC barriers)
+// * Step 4: Runtime-dispatch. This step performs a runtime dispatch to the corresponding
+//           BarrierSet::AccessBarrier accessor that attaches GC-required barriers
+//           to the access.
+
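Of the four steps listed above, Step 1's decorator defaulting (DecoratorFixup in this patch) is the least visible in the code that follows, so a miniature standalone sketch may help; the constants below are hypothetical stand-ins, not the real decorator bits.

    // Illustrative only: OR in a default bit when none of a group was chosen.
    typedef unsigned long long DS;
    const DS MO_MASK    = 0x3;                   // all memory-ordering bits
    const DS MO_UNORD   = 0x1;                   // the default ordering
    const DS MO_SEQ_CST = 0x2;
    template <DS ds>
    struct FixupSketch {
      static const DS value = ds | (((ds & MO_MASK) == 0) ? MO_UNORD : 0);
    };
    // FixupSketch<0>::value == MO_UNORD; FixupSketch<MO_SEQ_CST>::value is unchanged.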
+namespace AccessInternal {
+  template <typename T>
+  struct OopOrNarrowOopInternal: AllStatic {
+    typedef oop type;
+  };
+
+  template <>
+  struct OopOrNarrowOopInternal<narrowOop>: AllStatic {
+    typedef narrowOop type;
+  };
+
+  // This metafunction returns a canonicalized oop/narrowOop type for a
+  // passed-in oop-like type from the oop_* overloads, where the user has sworn
+  // that the passed-in values are oop-like (e.g. oop, oopDesc*, arrayOop,
+  // narrowOop, instanceOopDesc*, and other such types).
+  // In the oop_* overloads, it must hold that if the passed-in type T is not
+  // narrowOop, then by contract it has to be one of the many oop-like types
+  // implicitly convertible to oop, and hence oop is returned as the canonical
+  // oop type. If it turns out not to be, then the implicit conversion to oop
+  // will fail to compile, as desired.
+  template <typename T>
+  struct OopOrNarrowOop: AllStatic {
+    typedef typename OopOrNarrowOopInternal<typename Decay<T>::type>::type type;
+  };
+
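A compile-time illustration of the contract just described, as a standalone sketch with stand-in types (the real metafunction also decays CV and reference qualifiers):

    // Illustrative only: oop-like types canonicalize to oop, narrowOop stays.
    #include <type_traits>
    struct oopDesc;
    typedef oopDesc*     oop_t;
    typedef unsigned int narrow_t;
    template <typename T> struct OopOrNarrowSketch           { typedef oop_t    type; };
    template <>           struct OopOrNarrowSketch<narrow_t> { typedef narrow_t type; };
    static_assert(std::is_same<OopOrNarrowSketch<oop_t>::type, oop_t>::value,
                  "anything oop-like becomes oop");
    static_assert(std::is_same<OopOrNarrowSketch<narrow_t>::type, narrow_t>::value,
                  "narrowOop is preserved");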
+  inline void* field_addr(oop base, ptrdiff_t byte_offset) {
+    return reinterpret_cast<void*>(reinterpret_cast<intptr_t>((void*)base) + byte_offset);
+  }
+  // Step 4: Runtime dispatch
+  // The RuntimeDispatch class is responsible for performing a runtime dispatch of the
+  // accessor. This is required when the access either depends on whether compressed oops
+  // is being used, or it depends on which GC implementation was chosen (e.g. requires GC
+  // barriers). The way it works is that a function pointer initially pointing to an
+  // accessor resolution function gets called for each access. Upon first invocation,
+  // it resolves which accessor to use in future invocations and patches the
+  // function pointer to this new accessor.
+
+  template <DecoratorSet decorators, typename T, BarrierType type>
+  struct RuntimeDispatch: AllStatic {};
+
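The resolve-once-then-patch scheme described in the Step 4 comment can be reproduced in miniature. Everything below is a hypothetical standalone sketch, not the HotSpot types: the function pointer starts at a resolver, and the first call swings it to the real accessor while still servicing that call.

    // Illustrative only: self-patching function-pointer dispatch.
    #include <cstdio>
    typedef int (*load_func_t)(const int*);
    static int raw_load(const int* addr) { return *addr; }   // resolved accessor
    static int load_init(const int* addr);                   // the resolver
    static load_func_t _load_func = &load_init;              // starts at resolver
    static int load_init(const int* addr) {
      _load_func = &raw_load;              // patch: later calls skip resolution
      return raw_load(addr);               // still serve the first call
    }
    int main() {
      int v = 42;
      std::printf("%d\n", _load_func(&v)); // first call resolves and patches
      std::printf("%d\n", _load_func(&v)); // second call goes straight to raw_load
      return 0;
    }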
+  template <DecoratorSet decorators, typename T>
+  struct RuntimeDispatch<decorators, T, BARRIER_STORE>: AllStatic {
+    typedef typename AccessFunction<decorators, T, BARRIER_STORE>::type func_t;
+    static func_t _store_func;
+
+    static void store_init(void* addr, T value);
+
+    static inline void store(void* addr, T value) {
+      _store_func(addr, value);
+    }
+  };
+
+  template <DecoratorSet decorators, typename T>
+  struct RuntimeDispatch<decorators, T, BARRIER_STORE_AT>: AllStatic {
+    typedef typename AccessFunction<decorators, T, BARRIER_STORE_AT>::type func_t;
+    static func_t _store_at_func;
+
+    static void store_at_init(oop base, ptrdiff_t offset, T value);
+
+    static inline void store_at(oop base, ptrdiff_t offset, T value) {
+      _store_at_func(base, offset, value);
+    }
+  };
+
+  template <DecoratorSet decorators, typename T>
+  struct RuntimeDispatch<decorators, T, BARRIER_LOAD>: AllStatic {
+    typedef typename AccessFunction<decorators, T, BARRIER_LOAD>::type func_t;
+    static func_t _load_func;
+
+    static T load_init(void* addr);
+
+    static inline T load(void* addr) {
+      return _load_func(addr);
+    }
+  };
+
+  template <DecoratorSet decorators, typename T>
+  struct RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>: AllStatic {
+    typedef typename AccessFunction<decorators, T, BARRIER_LOAD_AT>::type func_t;
+    static func_t _load_at_func;
+
+    static T load_at_init(oop base, ptrdiff_t offset);
+
+    static inline T load_at(oop base, ptrdiff_t offset) {
+      return _load_at_func(base, offset);
+    }
+  };
+
+  template <DecoratorSet decorators, typename T>
+  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>: AllStatic {
+    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG>::type func_t;
+    static func_t _atomic_cmpxchg_func;
+
+    static T atomic_cmpxchg_init(T new_value, void* addr, T compare_value);
+
+    static inline T atomic_cmpxchg(T new_value, void* addr, T compare_value) {
+      return _atomic_cmpxchg_func(new_value, addr, compare_value);
+    }
+  };
+
+  template <DecoratorSet decorators, typename T>
+  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>: AllStatic {
+    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::type func_t;
+    static func_t _atomic_cmpxchg_at_func;
+
+    static T atomic_cmpxchg_at_init(T new_value, oop base, ptrdiff_t offset, T compare_value);
+
+    static inline T atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
+      return _atomic_cmpxchg_at_func(new_value, base, offset, compare_value);
+    }
+  };
+
+  template <DecoratorSet decorators, typename T>
+  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>: AllStatic {
+    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG>::type func_t;
+    static func_t _atomic_xchg_func;
+
+    static T atomic_xchg_init(T new_value, void* addr);
+
+    static inline T atomic_xchg(T new_value, void* addr) {
+      return _atomic_xchg_func(new_value, addr);
+    }
+  };
+
+  template <DecoratorSet decorators, typename T>
+  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>: AllStatic {
+    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type func_t;
+    static func_t _atomic_xchg_at_func;
+
+    static T atomic_xchg_at_init(T new_value, oop base, ptrdiff_t offset);
+
+    static inline T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
+      return _atomic_xchg_at_func(new_value, base, offset);
+    }
+  };
+
+  template <DecoratorSet decorators, typename T>
+  struct RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>: AllStatic {
+    typedef typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type func_t;
+    static func_t _arraycopy_func;
+
+    static bool arraycopy_init(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length);
+
+    static inline bool arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
+      return _arraycopy_func(src_obj, dst_obj, src, dst, length);
+    }
+  };
+
+  template <DecoratorSet decorators, typename T>
+  struct RuntimeDispatch<decorators, T, BARRIER_CLONE>: AllStatic {
+    typedef typename AccessFunction<decorators, T, BARRIER_CLONE>::type func_t;
+    static func_t _clone_func;
+
+    static void clone_init(oop src, oop dst, size_t size);
+
+    static inline void clone(oop src, oop dst, size_t size) {
+      _clone_func(src, dst, size);
+    }
+  };
+
+  template <DecoratorSet decorators, typename T>
+  struct RuntimeDispatch<decorators, T, BARRIER_RESOLVE>: AllStatic {
+    typedef typename AccessFunction<decorators, T, BARRIER_RESOLVE>::type func_t;
+    static func_t _resolve_func;
+
+    static oop resolve_init(oop obj);
+
+    static inline oop resolve(oop obj) {
+      return _resolve_func(obj);
+    }
+  };
+
+  template <DecoratorSet decorators, typename T>
+  struct RuntimeDispatch<decorators, T, BARRIER_EQUALS>: AllStatic {
+    typedef typename AccessFunction<decorators, T, BARRIER_EQUALS>::type func_t;
+    static func_t _equals_func;
+
+    static bool equals_init(oop o1, oop o2);
+
+    static inline bool equals(oop o1, oop o2) {
+      return _equals_func(o1, o2);
+    }
+  };
+
+  // Initialize the function pointers to point to the resolving function.
+  template <DecoratorSet decorators, typename T>
+  typename AccessFunction<decorators, T, BARRIER_STORE>::type
+  RuntimeDispatch<decorators, T, BARRIER_STORE>::_store_func = &store_init;
+
+  template <DecoratorSet decorators, typename T>
+  typename AccessFunction<decorators, T, BARRIER_STORE_AT>::type
+  RuntimeDispatch<decorators, T, BARRIER_STORE_AT>::_store_at_func = &store_at_init;
+
+  template <DecoratorSet decorators, typename T>
+  typename AccessFunction<decorators, T, BARRIER_LOAD>::type
+  RuntimeDispatch<decorators, T, BARRIER_LOAD>::_load_func = &load_init;
+
+  template <DecoratorSet decorators, typename T>
+  typename AccessFunction<decorators, T, BARRIER_LOAD_AT>::type
+  RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::_load_at_func = &load_at_init;
+
+  template <DecoratorSet decorators, typename T>
+  typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG>::type
+  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::_atomic_cmpxchg_func = &atomic_cmpxchg_init;
+
+  template <DecoratorSet decorators, typename T>
+  typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::type
+  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::_atomic_cmpxchg_at_func = &atomic_cmpxchg_at_init;
+
+  template <DecoratorSet decorators, typename T>
+  typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG>::type
+  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::_atomic_xchg_func = &atomic_xchg_init;
+
+  template <DecoratorSet decorators, typename T>
+  typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type
+  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::_atomic_xchg_at_func = &atomic_xchg_at_init;
+
+  template <DecoratorSet decorators, typename T>
+  typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type
+  RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::_arraycopy_func = &arraycopy_init;
+
+  template <DecoratorSet decorators, typename T>
+  typename AccessFunction<decorators, T, BARRIER_CLONE>::type
+  RuntimeDispatch<decorators, T, BARRIER_CLONE>::_clone_func = &clone_init;
+
+  template <DecoratorSet decorators, typename T>
+  typename AccessFunction<decorators, T, BARRIER_RESOLVE>::type
+  RuntimeDispatch<decorators, T, BARRIER_RESOLVE>::_resolve_func = &resolve_init;
+
+  template <DecoratorSet decorators, typename T>
+  typename AccessFunction<decorators, T, BARRIER_EQUALS>::type
+  RuntimeDispatch<decorators, T, BARRIER_EQUALS>::_equals_func = &equals_init;
+
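One C++ detail worth noting about the block above: static data members of a class template may be defined in a header, since each instantiation yields one definition and the linker folds duplicates across translation units. In miniature:

    // Illustrative only: header-safe statics of a class template.
    template <int N>
    struct CounterSketch {
      static int value;
    };
    template <int N>
    int CounterSketch<N>::value = N;   // one definition per instantiation
    int main() { return CounterSketch<7>::value == 7 ? 0 : 1; }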
+  // Step 3: Pre-runtime dispatching.
+  // The PreRuntimeDispatch class is responsible for filtering the barrier strength
+  // decorators. That is, for AS_RAW, it hardwires the accesses without a runtime
+  // dispatch point. Otherwise it goes through a runtime check if hardwiring was
+  // not possible.
+  struct PreRuntimeDispatch: AllStatic {
+    template<DecoratorSet decorators>
+    struct CanHardwireRaw: public IntegralConstant<
+      bool,
+      !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // primitive access
+      !HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value || // don't care about compressed oops (oop* address)
+      HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value> // we can infer we use compressed oops (narrowOop* address)
+    {};
+
+    static const DecoratorSet convert_compressed_oops = INTERNAL_RT_USE_COMPRESSED_OOPS | INTERNAL_CONVERT_COMPRESSED_OOP;
+
+    template<DecoratorSet decorators>
+    static bool is_hardwired_primitive() {
+      return !HasDecorator<decorators, INTERNAL_BT_BARRIER_ON_PRIMITIVES>::value &&
+             !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value;
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value>::type
+    store(void* addr, T value) {
+      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
+      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
+        Raw::oop_store(addr, value);
+      } else {
+        Raw::store(addr, value);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value>::type
+    store(void* addr, T value) {
+      if (UseCompressedOops) {
+        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
+        PreRuntimeDispatch::store<expanded_decorators>(addr, value);
+      } else {
+        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
+        PreRuntimeDispatch::store<expanded_decorators>(addr, value);
+      }
+    }
+
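The UseCompressedOops branch just above selects between two compile-time instantiations with an ordinary runtime if, so the flag is consulted once per access while each path stays statically specialized for its oop layout. The same shape as a standalone sketch (names hypothetical):

    // Illustrative only: a runtime flag choosing between template instantiations.
    #include <cstdio>
    static bool use_compressed = true;         // stands in for UseCompressedOops
    template <bool kCompressed>
    static void store_impl() {
      std::printf(kCompressed ? "narrowOop layout\n" : "raw oop layout\n");
    }
    static void store_dispatch() {
      if (use_compressed) {
        store_impl<true>();                    // compressed-oop instantiation
      } else {
        store_impl<false>();                   // uncompressed instantiation
      }
    }
    int main() { store_dispatch(); return 0; }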
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      !HasDecorator<decorators, AS_RAW>::value>::type
+    store(void* addr, T value) {
+      if (is_hardwired_primitive<decorators>()) {
+        const DecoratorSet expanded_decorators = decorators | AS_RAW;
+        PreRuntimeDispatch::store<expanded_decorators>(addr, value);
+      } else {
+        RuntimeDispatch<decorators, T, BARRIER_STORE>::store(addr, value);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      HasDecorator<decorators, AS_RAW>::value>::type
+    store_at(oop base, ptrdiff_t offset, T value) {
+      store<decorators>(field_addr(base, offset), value);
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      !HasDecorator<decorators, AS_RAW>::value>::type
+    store_at(oop base, ptrdiff_t offset, T value) {
+      if (is_hardwired_primitive<decorators>()) {
+        const DecoratorSet expanded_decorators = decorators | AS_RAW;
+        PreRuntimeDispatch::store_at<expanded_decorators>(base, offset, value);
+      } else {
+        RuntimeDispatch<decorators, T, BARRIER_STORE_AT>::store_at(base, offset, value);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
+    load(void* addr) {
+      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
+      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
+        return Raw::template oop_load<T>(addr);
+      } else {
+        return Raw::template load<T>(addr);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
+    load(void* addr) {
+      if (UseCompressedOops) {
+        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
+        return PreRuntimeDispatch::load<expanded_decorators, T>(addr);
+      } else {
+        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
+        return PreRuntimeDispatch::load<expanded_decorators, T>(addr);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      !HasDecorator<decorators, AS_RAW>::value, T>::type
+    load(void* addr) {
+      if (is_hardwired_primitive<decorators>()) {
+        const DecoratorSet expanded_decorators = decorators | AS_RAW;
+        return PreRuntimeDispatch::load<expanded_decorators, T>(addr);
+      } else {
+        return RuntimeDispatch<decorators, T, BARRIER_LOAD>::load(addr);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      HasDecorator<decorators, AS_RAW>::value, T>::type
+    load_at(oop base, ptrdiff_t offset) {
+      return load<decorators, T>(field_addr(base, offset));
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      !HasDecorator<decorators, AS_RAW>::value, T>::type
+    load_at(oop base, ptrdiff_t offset) {
+      if (is_hardwired_primitive<decorators>()) {
+        const DecoratorSet expanded_decorators = decorators | AS_RAW;
+        return PreRuntimeDispatch::load_at<expanded_decorators, T>(base, offset);
+      } else {
+        return RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::load_at(base, offset);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
+    atomic_cmpxchg(T new_value, void* addr, T compare_value) {
+      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
+      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
+        return Raw::oop_atomic_cmpxchg(new_value, addr, compare_value);
+      } else {
+        return Raw::atomic_cmpxchg(new_value, addr, compare_value);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
+    atomic_cmpxchg(T new_value, void* addr, T compare_value) {
+      if (UseCompressedOops) {
+        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
+        return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
+      } else {
+        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
+        return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      !HasDecorator<decorators, AS_RAW>::value, T>::type
+    atomic_cmpxchg(T new_value, void* addr, T compare_value) {
+      if (is_hardwired_primitive<decorators>()) {
+        const DecoratorSet expanded_decorators = decorators | AS_RAW;
+        return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
+      } else {
+        return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::atomic_cmpxchg(new_value, addr, compare_value);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      HasDecorator<decorators, AS_RAW>::value, T>::type
+    atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
+      return atomic_cmpxchg<decorators>(new_value, field_addr(base, offset), compare_value);
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      !HasDecorator<decorators, AS_RAW>::value, T>::type
+    atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
+      if (is_hardwired_primitive<decorators>()) {
+        const DecoratorSet expanded_decorators = decorators | AS_RAW;
+        return PreRuntimeDispatch::atomic_cmpxchg_at<expanded_decorators>(new_value, base, offset, compare_value);
+      } else {
+        return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::atomic_cmpxchg_at(new_value, base, offset, compare_value);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
+    atomic_xchg(T new_value, void* addr) {
+      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
+      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
+        return Raw::oop_atomic_xchg(new_value, addr);
+      } else {
+        return Raw::atomic_xchg(new_value, addr);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
+    atomic_xchg(T new_value, void* addr) {
+      if (UseCompressedOops) {
+        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
+        return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
+      } else {
+        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
+        return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      !HasDecorator<decorators, AS_RAW>::value, T>::type
+    atomic_xchg(T new_value, void* addr) {
+      if (is_hardwired_primitive<decorators>()) {
+        const DecoratorSet expanded_decorators = decorators | AS_RAW;
+        return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
+      } else {
+        return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::atomic_xchg(new_value, addr);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      HasDecorator<decorators, AS_RAW>::value, T>::type
+    atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
+      return atomic_xchg<decorators>(new_value, field_addr(base, offset));
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      !HasDecorator<decorators, AS_RAW>::value, T>::type
+    atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
+      if (is_hardwired_primitive<decorators>()) {
+        const DecoratorSet expanded_decorators = decorators | AS_RAW;
+        return PreRuntimeDispatch::atomic_xchg_at<expanded_decorators>(new_value, base, offset);
+      } else {
+        return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::atomic_xchg_at(new_value, base, offset);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, bool>::type
+    arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
+      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
+      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
+        return Raw::oop_arraycopy(src_obj, dst_obj, src, dst, length);
+      } else {
+        return Raw::arraycopy(src_obj, dst_obj, src, dst, length);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, bool>::type
+    arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
+      if (UseCompressedOops) {
+        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
+        return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, dst_obj, src, dst, length);
+      } else {
+        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
+        return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, dst_obj, src, dst, length);
+      }
+    }
+
+    template <DecoratorSet decorators, typename T>
+    inline static typename EnableIf<
+      !HasDecorator<decorators, AS_RAW>::value, bool>::type
+    arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
+      if (is_hardwired_primitive<decorators>()) {
+        const DecoratorSet expanded_decorators = decorators | AS_RAW;
+        return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, dst_obj, src, dst, length);
+      } else {
+        return RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::arraycopy(src_obj, dst_obj, src, dst, length);
+      }
+    }
+
+    template <DecoratorSet decorators>
+    inline static typename EnableIf<
+      HasDecorator<decorators, AS_RAW>::value>::type
+    clone(oop src, oop dst, size_t size) {
+      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
+      Raw::clone(src, dst, size);
+    }
+
+    template <DecoratorSet decorators>
+    inline static typename EnableIf<
+      !HasDecorator<decorators, AS_RAW>::value>::type
+    clone(oop src, oop dst, size_t size) {
+      RuntimeDispatch<decorators, oop, BARRIER_CLONE>::clone(src, dst, size);
+    }
+
+    template <DecoratorSet decorators>
+    inline static typename EnableIf<
+      HasDecorator<decorators, INTERNAL_BT_TO_SPACE_INVARIANT>::value, oop>::type
+    resolve(oop obj) {
+      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
+      return Raw::resolve(obj);
+    }
+
+    template <DecoratorSet decorators>
+    inline static typename EnableIf<
+      !HasDecorator<decorators, INTERNAL_BT_TO_SPACE_INVARIANT>::value, oop>::type
+    resolve(oop obj) {
+      return RuntimeDispatch<decorators, oop, BARRIER_RESOLVE>::resolve(obj);
+    }
+
+    template <DecoratorSet decorators>
+    inline static typename EnableIf<
+      HasDecorator<decorators, INTERNAL_BT_TO_SPACE_INVARIANT>::value, bool>::type
+    equals(oop o1, oop o2) {
+      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
+      return Raw::equals(o1, o2);
+    }
+
+    template <DecoratorSet decorators>
+    inline static typename EnableIf<
+      !HasDecorator<decorators, INTERNAL_BT_TO_SPACE_INVARIANT>::value, bool>::type
+    equals(oop o1, oop o2) {
+      return RuntimeDispatch<decorators, oop, BARRIER_EQUALS>::equals(o1, o2);
+    }
+  };
+
+  // This class adds implied decorators that follow from the decorator rules,
+  // for example the default reference strength and the default memory
+  // ordering semantics.
+  template <DecoratorSet input_decorators>
+  struct DecoratorFixup: AllStatic {
+    // If no reference strength has been picked, then strong will be picked
+    static const DecoratorSet ref_strength_default = input_decorators |
+      (((ON_DECORATOR_MASK & input_decorators) == 0 && (INTERNAL_VALUE_IS_OOP & input_decorators) != 0) ?
+       ON_STRONG_OOP_REF : INTERNAL_EMPTY);
+    // If no memory ordering has been picked, unordered will be picked
+    static const DecoratorSet memory_ordering_default = ref_strength_default |
+      ((MO_DECORATOR_MASK & ref_strength_default) == 0 ? MO_UNORDERED : INTERNAL_EMPTY);
+    // If no barrier strength has been picked, normal will be used
+    static const DecoratorSet barrier_strength_default = memory_ordering_default |
+      ((AS_DECORATOR_MASK & memory_ordering_default) == 0 ? AS_NORMAL : INTERNAL_EMPTY);
+    // A heap array access implies that the access is also a heap access
+    static const DecoratorSet heap_array_is_in_heap = barrier_strength_default |
+      ((IN_HEAP_ARRAY & barrier_strength_default) != 0 ? IN_HEAP : INTERNAL_EMPTY);
+    static const DecoratorSet conc_root_is_root = heap_array_is_in_heap |
+      ((IN_CONCURRENT_ROOT & heap_array_is_in_heap) != 0 ? IN_ROOT : INTERNAL_EMPTY);
+    static const DecoratorSet archive_root_is_root = conc_root_is_root |
+      ((IN_ARCHIVE_ROOT & conc_root_is_root) != 0 ? IN_ROOT : INTERNAL_EMPTY);
+    static const DecoratorSet value = archive_root_is_root | BT_BUILDTIME_DECORATORS;
+  };
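+
+  // As an illustrative example of the fixup rules above (a sketch, not an
+  // exhaustive list): an oop access created with only INTERNAL_VALUE_IS_OOP
+  // set is expanded to also carry ON_STRONG_OOP_REF, MO_UNORDERED and
+  // AS_NORMAL, whereas an access that explicitly passes MO_SEQ_CST keeps its
+  // memory ordering and only receives the remaining defaults.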
+
+  // Step 2: Reduce types.
+  // Enforce that for non-oop types, T and P have to be strictly the same.
+  // P is the type of the address and T is the type of the values.
+  // For oop types, it is allowed to pass T in {narrowOop, oop} and
+  // P in {narrowOop, oop, HeapWord*}. The following rules apply, according
+  // to the table below (columns are P, rows are T):
+  // |           | HeapWord  |   oop   | narrowOop |
+  // |   oop     |  rt-comp  | hw-none |  hw-comp  |
+  // | narrowOop |     x     |    x    |  hw-none  |
+  //
+  // x means not allowed
+  // rt-comp means it must be checked at runtime whether the oop is compressed.
+  // hw-none means it is statically known the oop will not be compressed.
+  // hw-comp means it is statically known the oop will be compressed.
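+  //
+  // Illustrative examples of the table (see the overloads below): loading
+  // an oop through a narrowOop* is the hw-comp case and statically adds
+  // INTERNAL_CONVERT_COMPRESSED_OOP | INTERNAL_RT_USE_COMPRESSED_OOPS, while
+  // loading an oop through a HeapWord* is the rt-comp case and only adds
+  // INTERNAL_CONVERT_COMPRESSED_OOP, leaving the UseCompressedOops check to
+  // runtime dispatch.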
+
+  template <DecoratorSet decorators, typename T>
+  inline void store_reduce_types(T* addr, T value) {
+    PreRuntimeDispatch::store<decorators>(addr, value);
+  }
+
+  template <DecoratorSet decorators>
+  inline void store_reduce_types(narrowOop* addr, oop value) {
+    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
+                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
+    PreRuntimeDispatch::store<expanded_decorators>(addr, value);
+  }
+
+  template <DecoratorSet decorators>
+  inline void store_reduce_types(narrowOop* addr, narrowOop value) {
+    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
+                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
+    PreRuntimeDispatch::store<expanded_decorators>(addr, value);
+  }
+
+  template <DecoratorSet decorators>
+  inline void store_reduce_types(HeapWord* addr, oop value) {
+    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
+    PreRuntimeDispatch::store<expanded_decorators>(addr, value);
+  }
+
+  template <DecoratorSet decorators, typename T>
+  inline T atomic_cmpxchg_reduce_types(T new_value, T* addr, T compare_value) {
+    return PreRuntimeDispatch::atomic_cmpxchg<decorators>(new_value, addr, compare_value);
+  }
+
+  template <DecoratorSet decorators>
+  inline oop atomic_cmpxchg_reduce_types(oop new_value, narrowOop* addr, oop compare_value) {
+    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
+                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
+    return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
+  }
+
+  template <DecoratorSet decorators>
+  inline narrowOop atomic_cmpxchg_reduce_types(narrowOop new_value, narrowOop* addr, narrowOop compare_value) {
+    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
+                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
+    return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
+  }
+
+  template <DecoratorSet decorators>
+  inline oop atomic_cmpxchg_reduce_types(oop new_value,
+                                         HeapWord* addr,
+                                         oop compare_value) {
+    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
+    return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
+  }
+
+  template <DecoratorSet decorators, typename T>
+  inline T atomic_xchg_reduce_types(T new_value, T* addr) {
+    return PreRuntimeDispatch::atomic_xchg<decorators>(new_value, addr);
+  }
+
+  template <DecoratorSet decorators>
+  inline oop atomic_xchg_reduce_types(oop new_value, narrowOop* addr) {
+    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
+                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
+    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
+  }
+
+  template <DecoratorSet decorators>
+  inline narrowOop atomic_xchg_reduce_types(narrowOop new_value, narrowOop* addr) {
+    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
+                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
+    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
+  }
+
+  template <DecoratorSet decorators>
+  inline oop atomic_xchg_reduce_types(oop new_value, HeapWord* addr) {
+    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
+    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
+  }
+
+  template <DecoratorSet decorators, typename T>
+  inline T load_reduce_types(T* addr) {
+    return PreRuntimeDispatch::load<decorators, T>(addr);
+  }
+
+  template <DecoratorSet decorators, typename T>
+  inline typename OopOrNarrowOop<T>::type load_reduce_types(narrowOop* addr) {
+    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
+                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
+    return PreRuntimeDispatch::load<expanded_decorators, typename OopOrNarrowOop<T>::type>(addr);
+  }
+
+  template <DecoratorSet decorators, typename T>
+  inline oop load_reduce_types(HeapWord* addr) {
+    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
+    return PreRuntimeDispatch::load<expanded_decorators, oop>(addr);
+  }
+
+  template <DecoratorSet decorators, typename T>
+  inline bool arraycopy_reduce_types(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
+    return PreRuntimeDispatch::arraycopy<decorators>(src_obj, dst_obj, src, dst, length);
+  }
+
+  template <DecoratorSet decorators>
+  inline bool arraycopy_reduce_types(arrayOop src_obj, arrayOop dst_obj, HeapWord* src, HeapWord* dst, size_t length) {
+    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
+    return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, dst_obj, src, dst, length);
+  }
+
+  template <DecoratorSet decorators>
+  inline bool arraycopy_reduce_types(arrayOop src_obj, arrayOop dst_obj, narrowOop* src, narrowOop* dst, size_t length) {
+    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
+                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
+    return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, dst_obj, src, dst, length);
+  }
+
+  // Step 1: Set default decorators. This step remembers if a type was volatile
+  // and then sets the MO_VOLATILE decorator by default. Otherwise, a default
+  // memory ordering is set for the access, and the implied decorator rules
+  // are applied to select sensible defaults for decorators that have not been
+  // explicitly set. For example, default object referent strength is set to strong.
+  // This step also decays the types passed in (e.g. getting rid of CV qualifiers
+  // and references). Finally, it performs some verification that the
+  // passed-in types make sense.
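+  //
+  // For example (illustrative): a store through a volatile jint* with no
+  // explicit memory ordering decorator picks up MO_VOLATILE through the
+  // IsVolatile<P> check below, whereas the same store through a plain jint*
+  // defaults to MO_UNORDERED via DecoratorFixup.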
+
+  template <DecoratorSet decorators, typename T>
+  static void verify_types() {
+    // If this fails to compile, then you have sent in something that is
+    // not recognized as a valid primitive type to a primitive Access function.
+    STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // oops have already been validated
+                   (IsPointer<T>::value || IsIntegral<T>::value) ||
+                    IsFloatingPoint<T>::value)); // not allowed primitive type
+  }
+
+  template <DecoratorSet decorators, typename P, typename T>
+  inline void store(P* addr, T value) {
+    verify_types<decorators, T>();
+    typedef typename Decay<P>::type DecayedP;
+    typedef typename Decay<T>::type DecayedT;
+    DecayedT decayed_value = value;
+    // If a volatile address is passed in but no memory ordering decorator,
+    // set the memory ordering to MO_VOLATILE by default.
+    const DecoratorSet expanded_decorators = DecoratorFixup<
+      (IsVolatile<P>::value && !HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
+      (MO_VOLATILE | decorators) : decorators>::value;
+    store_reduce_types<expanded_decorators>(const_cast<DecayedP*>(addr), decayed_value);
+  }
+
+  template <DecoratorSet decorators, typename T>
+  inline void store_at(oop base, ptrdiff_t offset, T value) {
+    verify_types<decorators, T>();
+    typedef typename Decay<T>::type DecayedT;
+    DecayedT decayed_value = value;
+    const DecoratorSet expanded_decorators = DecoratorFixup<decorators |
+                                             (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
+                                              INTERNAL_CONVERT_COMPRESSED_OOP : INTERNAL_EMPTY)>::value;
+    PreRuntimeDispatch::store_at<expanded_decorators>(base, offset, decayed_value);
+  }
+
+  template <DecoratorSet decorators, typename P, typename T>
+  inline T load(P* addr) {
+    verify_types<decorators, T>();
+    typedef typename Decay<P>::type DecayedP;
+    typedef typename Conditional<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
+                                 typename OopOrNarrowOop<T>::type,
+                                 typename Decay<T>::type>::type DecayedT;
+    // If a volatile address is passed in but no memory ordering decorator,
+    // set the memory ordering to MO_VOLATILE by default.
+    const DecoratorSet expanded_decorators = DecoratorFixup<
+      (IsVolatile<P>::value && !HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
+      (MO_VOLATILE | decorators) : decorators>::value;
+    return load_reduce_types<expanded_decorators, DecayedT>(const_cast<DecayedP*>(addr));
+  }
+
+  template <DecoratorSet decorators, typename T>
+  inline T load_at(oop base, ptrdiff_t offset) {
+    verify_types<decorators, T>();
+    typedef typename Conditional<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
+                                 typename OopOrNarrowOop<T>::type,
+                                 typename Decay<T>::type>::type DecayedT;
+    // Expand the decorators (figure out sensible defaults)
+    // Potentially remember if we need compressed oop awareness
+    const DecoratorSet expanded_decorators = DecoratorFixup<decorators |
+                                             (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
+                                              INTERNAL_CONVERT_COMPRESSED_OOP : INTERNAL_EMPTY)>::value;
+    return PreRuntimeDispatch::load_at<expanded_decorators, DecayedT>(base, offset);
+  }
+
+  template <DecoratorSet decorators, typename P, typename T>
+  inline T atomic_cmpxchg(T new_value, P* addr, T compare_value) {
+    verify_types<decorators, T>();
+    typedef typename Decay<P>::type DecayedP;
+    typedef typename Decay<T>::type DecayedT;
+    DecayedT new_decayed_value = new_value;
+    DecayedT compare_decayed_value = compare_value;
+    const DecoratorSet expanded_decorators = DecoratorFixup<
+      (!HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
+      (MO_SEQ_CST | decorators) : decorators>::value;
+    return atomic_cmpxchg_reduce_types<expanded_decorators>(new_decayed_value,
+                                                            const_cast<DecayedP*>(addr),
+                                                            compare_decayed_value);
+  }
+
+  template <DecoratorSet decorators, typename T>
+  inline T atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
+    verify_types<decorators, T>();
+    typedef typename Decay<T>::type DecayedT;
+    DecayedT new_decayed_value = new_value;
+    DecayedT compare_decayed_value = compare_value;
+    // Determine default memory ordering
+    const DecoratorSet expanded_decorators = DecoratorFixup<
+      (!HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
+      (MO_SEQ_CST | decorators) : decorators>::value;
+    // Potentially remember that we need compressed oop awareness
+    const DecoratorSet final_decorators = expanded_decorators |
+                                          (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
+                                           INTERNAL_CONVERT_COMPRESSED_OOP : INTERNAL_EMPTY);
+    return PreRuntimeDispatch::atomic_cmpxchg_at<final_decorators>(new_decayed_value, base,
+                                                                   offset, compare_decayed_value);
+  }
+
+  template <DecoratorSet decorators, typename P, typename T>
+  inline T atomic_xchg(T new_value, P* addr) {
+    verify_types<decorators, T>();
+    typedef typename Decay<P>::type DecayedP;
+    typedef typename Decay<T>::type DecayedT;
+    DecayedT new_decayed_value = new_value;
+    // atomic_xchg is only available in SEQ_CST flavour.
+    const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST>::value;
+    return atomic_xchg_reduce_types<expanded_decorators>(new_decayed_value,
+                                                         const_cast<DecayedP*>(addr));
+  }
+
+  template <DecoratorSet decorators, typename T>
+  inline T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
+    verify_types<decorators, T>();
+    typedef typename Decay<T>::type DecayedT;
+    DecayedT new_decayed_value = new_value;
+    // atomic_xchg is only available in SEQ_CST flavour.
+    const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST |
+                                             (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
+                                              INTERNAL_CONVERT_COMPRESSED_OOP : INTERNAL_EMPTY)>::value;
+    return PreRuntimeDispatch::atomic_xchg_at<expanded_decorators>(new_decayed_value, base, offset);
+  }
+
+  template <DecoratorSet decorators, typename T>
+  inline bool arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
+    STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ||
+                   (IsSame<T, void>::value || IsIntegral<T>::value) ||
+                    IsFloatingPoint<T>::value)); // arraycopy allows type erased void elements
+    typedef typename Decay<T>::type DecayedT;
+    const DecoratorSet expanded_decorators = DecoratorFixup<decorators | IN_HEAP_ARRAY | IN_HEAP>::value;
+    return arraycopy_reduce_types<expanded_decorators>(src_obj, dst_obj,
+                                                       const_cast<DecayedT*>(src),
+                                                       const_cast<DecayedT*>(dst),
+                                                       length);
+  }
+
+  template <DecoratorSet decorators>
+  inline void clone(oop src, oop dst, size_t size) {
+    const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
+    PreRuntimeDispatch::clone<expanded_decorators>(src, dst, size);
+  }
+
+  template <DecoratorSet decorators>
+  inline oop resolve(oop obj) {
+    const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
+    return PreRuntimeDispatch::resolve<expanded_decorators>(obj);
+  }
+
+  template <DecoratorSet decorators>
+  inline bool equals(oop o1, oop o2) {
+    const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
+    return PreRuntimeDispatch::equals<expanded_decorators>(o1, o2);
+  }
+
+  // Infer the type that should be returned from an Access::oop_load.
+  template <typename P, DecoratorSet decorators>
+  class OopLoadProxy: public StackObj {
+  private:
+    P *const _addr;
+  public:
+    OopLoadProxy(P* addr) : _addr(addr) {}
+
+    inline operator oop() {
+      return load<decorators | INTERNAL_VALUE_IS_OOP, P, oop>(_addr);
+    }
+
+    inline operator narrowOop() {
+      return load<decorators | INTERNAL_VALUE_IS_OOP, P, narrowOop>(_addr);
+    }
+
+    template <typename T>
+    inline bool operator ==(const T& other) const {
+      return load<decorators | INTERNAL_VALUE_IS_OOP, P, T>(_addr) == other;
+    }
+
+    template <typename T>
+    inline bool operator !=(const T& other) const {
+      return load<decorators | INTERNAL_VALUE_IS_OOP, P, T>(_addr) != other;
+    }
+  };
+
+  // Infer the type that should be returned from an Access::load_at.
+  template <DecoratorSet decorators>
+  class LoadAtProxy: public StackObj {
+  private:
+    const oop _base;
+    const ptrdiff_t _offset;
+  public:
+    LoadAtProxy(oop base, ptrdiff_t offset) : _base(base), _offset(offset) {}
+
+    template <typename T>
+    inline operator T() const {
+      return load_at<decorators, T>(_base, _offset);
+    }
+
+    template <typename T>
+    inline bool operator ==(const T& other) const { return load_at<decorators, T>(_base, _offset) == other; }
+
+    template <typename T>
+    inline bool operator !=(const T& other) const { return load_at<decorators, T>(_base, _offset) != other; }
+  };
+
+  // Infer the type that should be returned from an Access::oop_load_at.
+  template <DecoratorSet decorators>
+  class OopLoadAtProxy: public StackObj {
+  private:
+    const oop _base;
+    const ptrdiff_t _offset;
+  public:
+    OopLoadAtProxy(oop base, ptrdiff_t offset) : _base(base), _offset(offset) {}
+
+    inline operator oop() const {
+      return load_at<decorators | INTERNAL_VALUE_IS_OOP, oop>(_base, _offset);
+    }
+
+    inline operator narrowOop() const {
+      return load_at<decorators | INTERNAL_VALUE_IS_OOP, narrowOop>(_base, _offset);
+    }
+
+    template <typename T>
+    inline bool operator ==(const T& other) const {
+      return load_at<decorators | INTERNAL_VALUE_IS_OOP, T>(_base, _offset) == other;
+    }
+
+    template <typename T>
+    inline bool operator !=(const T& other) const {
+      return load_at<decorators | INTERNAL_VALUE_IS_OOP, T>(_base, _offset) != other;
+    }
+  };
+}
+
+#endif // SHARE_OOPS_ACCESSBACKEND_HPP
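
Taken together, the three steps above mean that a call site only names the
decorators it cares about and everything else is inferred at compile time. A
minimal usage sketch (illustrative only; obj, offset and addr are assumed to
be a valid oop, a field offset and an oop field address):

    // Normal heap load of an oop field: DecoratorFixup adds ON_STRONG_OOP_REF,
    // MO_UNORDERED and AS_NORMAL, and the access is routed through the GC's
    // barrier set at runtime.
    oop value = HeapAccess<>::oop_load_at(obj, offset);

    // Raw load: hardwired to a direct memory access that bypasses runtime
    // barrier dispatch; only compressed-oop encoding/decoding remains.
    oop raw = RawAccess<>::oop_load(addr);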
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/oops/accessDecorators.hpp	Tue Apr 03 13:15:27 2018 +0200
@@ -0,0 +1,219 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_OOPS_ACCESSDECORATORS_HPP
+#define SHARE_OOPS_ACCESSDECORATORS_HPP
+
+// A decorator is an attribute or property that affects the way a memory access is performed.
+// There are different groups of decorators. Some have to do with memory ordering, others with,
+// e.g., the strength of references, the strength of GC barriers, or whether compression should
+// be applied or not. Some decorators are set at build time, such as whether primitives require
+// GC barriers or not; others at call sites, such as whether an access is in the heap or not;
+// and others are resolved at runtime, such as GC-specific barriers and the encoding/decoding
+// of compressed oops.
+typedef uint64_t DecoratorSet;
+
+// The HasDecorator trait can be used to determine at compile time whether a decorator set
+// intersects with another decorator set.
+template <DecoratorSet decorators, DecoratorSet decorator>
+struct HasDecorator: public IntegralConstant<bool, (decorators & decorator) != 0> {};
+
+// == Internal Decorators - do not use ==
+// * INTERNAL_EMPTY: This is the name for the empty decorator set (in absence of other decorators).
+// * INTERNAL_CONVERT_COMPRESSED_OOP: This is an oop access that will require converting an oop
+//   to a narrowOop or vice versa, if UseCompressedOops is known to be set.
+// * INTERNAL_VALUE_IS_OOP: Remember that the involved access is on oop rather than primitive.
+const DecoratorSet INTERNAL_EMPTY                    = UCONST64(0);
+const DecoratorSet INTERNAL_CONVERT_COMPRESSED_OOP   = UCONST64(1) << 1;
+const DecoratorSet INTERNAL_VALUE_IS_OOP             = UCONST64(1) << 2;
+
+// == Internal build-time Decorators ==
+// * INTERNAL_BT_BARRIER_ON_PRIMITIVES: This is set in the barrierSetConfig.hpp file.
+// * INTERNAL_BT_TO_SPACE_INVARIANT: This is set in the barrierSetConfig.hpp file iff
+//   no GC is bundled in the build that is to-space invariant.
+const DecoratorSet INTERNAL_BT_BARRIER_ON_PRIMITIVES = UCONST64(1) << 3;
+const DecoratorSet INTERNAL_BT_TO_SPACE_INVARIANT    = UCONST64(1) << 4;
+
+// == Internal run-time Decorators ==
+// * INTERNAL_RT_USE_COMPRESSED_OOPS: This decorator will be set in runtime resolved
+//   access backends iff UseCompressedOops is true.
+const DecoratorSet INTERNAL_RT_USE_COMPRESSED_OOPS   = UCONST64(1) << 5;
+
+const DecoratorSet INTERNAL_DECORATOR_MASK           = INTERNAL_CONVERT_COMPRESSED_OOP | INTERNAL_VALUE_IS_OOP |
+                                                       INTERNAL_BT_BARRIER_ON_PRIMITIVES | INTERNAL_RT_USE_COMPRESSED_OOPS;
+
+// == Memory Ordering Decorators ==
+// The memory ordering decorators can be described in the following way:
+// === Decorator Rules ===
+// The different types of memory ordering guarantees have a strict order of strength.
+// Explicitly specifying a stronger ordering implies that the guarantees of the weaker
+// properties hold too. The names come from the C++11 atomic operations, and typically
+// have an equivalent JMM property.
+// The equivalence may be viewed like this:
+// MO_UNORDERED is equivalent to JMM plain.
+// MO_VOLATILE has no equivalence in JMM, because it's a C++ thing.
+// MO_RELAXED is equivalent to JMM opaque.
+// MO_ACQUIRE is equivalent to JMM acquire.
+// MO_RELEASE is equivalent to JMM release.
+// MO_SEQ_CST is equivalent to JMM volatile.
+//
+// === Stores ===
+//  * MO_UNORDERED (Default): No guarantees.
+//    - The compiler and hardware are free to reorder aggressively. And they will.
+//  * MO_VOLATILE: Volatile stores (in the C++ sense).
+//    - The stores are not reordered by the compiler (but possibly the HW) w.r.t. other
+//      volatile accesses in program order (but possibly non-volatile accesses).
+//  * MO_RELAXED: Relaxed atomic stores.
+//    - The stores are atomic.
+//    - Guarantees from volatile stores hold.
+//  * MO_RELEASE: Releasing stores.
+//    - The releasing store will make its preceding memory accesses observable to memory accesses
+//      subsequent to an acquiring load observing this releasing store.
+//    - Guarantees from relaxed stores hold.
+//  * MO_SEQ_CST: Sequentially consistent stores.
+//    - The stores are observed in the same order by MO_SEQ_CST loads on other processors.
+//    - Preceding loads and stores in program order are not reordered with subsequent loads and stores in program order.
+//    - Guarantees from releasing stores hold.
+// === Loads ===
+//  * MO_UNORDERED (Default): No guarantees
+//    - The compiler and hardware are free to reorder aggressively. And they will.
+//  * MO_VOLATILE: Volatile loads (in the C++ sense).
+//    - The loads are not reordered by the compiler (but possibly the HW) w.r.t. other
+//      volatile accesses in program order (but possibly non-volatile accesses).
+//  * MO_RELAXED: Relaxed atomic loads.
+//    - The loads are atomic.
+//    - Guarantees from volatile loads hold.
+//  * MO_ACQUIRE: Acquiring loads.
+//    - An acquiring load will make subsequent memory accesses observe the memory accesses
+//      preceding the releasing store that the acquiring load observed.
+//    - Guarantees from relaxed loads hold.
+//  * MO_SEQ_CST: Sequentially consistent loads.
+//    - These loads observe MO_SEQ_CST stores in the same order on other processors.
+//    - Preceding loads and stores in program order are not reordered with subsequent loads and stores in program order.
+//    - Guarantees from acquiring loads hold.
+// === Atomic Cmpxchg ===
+//  * MO_RELAXED: Atomic but relaxed cmpxchg.
+//    - Guarantees from MO_RELAXED loads and MO_RELAXED stores hold unconditionally.
+//  * MO_SEQ_CST: Sequentially consistent cmpxchg.
+//    - Guarantees from MO_SEQ_CST loads and MO_SEQ_CST stores hold unconditionally.
+// === Atomic Xchg ===
+//  * MO_RELAXED: Atomic but relaxed atomic xchg.
+//    - Guarantees from MO_RELAXED loads and MO_RELAXED stores hold.
+//  * MO_SEQ_CST: Sequentially consistent xchg.
+//    - Guarantees from MO_SEQ_CST loads and MO_SEQ_CST stores hold.
+const DecoratorSet MO_UNORDERED      = UCONST64(1) << 6;
+const DecoratorSet MO_VOLATILE       = UCONST64(1) << 7;
+const DecoratorSet MO_RELAXED        = UCONST64(1) << 8;
+const DecoratorSet MO_ACQUIRE        = UCONST64(1) << 9;
+const DecoratorSet MO_RELEASE        = UCONST64(1) << 10;
+const DecoratorSet MO_SEQ_CST        = UCONST64(1) << 11;
+const DecoratorSet MO_DECORATOR_MASK = MO_UNORDERED | MO_VOLATILE | MO_RELAXED |
+                                       MO_ACQUIRE | MO_RELEASE | MO_SEQ_CST;
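+
+// For example (an illustrative pairing, not part of the decorator definitions):
+// a writer can publish a field with HeapAccess<MO_RELEASE>::store_at(obj, offset, v)
+// and a reader can pair it with HeapAccess<MO_ACQUIRE>::load_at(obj, offset),
+// giving the release/acquire ordering described above.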
+
+// === Barrier Strength Decorators ===
+// * AS_RAW: The access will translate into a raw memory access, hence ignoring all semantic concerns
+//   except memory ordering and compressed oops. This will bypass runtime function pointer dispatching
+//   in the pipeline and hardwire to raw accesses without going through the GC access barriers.
+//  - Accesses on oop* translate to raw memory accesses without runtime checks
+//  - Accesses on narrowOop* translate to encoded/decoded memory accesses without runtime checks
+//  - Accesses on HeapWord* translate to a runtime check choosing one of the above
+//  - Accesses on other types translate to raw memory accesses without runtime checks
+// * AS_DEST_NOT_INITIALIZED: This property can be important to e.g. SATB barriers by
+//   marking that the previous value is uninitialized nonsense rather than a real value.
+// * AS_NO_KEEPALIVE: The barrier is used only on oop references and will not keep any involved objects
+//   alive, regardless of the type of reference being accessed. It will, however, perform the memory
+//   access in a consistent way w.r.t., e.g., concurrent compaction, so that the right field is
+//   accessed, and maintain, e.g., intergenerational or interregional pointers if applicable. This
+//   should be used with extreme caution in isolated scopes.
+// * AS_NORMAL: The accesses will be resolved to an accessor on the BarrierSet class, leaving it
+//   to the GC to perform the access and decide which barriers to apply. This is the default.
+//   Note that primitive accesses will only be resolved on the barrier set if the appropriate build-time
+//   decorator for enabling primitive barriers is enabled for the build.
+const DecoratorSet AS_RAW                  = UCONST64(1) << 12;
+const DecoratorSet AS_DEST_NOT_INITIALIZED = UCONST64(1) << 13;
+const DecoratorSet AS_NO_KEEPALIVE         = UCONST64(1) << 14;
+const DecoratorSet AS_NORMAL               = UCONST64(1) << 15;
+const DecoratorSet AS_DECORATOR_MASK       = AS_RAW | AS_DEST_NOT_INITIALIZED |
+                                             AS_NO_KEEPALIVE | AS_NORMAL;
+
+// === Reference Strength Decorators ===
+// These decorators only apply to accesses on oop-like types (oop/narrowOop).
+// * ON_STRONG_OOP_REF: Memory access is performed on a strongly reachable reference.
+// * ON_WEAK_OOP_REF: The memory access is performed on a weakly reachable reference.
+// * ON_PHANTOM_OOP_REF: The memory access is performed on a phantomly reachable reference.
+//   This is the same ring of strength as jweak and weak oops in the VM.
+// * ON_UNKNOWN_OOP_REF: The memory access is performed on a reference of unknown strength.
+//   This could for example come from the unsafe API.
+// * Default (no explicit reference strength specified): ON_STRONG_OOP_REF
+const DecoratorSet ON_STRONG_OOP_REF  = UCONST64(1) << 16;
+const DecoratorSet ON_WEAK_OOP_REF    = UCONST64(1) << 17;
+const DecoratorSet ON_PHANTOM_OOP_REF = UCONST64(1) << 18;
+const DecoratorSet ON_UNKNOWN_OOP_REF = UCONST64(1) << 19;
+const DecoratorSet ON_DECORATOR_MASK  = ON_STRONG_OOP_REF | ON_WEAK_OOP_REF |
+                                        ON_PHANTOM_OOP_REF | ON_UNKNOWN_OOP_REF;
+
+// === Access Location ===
+// Accesses can take place in, e.g., the heap, the old or young generation, and different native roots.
+// The location is important to the GC as it may imply different actions. The following decorators are used:
+// * IN_HEAP: The access is performed in the heap. Many barriers such as card marking will
+//   be omitted if this decorator is not set.
+// * IN_HEAP_ARRAY: The access is performed on a heap-allocated array. This is sometimes a special case
+//   for some GCs, and implies IN_HEAP.
+// * IN_ROOT: The access is performed in an off-heap data structure pointing into the Java heap.
+// * IN_CONCURRENT_ROOT: The access is performed in an off-heap data structure pointing into the Java heap,
+//   but is notably not scanned during safepoints. This is sometimes a special case for some GCs and
+//   implies IN_ROOT as well.
+// * IN_ARCHIVE_ROOT: The access is performed on a root pointing into the archived part of the Java heap,
+//   and implies IN_ROOT as well (cf. the DecoratorFixup rules in accessBackend.hpp).
+const DecoratorSet IN_HEAP            = UCONST64(1) << 20;
+const DecoratorSet IN_HEAP_ARRAY      = UCONST64(1) << 21;
+const DecoratorSet IN_ROOT            = UCONST64(1) << 22;
+const DecoratorSet IN_CONCURRENT_ROOT = UCONST64(1) << 23;
+const DecoratorSet IN_ARCHIVE_ROOT    = UCONST64(1) << 24;
+const DecoratorSet IN_DECORATOR_MASK  = IN_HEAP | IN_HEAP_ARRAY |
+                                        IN_ROOT | IN_CONCURRENT_ROOT |
+                                        IN_ARCHIVE_ROOT;
+
+// == Value Decorators ==
+// * OOP_NOT_NULL: The oop value is guaranteed non-null, which can make certain barriers faster,
+//   such as the encoding of compressed oops.
+const DecoratorSet OOP_NOT_NULL       = UCONST64(1) << 25;
+const DecoratorSet OOP_DECORATOR_MASK = OOP_NOT_NULL;
+
+// == Arraycopy Decorators ==
+// * ARRAYCOPY_CHECKCAST: This property means that the classes of the objects in the source
+//   array are not guaranteed to be subclasses of the element class of the destination array.
+//   This requires a check-cast barrier during the copying operation. If this is not set, it is
+//   assumed that the arrays are covariant (the source array type is-a destination array type).
+// * ARRAYCOPY_DISJOINT: This property means that it is known that the two array ranges
+//   are disjoint.
+// * ARRAYCOPY_ARRAYOF: The copy is in the arrayof form.
+// * ARRAYCOPY_ATOMIC: The accesses have to be atomic over the size of the array elements.
+// * ARRAYCOPY_ALIGNED: The accesses have to be aligned on a HeapWord boundary.
+const DecoratorSet ARRAYCOPY_CHECKCAST            = UCONST64(1) << 26;
+const DecoratorSet ARRAYCOPY_DISJOINT             = UCONST64(1) << 27;
+const DecoratorSet ARRAYCOPY_ARRAYOF              = UCONST64(1) << 28;
+const DecoratorSet ARRAYCOPY_ATOMIC               = UCONST64(1) << 29;
+const DecoratorSet ARRAYCOPY_ALIGNED              = UCONST64(1) << 30;
+const DecoratorSet ARRAYCOPY_DECORATOR_MASK       = ARRAYCOPY_CHECKCAST | ARRAYCOPY_DISJOINT |
+                                                    ARRAYCOPY_ARRAYOF | ARRAYCOPY_ATOMIC |
+                                                    ARRAYCOPY_ALIGNED;
+
+#endif // SHARE_OOPS_ACCESSDECORATORS_HPP
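
Since a DecoratorSet is just a 64-bit integral constant, all decorator
reasoning stays compile-time. A small sketch of how code can test for
properties (illustrative; MyDecorators is a made-up alias):

    const DecoratorSet MyDecorators = IN_HEAP | MO_RELAXED | ON_WEAK_OOP_REF;
    STATIC_ASSERT(HasDecorator<MyDecorators, MO_RELAXED>::value);         // exact bit
    STATIC_ASSERT(HasDecorator<MyDecorators, ON_DECORATOR_MASK>::value);  // any bit in a group
    STATIC_ASSERT(!HasDecorator<MyDecorators, AS_RAW>::value);            // absent bit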
--- a/src/hotspot/share/oops/constantPool.cpp	Tue Apr 03 10:27:46 2018 +0200
+++ b/src/hotspot/share/oops/constantPool.cpp	Tue Apr 03 13:15:27 2018 +0200
@@ -841,7 +841,7 @@
   if (cache_index >= 0) {
     result_oop = this_cp->resolved_references()->obj_at(cache_index);
     if (result_oop != NULL) {
-      if (result_oop == Universe::the_null_sentinel()) {
+      if (oopDesc::equals(result_oop, Universe::the_null_sentinel())) {
         DEBUG_ONLY(int temp_index = (index >= 0 ? index : this_cp->object_to_cp_index(cache_index)));
         assert(this_cp->tag_at(temp_index).is_dynamic_constant(), "only condy uses the null sentinel");
         result_oop = NULL;
@@ -1074,12 +1074,12 @@
     } else {
       // Return the winning thread's result.  This can be different than
       // the result here for MethodHandles.
-      if (old_result == Universe::the_null_sentinel())
+      if (oopDesc::equals(old_result, Universe::the_null_sentinel()))
         old_result = NULL;
       return old_result;
     }
   } else {
-    assert(result_oop != Universe::the_null_sentinel(), "");
+    assert(!oopDesc::equals(result_oop, Universe::the_null_sentinel()), "");
     return result_oop;
   }
 }
@@ -1245,7 +1245,7 @@
 oop ConstantPool::string_at_impl(const constantPoolHandle& this_cp, int which, int obj_index, TRAPS) {
   // If the string has already been interned, this entry will be non-null
   oop str = this_cp->resolved_references()->obj_at(obj_index);
-  assert(str != Universe::the_null_sentinel(), "");
+  assert(!oopDesc::equals(str, Universe::the_null_sentinel()), "");
   if (str != NULL) return str;
   Symbol* sym = this_cp->unresolved_string_at(which);
   str = StringTable::intern(sym, CHECK_(NULL));
--- a/src/hotspot/share/oops/instanceKlass.cpp	Tue Apr 03 10:27:46 2018 +0200
+++ b/src/hotspot/share/oops/instanceKlass.cpp	Tue Apr 03 13:15:27 2018 +0200
@@ -2401,7 +2401,7 @@
   // and package entries. Both must be the same. This rule
   // applies even to classes that are defined in the unnamed
   // package, they still must have the same class loader.
-  if ((classloader1 == classloader2) && (classpkg1 == classpkg2)) {
+  if (oopDesc::equals(classloader1, classloader2) && (classpkg1 == classpkg2)) {
     return true;
   }
 
@@ -2412,7 +2412,7 @@
 // and classname information is enough to determine a class's package
 bool InstanceKlass::is_same_class_package(oop other_class_loader,
                                           const Symbol* other_class_name) const {
-  if (class_loader() != other_class_loader) {
+  if (!oopDesc::equals(class_loader(), other_class_loader)) {
     return false;
   }
   if (name()->fast_compare(other_class_name) == 0) {
--- a/src/hotspot/share/oops/klassVtable.cpp	Tue Apr 03 10:27:46 2018 +0200
+++ b/src/hotspot/share/oops/klassVtable.cpp	Tue Apr 03 13:15:27 2018 +0200
@@ -497,7 +497,7 @@
           // to link to the first super, and we get all the others.
           Handle super_loader(THREAD, super_klass->class_loader());
 
-          if (target_loader() != super_loader()) {
+          if (!oopDesc::equals(target_loader(), super_loader())) {
             ResourceMark rm(THREAD);
             Symbol* failed_type_symbol =
               SystemDictionary::check_signature_loaders(signature, target_loader,
@@ -1226,7 +1226,7 @@
       // if checkconstraints requested
       if (checkconstraints) {
         Handle method_holder_loader (THREAD, target->method_holder()->class_loader());
-        if (method_holder_loader() != interface_loader()) {
+        if (!oopDesc::equals(method_holder_loader(), interface_loader())) {
           ResourceMark rm(THREAD);
           Symbol* failed_type_symbol =
             SystemDictionary::check_signature_loaders(m->signature(),
--- a/src/hotspot/share/oops/objArrayKlass.cpp	Tue Apr 03 10:27:46 2018 +0200
+++ b/src/hotspot/share/oops/objArrayKlass.cpp	Tue Apr 03 13:15:27 2018 +0200
@@ -220,7 +220,7 @@
 // Either oop or narrowOop depending on UseCompressedOops.
 template <class T> void ObjArrayKlass::do_copy(arrayOop s, T* src,
                                arrayOop d, T* dst, int length, TRAPS) {
-  if (s == d) {
+  if (oopDesc::equals(s, d)) {
     // since source and destination are equal we do not need conversion checks.
     assert(length > 0, "sanity check");
     HeapAccess<>::oop_arraycopy(s, d, src, dst, length);
--- a/src/hotspot/share/oops/oop.hpp	Tue Apr 03 10:27:46 2018 +0200
+++ b/src/hotspot/share/oops/oop.hpp	Tue Apr 03 13:15:27 2018 +0200
@@ -142,6 +142,8 @@
     }
   }
 
+  inline static bool equals(oop o1, oop o2) { return Access<>::equals(o1, o2); }
+
   // Access to fields in a instanceOop through these methods.
   template <DecoratorSet decorator>
   oop obj_field_access(int offset) const;
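
With oopDesc::equals in place, the mechanical rewrite applied throughout the
rest of this change always has the same shape (a sketch; lhs and rhs stand
for arbitrary oops):

    // Before: compares raw pointer bits, which a GC that moves objects
    // concurrently cannot guarantee to be canonical.
    //   if (lhs == rhs) { ... }
    // After: routes through Access<>::equals, which to-space invariant GCs
    // reduce back to a plain pointer compare.
    if (oopDesc::equals(lhs, rhs)) { ... }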
--- a/src/hotspot/share/prims/jni.cpp	Tue Apr 03 10:27:46 2018 +0200
+++ b/src/hotspot/share/prims/jni.cpp	Tue Apr 03 13:15:27 2018 +0200
@@ -583,7 +583,7 @@
   oop super_mirror = JNIHandles::resolve_non_null(super);
   if (java_lang_Class::is_primitive(sub_mirror) ||
       java_lang_Class::is_primitive(super_mirror)) {
-    jboolean ret = (sub_mirror == super_mirror);
+    jboolean ret = oopDesc::equals(sub_mirror, super_mirror);
 
     HOTSPOT_JNI_ISASSIGNABLEFROM_RETURN(ret);
     return ret;
@@ -823,7 +823,7 @@
 
   oop a = JNIHandles::resolve(r1);
   oop b = JNIHandles::resolve(r2);
-  jboolean ret = (a == b) ? JNI_TRUE : JNI_FALSE;
+  jboolean ret = oopDesc::equals(a, b) ? JNI_TRUE : JNI_FALSE;
 
   HOTSPOT_JNI_ISSAMEOBJECT_RETURN(ret);
   return ret;
--- a/src/hotspot/share/prims/jvm.cpp	Tue Apr 03 10:27:46 2018 +0200
+++ b/src/hotspot/share/prims/jvm.cpp	Tue Apr 03 13:15:27 2018 +0200
@@ -1364,7 +1364,7 @@
       protection_domain = method->method_holder()->protection_domain();
     }
 
-    if ((previous_protection_domain != protection_domain) && (protection_domain != NULL)) {
+    if ((!oopDesc::equals(previous_protection_domain, protection_domain)) && (protection_domain != NULL)) {
       local_array->push(protection_domain);
       previous_protection_domain = protection_domain;
     }
--- a/src/hotspot/share/prims/methodHandles.cpp	Tue Apr 03 10:27:46 2018 +0200
+++ b/src/hotspot/share/prims/methodHandles.cpp	Tue Apr 03 13:15:27 2018 +0200
@@ -972,7 +972,7 @@
         if (!java_lang_invoke_MemberName::is_instance(result()))
           return -99;  // caller bug!
         oop saved = MethodHandles::init_field_MemberName(result, st.field_descriptor());
-        if (saved != result())
+        if (!oopDesc::equals(saved, result()))
           results->obj_at_put(rfill-1, saved);  // show saved instance to user
       } else if (++overflow >= overflow_limit) {
         match_flags = 0; break; // got tired of looking at overflow
@@ -1024,7 +1024,7 @@
           return -99;  // caller bug!
         CallInfo info(m, NULL, CHECK_0);
         oop saved = MethodHandles::init_method_MemberName(result, info);
-        if (saved != result())
+        if (!oopDesc::equals(saved, result()))
           results->obj_at_put(rfill-1, saved);  // show saved instance to user
       } else if (++overflow >= overflow_limit) {
         match_flags = 0; break; // got tired of looking at overflow
--- a/src/hotspot/share/prims/stackwalk.cpp	Tue Apr 03 10:27:46 2018 +0200
+++ b/src/hotspot/share/prims/stackwalk.cpp	Tue Apr 03 13:15:27 2018 +0200
@@ -48,7 +48,7 @@
 bool BaseFrameStream::check_magic(objArrayHandle frames_array) {
   oop   m1 = frames_array->obj_at(magic_pos);
   jlong m2 = _anchor;
-  if (m1 == _thread->threadObj() && m2 == address_value())  return true;
+  if (oopDesc::equals(m1, _thread->threadObj()) && m2 == address_value())  return true;
   return false;
 }
 
@@ -79,7 +79,7 @@
 {
   assert(thread != NULL && thread->is_Java_thread(), "");
   oop m1 = frames_array->obj_at(magic_pos);
-  if (m1 != thread->threadObj())      return NULL;
+  if (!oopDesc::equals(m1, thread->threadObj())) return NULL;
   if (magic == 0L)                    return NULL;
   BaseFrameStream* stream = (BaseFrameStream*) (intptr_t) magic;
   if (!stream->is_valid_in(thread, frames_array))   return NULL;
--- a/src/hotspot/share/prims/unsafe.cpp	Tue Apr 03 10:27:46 2018 +0200
+++ b/src/hotspot/share/prims/unsafe.cpp	Tue Apr 03 13:15:27 2018 +0200
@@ -897,7 +897,7 @@
   oop p = JNIHandles::resolve(obj);
   assert_field_offset_sane(p, offset);
   oop ret = HeapAccess<ON_UNKNOWN_OOP_REF>::oop_atomic_cmpxchg_at(x, p, (ptrdiff_t)offset, e);
-  return ret == e;
+  return oopDesc::equals(ret, e);
 } UNSAFE_END
 
 UNSAFE_ENTRY(jboolean, Unsafe_CompareAndSetInt(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jint e, jint x)) {
--- a/src/hotspot/share/runtime/biasedLocking.cpp	Tue Apr 03 10:27:46 2018 +0200
+++ b/src/hotspot/share/runtime/biasedLocking.cpp	Tue Apr 03 13:15:27 2018 +0200
@@ -254,7 +254,7 @@
   BasicLock* highest_lock = NULL;
   for (int i = 0; i < cached_monitor_info->length(); i++) {
     MonitorInfo* mon_info = cached_monitor_info->at(i);
-    if (mon_info->owner() == obj) {
+    if (oopDesc::equals(mon_info->owner(), obj)) {
       log_trace(biasedlocking)("   mon_info->owner (" PTR_FORMAT ") == obj (" PTR_FORMAT ")",
                                p2i((void *) mon_info->owner()),
                                p2i((void *) obj));
--- a/src/hotspot/share/runtime/handles.hpp	Tue Apr 03 10:27:46 2018 +0200
+++ b/src/hotspot/share/runtime/handles.hpp	Tue Apr 03 13:15:27 2018 +0200
@@ -77,8 +77,9 @@
   // General access
   oop     operator () () const                   { return obj(); }
   oop     operator -> () const                   { return non_null_obj(); }
-  bool    operator == (oop o) const              { return obj() == o; }
-  bool    operator == (const Handle& h) const          { return obj() == h.obj(); }
+
+  bool operator == (oop o) const                 { return oopDesc::equals(obj(), o); }
+  bool operator == (const Handle& h) const       { return oopDesc::equals(obj(), h.obj()); }
 
   // Null checks
   bool    is_null() const                        { return _handle == NULL; }
--- a/src/hotspot/share/runtime/reflection.cpp	Tue Apr 03 10:27:46 2018 +0200
+++ b/src/hotspot/share/runtime/reflection.cpp	Tue Apr 03 13:15:27 2018 +0200
@@ -418,7 +418,7 @@
     assert(lower_dim->is_array_klass(), "just checking");
     result2 = lower_dim->java_mirror();
   }
-  assert(result == result2, "results must be consistent");
+  assert(oopDesc::equals(result, result2), "results must be consistent");
 #endif //ASSERT
   return result;
 }
--- a/src/hotspot/share/runtime/synchronizer.cpp	Tue Apr 03 10:27:46 2018 +0200
+++ b/src/hotspot/share/runtime/synchronizer.cpp	Tue Apr 03 13:15:27 2018 +0200
@@ -173,7 +173,7 @@
 
   if (mark->has_monitor()) {
     ObjectMonitor * const mon = mark->monitor();
-    assert(mon->object() == obj, "invariant");
+    assert(oopDesc::equals((oop) mon->object(), obj), "invariant");
     if (mon->owner() != self) return false;  // slow-path for IMS exception
 
     if (mon->first_waiter() != NULL) {
@@ -217,7 +217,7 @@
 
   if (mark->has_monitor()) {
     ObjectMonitor * const m = mark->monitor();
-    assert(m->object() == obj, "invariant");
+    assert(oopDesc::equals((oop) m->object(), obj), "invariant");
     Thread * const owner = (Thread *) m->_owner;
 
     // Lock contention and Transactional Lock Elision (TLE) diagnostics
@@ -1404,7 +1404,7 @@
     if (mark->has_monitor()) {
       ObjectMonitor * inf = mark->monitor();
       assert(inf->header()->is_neutral(), "invariant");
-      assert(inf->object() == object, "invariant");
+      assert(oopDesc::equals((oop) inf->object(), object), "invariant");
       assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
       return inf;
     }
--- a/src/hotspot/share/services/memoryManager.hpp	Tue Apr 03 10:27:46 2018 +0200
+++ b/src/hotspot/share/services/memoryManager.hpp	Tue Apr 03 13:15:27 2018 +0200
@@ -27,6 +27,7 @@
 
 #include "gc/shared/gcCause.hpp"
 #include "memory/allocation.hpp"
+#include "oops/oop.hpp"
 #include "oops/oopsHierarchy.hpp"
 #include "runtime/handles.hpp"
 #include "runtime/timer.hpp"
@@ -68,7 +69,7 @@
 
   void add_pool(MemoryPool* pool);
 
-  bool is_manager(instanceHandle mh)     { return mh() == _memory_mgr_obj; }
+  bool is_manager(instanceHandle mh)     { return oopDesc::equals(mh(), _memory_mgr_obj); }
 
   virtual instanceOop get_memory_manager_instance(TRAPS);
   virtual bool is_gc_memory_manager()    { return false; }
--- a/src/hotspot/share/services/memoryPool.hpp	Tue Apr 03 10:27:46 2018 +0200
+++ b/src/hotspot/share/services/memoryPool.hpp	Tue Apr 03 13:15:27 2018 +0200
@@ -26,6 +26,7 @@
 #define SHARE_VM_SERVICES_MEMORYPOOL_HPP
 
 #include "memory/heap.hpp"
+#include "oops/oop.hpp"
 #include "services/memoryUsage.hpp"
 #include "utilities/macros.hpp"
 
@@ -92,7 +93,7 @@
   // max size could be changed
   virtual size_t max_size()    const       { return _max_size; }
 
-  bool is_pool(instanceHandle pool) { return (pool() == _memory_pool_obj); }
+  bool is_pool(instanceHandle pool) { return oopDesc::equals(pool(), _memory_pool_obj); }
 
   bool available_for_allocation()   { return _available_for_allocation; }
   bool set_available_for_allocation(bool value) {
--- a/src/hotspot/share/services/threadService.cpp	Tue Apr 03 10:27:46 2018 +0200
+++ b/src/hotspot/share/services/threadService.cpp	Tue Apr 03 13:15:27 2018 +0200
@@ -607,7 +607,7 @@
     for (int j = 0; j < len; j++) {
       oop monitor = locked_monitors->at(j);
       assert(monitor != NULL, "must be a Java object");
-      if (monitor == object) {
+      if (oopDesc::equals(monitor, object)) {
         found = true;
         break;
       }
--- a/src/hotspot/share/utilities/exceptions.cpp	Tue Apr 03 10:27:46 2018 +0200
+++ b/src/hotspot/share/utilities/exceptions.cpp	Tue Apr 03 13:15:27 2018 +0200
@@ -443,9 +443,9 @@
 volatile int Exceptions::_out_of_memory_error_class_metaspace_errors = 0;
 
 void Exceptions::count_out_of_memory_exceptions(Handle exception) {
-  if (exception() == Universe::out_of_memory_error_metaspace()) {
+  if (oopDesc::equals(exception(), Universe::out_of_memory_error_metaspace())) {
      Atomic::inc(&_out_of_memory_error_metaspace_errors);
-  } else if (exception() == Universe::out_of_memory_error_class_metaspace()) {
+  } else if (oopDesc::equals(exception(), Universe::out_of_memory_error_class_metaspace())) {
      Atomic::inc(&_out_of_memory_error_class_metaspace_errors);
   } else {
      // everything else reported as java heap OOM
--- a/src/hotspot/share/utilities/growableArray.hpp	Tue Apr 03 10:27:46 2018 +0200
+++ b/src/hotspot/share/utilities/growableArray.hpp	Tue Apr 03 13:15:27 2018 +0200
@@ -26,6 +26,7 @@
 #define SHARE_VM_UTILITIES_GROWABLEARRAY_HPP
 
 #include "memory/allocation.hpp"
+#include "oops/oop.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/ostream.hpp"
@@ -211,6 +212,15 @@
 
   void print();
 
+  inline static bool safe_equals(oop obj1, oop obj2) {
+    return oopDesc::equals(obj1, obj2);
+  }
+
+  template <class X>
+  inline static bool safe_equals(X i1, X i2) {
+    return i1 == i2;
+  }
+
   int append(const E& elem) {
     check_nesting();
     if (_len == _max) grow(_len);
@@ -295,7 +305,7 @@
 
   bool contains(const E& elem) const {
     for (int i = 0; i < _len; i++) {
-      if (_data[i] == elem) return true;
+      if (safe_equals(_data[i], elem)) return true;
     }
     return false;
   }
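
The two safe_equals overloads make GrowableArray::contains pick the right
comparison by ordinary overload resolution (a sketch; the arrays and values
are hypothetical):

    GrowableArray<oop>* oops = new GrowableArray<oop>();
    GrowableArray<int>* ints = new GrowableArray<int>();
    // For oop elements, safe_equals(oop, oop) is selected and the compare
    // goes through oopDesc::equals; for int elements the template overload
    // is selected and the compare stays a plain ==.
    bool has_obj = oops->contains(some_oop);
    bool has_int = ints->contains(42);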