hotspot/src/share/vm/oops/oop.inline.hpp
changeset 35498 392b50de06c6
parent 33611 9abd65805e19
child 35862 411842d0c882
--- a/hotspot/src/share/vm/oops/oop.inline.hpp	Tue Jan 12 21:17:13 2016 +0400
+++ b/hotspot/src/share/vm/oops/oop.inline.hpp	Mon Jan 04 15:41:05 2016 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -41,17 +41,65 @@
 #include "runtime/os.hpp"
 #include "utilities/macros.hpp"
 
+inline void update_barrier_set(void* p, oop v, bool release = false) {
+  assert(oopDesc::bs() != NULL, "Uninitialized bs in oop!");
+  oopDesc::bs()->write_ref_field(p, v, release);
+}
+
+template <class T> inline void update_barrier_set_pre(T* p, oop v) {
+  oopDesc::bs()->write_ref_field_pre(p, v);
+}
+
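+// update_barrier_set() and update_barrier_set_pre() above route reference
+// stores through the active BarrierSet: write_ref_field() is the post-write
+// barrier (e.g. card marking), write_ref_field_pre() is the pre-write barrier
+// (e.g. G1's SATB enqueue).
+//
+// oop_store() performs a complete heap reference store: pre-write barrier,
+// (possibly compressed) store of v into *p, then post-write barrier. The
+// volatile overload below additionally uses release semantics for the field
+// store and the card mark.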
+template <class T> void oop_store(T* p, oop v) {
+  if (always_do_update_barrier) {
+    oop_store((volatile T*)p, v);
+  } else {
+    update_barrier_set_pre(p, v);
+    oopDesc::encode_store_heap_oop(p, v);
+    // always_do_update_barrier == false =>
+    // Either we are at a safepoint (in GC) or CMS is not used. In both
+    // cases it's unnecessary to mark the card as dirty with release semantics.
+    update_barrier_set((void*)p, v, false /* release */);  // cast away type
+  }
+}
+
+template <class T> void oop_store(volatile T* p, oop v) {
+  update_barrier_set_pre((T*)p, v);   // cast away volatile
+  // Used by release_obj_field_put, so use release_store_ptr.
+  oopDesc::release_encode_store_heap_oop(p, v);
+  // When using CMS we must mark the card corresponding to p as dirty
+  // with release semantics to prevent CMS from seeing the dirty card
+  // but not the new value v at p, due to reordering of the two
+  // stores. Note that CMS has a concurrent precleaning phase, where
+  // it reads the card table while the Java threads are running.
+  update_barrier_set((void*)p, v, true /* release */);    // cast away type
+}
+
+// Should replace *addr = oop assignments where addr type depends on UseCompressedOops
+// (without having to remember the function name this calls).
+inline void oop_store_raw(HeapWord* addr, oop value) {
+  if (UseCompressedOops) {
+    oopDesc::encode_store_heap_oop((narrowOop*)addr, value);
+  } else {
+    oopDesc::encode_store_heap_oop((oop*)addr, value);
+  }
+}
+
 // Implementation of all inlined member functions defined in oop.hpp
 // We need a separate file to avoid circular references
 
-inline void oopDesc::release_set_mark(markOop m) {
+void oopDesc::release_set_mark(markOop m) {
   OrderAccess::release_store_ptr(&_mark, m);
 }
 
-inline markOop oopDesc::cas_set_mark(markOop new_mark, markOop old_mark) {
+markOop oopDesc::cas_set_mark(markOop new_mark, markOop old_mark) {
   return (markOop) Atomic::cmpxchg_ptr(new_mark, &_mark, old_mark);
 }
 
+void oopDesc::init_mark() {
+  set_mark(markOopDesc::prototype_for_object(this));
+}
+
 inline Klass* oopDesc::klass() const {
   if (UseCompressedClassPointers) {
     return Klass::decode_klass_not_null(_metadata._compressed_klass);
@@ -60,7 +108,7 @@
   }
 }
 
-inline Klass* oopDesc::klass_or_null() const volatile {
+Klass* oopDesc::klass_or_null() const volatile {
   // can be NULL in CMS
   if (UseCompressedClassPointers) {
     return Klass::decode_klass(_metadata._compressed_klass);
@@ -69,14 +117,14 @@
   }
 }
 
-inline Klass** oopDesc::klass_addr() {
+Klass** oopDesc::klass_addr() {
   // Only used internally and with CMS and will not work with
   // UseCompressedOops
   assert(!UseCompressedClassPointers, "only supported with uncompressed klass pointers");
   return (Klass**) &_metadata._klass;
 }
 
-inline narrowKlass* oopDesc::compressed_klass_addr() {
+narrowKlass* oopDesc::compressed_klass_addr() {
   assert(UseCompressedClassPointers, "only called by compressed klass pointers");
   return &_metadata._compressed_klass;
 }
@@ -92,7 +140,7 @@
   }
 }
 
-inline int oopDesc::klass_gap() const {
+int oopDesc::klass_gap() const {
   return *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes());
 }
 
@@ -102,7 +150,7 @@
   }
 }
 
-inline void oopDesc::set_klass_to_list_ptr(oop k) {
+void oopDesc::set_klass_to_list_ptr(oop k) {
   // This is only to be used during GC, for from-space objects, so no
   // barrier is needed.
   if (UseCompressedClassPointers) {
@@ -112,7 +160,7 @@
   }
 }
 
-inline oop oopDesc::list_ptr_from_klass() {
+oop oopDesc::list_ptr_from_klass() {
   // This is only to be used during GC, for from-space objects.
   if (UseCompressedClassPointers) {
     return decode_heap_oop((narrowOop)_metadata._compressed_klass);
@@ -122,261 +170,15 @@
   }
 }
 
-inline void   oopDesc::init_mark()                 { set_mark(markOopDesc::prototype_for_object(this)); }
-
-inline bool oopDesc::is_a(Klass* k)        const { return klass()->is_subtype_of(k); }
-
-inline bool oopDesc::is_instance() const {
-  return klass()->is_instance_klass();
-}
-
-inline bool oopDesc::is_array()               const { return klass()->is_array_klass(); }
-inline bool oopDesc::is_objArray()            const { return klass()->is_objArray_klass(); }
-inline bool oopDesc::is_typeArray()           const { return klass()->is_typeArray_klass(); }
-
-inline void*     oopDesc::field_base(int offset)        const { return (void*)&((char*)this)[offset]; }
-
-template <class T> inline T* oopDesc::obj_field_addr(int offset) const { return (T*)field_base(offset); }
-inline Metadata** oopDesc::metadata_field_addr(int offset) const { return (Metadata**)field_base(offset); }
-inline jbyte*    oopDesc::byte_field_addr(int offset)   const { return (jbyte*)   field_base(offset); }
-inline jchar*    oopDesc::char_field_addr(int offset)   const { return (jchar*)   field_base(offset); }
-inline jboolean* oopDesc::bool_field_addr(int offset)   const { return (jboolean*)field_base(offset); }
-inline jint*     oopDesc::int_field_addr(int offset)    const { return (jint*)    field_base(offset); }
-inline jshort*   oopDesc::short_field_addr(int offset)  const { return (jshort*)  field_base(offset); }
-inline jlong*    oopDesc::long_field_addr(int offset)   const { return (jlong*)   field_base(offset); }
-inline jfloat*   oopDesc::float_field_addr(int offset)  const { return (jfloat*)  field_base(offset); }
-inline jdouble*  oopDesc::double_field_addr(int offset) const { return (jdouble*) field_base(offset); }
-inline address*  oopDesc::address_field_addr(int offset) const { return (address*) field_base(offset); }
-
-
-// Functions for getting and setting oops within instance objects.
-// If the oops are compressed, the type passed to these overloaded functions
-// is narrowOop.  All functions are overloaded so they can be called by
-// template functions without conditionals (the compiler instantiates via
-// the right type and inlines the appopriate code).
-
-inline bool oopDesc::is_null(oop obj)       { return obj == NULL; }
-inline bool oopDesc::is_null(narrowOop obj) { return obj == 0; }
-
-// Algorithm for encoding and decoding oops from 64 bit pointers to 32 bit
-// offset from the heap base.  Saving the check for null can save instructions
-// in inner GC loops so these are separated.
-
-inline bool check_obj_alignment(oop obj) {
-  return cast_from_oop<intptr_t>(obj) % MinObjAlignmentInBytes == 0;
-}
-
-inline narrowOop oopDesc::encode_heap_oop_not_null(oop v) {
-  assert(!is_null(v), "oop value can never be zero");
-  assert(check_obj_alignment(v), "Address not aligned");
-  assert(Universe::heap()->is_in_reserved(v), "Address not in heap");
-  address base = Universe::narrow_oop_base();
-  int    shift = Universe::narrow_oop_shift();
-  uint64_t  pd = (uint64_t)(pointer_delta((void*)v, (void*)base, 1));
-  assert(OopEncodingHeapMax > pd, "change encoding max if new encoding");
-  uint64_t result = pd >> shift;
-  assert((result & CONST64(0xffffffff00000000)) == 0, "narrow oop overflow");
-  assert(decode_heap_oop(result) == v, "reversibility");
-  return (narrowOop)result;
-}
-
-inline narrowOop oopDesc::encode_heap_oop(oop v) {
-  return (is_null(v)) ? (narrowOop)0 : encode_heap_oop_not_null(v);
-}
-
-inline oop oopDesc::decode_heap_oop_not_null(narrowOop v) {
-  assert(!is_null(v), "narrow oop value can never be zero");
-  address base = Universe::narrow_oop_base();
-  int    shift = Universe::narrow_oop_shift();
-  oop result = (oop)(void*)((uintptr_t)base + ((uintptr_t)v << shift));
-  assert(check_obj_alignment(result), "address not aligned: " INTPTR_FORMAT, p2i((void*) result));
-  return result;
-}
-
-inline oop oopDesc::decode_heap_oop(narrowOop v) {
-  return is_null(v) ? (oop)NULL : decode_heap_oop_not_null(v);
-}
-
-inline oop oopDesc::decode_heap_oop_not_null(oop v) { return v; }
-inline oop oopDesc::decode_heap_oop(oop v)  { return v; }
-
-// Load an oop out of the Java heap as is without decoding.
-// Called by GC to check for null before decoding.
-inline oop       oopDesc::load_heap_oop(oop* p)          { return *p; }
-inline narrowOop oopDesc::load_heap_oop(narrowOop* p)    { return *p; }
-
-// Load and decode an oop out of the Java heap into a wide oop.
-inline oop oopDesc::load_decode_heap_oop_not_null(oop* p)       { return *p; }
-inline oop oopDesc::load_decode_heap_oop_not_null(narrowOop* p) {
-  return decode_heap_oop_not_null(*p);
-}
-
-// Load and decode an oop out of the heap accepting null
-inline oop oopDesc::load_decode_heap_oop(oop* p) { return *p; }
-inline oop oopDesc::load_decode_heap_oop(narrowOop* p) {
-  return decode_heap_oop(*p);
-}
-
-// Store already encoded heap oop into the heap.
-inline void oopDesc::store_heap_oop(oop* p, oop v)                 { *p = v; }
-inline void oopDesc::store_heap_oop(narrowOop* p, narrowOop v)     { *p = v; }
-
-// Encode and store a heap oop.
-inline void oopDesc::encode_store_heap_oop_not_null(narrowOop* p, oop v) {
-  *p = encode_heap_oop_not_null(v);
-}
-inline void oopDesc::encode_store_heap_oop_not_null(oop* p, oop v) { *p = v; }
-
-// Encode and store a heap oop allowing for null.
-inline void oopDesc::encode_store_heap_oop(narrowOop* p, oop v) {
-  *p = encode_heap_oop(v);
-}
-inline void oopDesc::encode_store_heap_oop(oop* p, oop v) { *p = v; }
-
-// Store heap oop as is for volatile fields.
-inline void oopDesc::release_store_heap_oop(volatile oop* p, oop v) {
-  OrderAccess::release_store_ptr(p, v);
-}
-inline void oopDesc::release_store_heap_oop(volatile narrowOop* p,
-                                            narrowOop v) {
-  OrderAccess::release_store(p, v);
-}
-
-inline void oopDesc::release_encode_store_heap_oop_not_null(
-                                                volatile narrowOop* p, oop v) {
-  // heap oop is not pointer sized.
-  OrderAccess::release_store(p, encode_heap_oop_not_null(v));
+bool oopDesc::is_a(Klass* k) const {
+  return klass()->is_subtype_of(k);
 }
 
-inline void oopDesc::release_encode_store_heap_oop_not_null(
-                                                      volatile oop* p, oop v) {
-  OrderAccess::release_store_ptr(p, v);
-}
-
-inline void oopDesc::release_encode_store_heap_oop(volatile oop* p,
-                                                           oop v) {
-  OrderAccess::release_store_ptr(p, v);
-}
-inline void oopDesc::release_encode_store_heap_oop(
-                                                volatile narrowOop* p, oop v) {
-  OrderAccess::release_store(p, encode_heap_oop(v));
-}
-
-
-// These functions are only used to exchange oop fields in instances,
-// not headers.
-inline oop oopDesc::atomic_exchange_oop(oop exchange_value, volatile HeapWord *dest) {
-  if (UseCompressedOops) {
-    // encode exchange value from oop to T
-    narrowOop val = encode_heap_oop(exchange_value);
-    narrowOop old = (narrowOop)Atomic::xchg(val, (narrowOop*)dest);
-    // decode old from T to oop
-    return decode_heap_oop(old);
-  } else {
-    return (oop)Atomic::xchg_ptr(exchange_value, (oop*)dest);
-  }
-}
-
-// In order to put or get a field out of an instance, must first check
-// if the field has been compressed and uncompress it.
-inline oop oopDesc::obj_field(int offset) const {
-  return UseCompressedOops ?
-    load_decode_heap_oop(obj_field_addr<narrowOop>(offset)) :
-    load_decode_heap_oop(obj_field_addr<oop>(offset));
-}
-
-inline void oopDesc::obj_field_put(int offset, oop value) {
-  UseCompressedOops ? oop_store(obj_field_addr<narrowOop>(offset), value) :
-                      oop_store(obj_field_addr<oop>(offset),       value);
-}
-
-inline Metadata* oopDesc::metadata_field(int offset) const {
-  return *metadata_field_addr(offset);
-}
-
-inline void oopDesc::metadata_field_put(int offset, Metadata* value) {
-  *metadata_field_addr(offset) = value;
-}
-
-inline void oopDesc::obj_field_put_raw(int offset, oop value) {
-  UseCompressedOops ?
-    encode_store_heap_oop(obj_field_addr<narrowOop>(offset), value) :
-    encode_store_heap_oop(obj_field_addr<oop>(offset),       value);
-}
-inline void oopDesc::obj_field_put_volatile(int offset, oop value) {
-  OrderAccess::release();
-  obj_field_put(offset, value);
-  OrderAccess::fence();
+int oopDesc::size()  {
+  return size_given_klass(klass());
 }
 
-inline jbyte oopDesc::byte_field(int offset) const                  { return (jbyte) *byte_field_addr(offset);    }
-inline void oopDesc::byte_field_put(int offset, jbyte contents)     { *byte_field_addr(offset) = (jint) contents; }
-
-inline jboolean oopDesc::bool_field(int offset) const               { return (jboolean) *bool_field_addr(offset); }
-inline void oopDesc::bool_field_put(int offset, jboolean contents)  { *bool_field_addr(offset) = (jint) contents; }
-
-inline jchar oopDesc::char_field(int offset) const                  { return (jchar) *char_field_addr(offset);    }
-inline void oopDesc::char_field_put(int offset, jchar contents)     { *char_field_addr(offset) = (jint) contents; }
-
-inline jint oopDesc::int_field(int offset) const                    { return *int_field_addr(offset);        }
-inline void oopDesc::int_field_put(int offset, jint contents)       { *int_field_addr(offset) = contents;    }
-
-inline jshort oopDesc::short_field(int offset) const                { return (jshort) *short_field_addr(offset);  }
-inline void oopDesc::short_field_put(int offset, jshort contents)   { *short_field_addr(offset) = (jint) contents;}
-
-inline jlong oopDesc::long_field(int offset) const                  { return *long_field_addr(offset);       }
-inline void oopDesc::long_field_put(int offset, jlong contents)     { *long_field_addr(offset) = contents;   }
-
-inline jfloat oopDesc::float_field(int offset) const                { return *float_field_addr(offset);      }
-inline void oopDesc::float_field_put(int offset, jfloat contents)   { *float_field_addr(offset) = contents;  }
-
-inline jdouble oopDesc::double_field(int offset) const              { return *double_field_addr(offset);     }
-inline void oopDesc::double_field_put(int offset, jdouble contents) { *double_field_addr(offset) = contents; }
-
-inline address oopDesc::address_field(int offset) const              { return *address_field_addr(offset);     }
-inline void oopDesc::address_field_put(int offset, address contents) { *address_field_addr(offset) = contents; }
-
-inline oop oopDesc::obj_field_acquire(int offset) const {
-  return UseCompressedOops ?
-             decode_heap_oop((narrowOop)
-               OrderAccess::load_acquire(obj_field_addr<narrowOop>(offset)))
-           : decode_heap_oop((oop)
-               OrderAccess::load_ptr_acquire(obj_field_addr<oop>(offset)));
-}
-inline void oopDesc::release_obj_field_put(int offset, oop value) {
-  UseCompressedOops ?
-    oop_store((volatile narrowOop*)obj_field_addr<narrowOop>(offset), value) :
-    oop_store((volatile oop*)      obj_field_addr<oop>(offset),       value);
-}
-
-inline jbyte oopDesc::byte_field_acquire(int offset) const                  { return OrderAccess::load_acquire(byte_field_addr(offset));     }
-inline void oopDesc::release_byte_field_put(int offset, jbyte contents)     { OrderAccess::release_store(byte_field_addr(offset), contents); }
-
-inline jboolean oopDesc::bool_field_acquire(int offset) const               { return OrderAccess::load_acquire(bool_field_addr(offset));     }
-inline void oopDesc::release_bool_field_put(int offset, jboolean contents)  { OrderAccess::release_store(bool_field_addr(offset), contents); }
-
-inline jchar oopDesc::char_field_acquire(int offset) const                  { return OrderAccess::load_acquire(char_field_addr(offset));     }
-inline void oopDesc::release_char_field_put(int offset, jchar contents)     { OrderAccess::release_store(char_field_addr(offset), contents); }
-
-inline jint oopDesc::int_field_acquire(int offset) const                    { return OrderAccess::load_acquire(int_field_addr(offset));      }
-inline void oopDesc::release_int_field_put(int offset, jint contents)       { OrderAccess::release_store(int_field_addr(offset), contents);  }
-
-inline jshort oopDesc::short_field_acquire(int offset) const                { return (jshort)OrderAccess::load_acquire(short_field_addr(offset)); }
-inline void oopDesc::release_short_field_put(int offset, jshort contents)   { OrderAccess::release_store(short_field_addr(offset), contents);     }
-
-inline jlong oopDesc::long_field_acquire(int offset) const                  { return OrderAccess::load_acquire(long_field_addr(offset));       }
-inline void oopDesc::release_long_field_put(int offset, jlong contents)     { OrderAccess::release_store(long_field_addr(offset), contents);   }
-
-inline jfloat oopDesc::float_field_acquire(int offset) const                { return OrderAccess::load_acquire(float_field_addr(offset));      }
-inline void oopDesc::release_float_field_put(int offset, jfloat contents)   { OrderAccess::release_store(float_field_addr(offset), contents);  }
-
-inline jdouble oopDesc::double_field_acquire(int offset) const              { return OrderAccess::load_acquire(double_field_addr(offset));     }
-inline void oopDesc::release_double_field_put(int offset, jdouble contents) { OrderAccess::release_store(double_field_addr(offset), contents); }
-
-inline address oopDesc::address_field_acquire(int offset) const             { return (address) OrderAccess::load_ptr_acquire(address_field_addr(offset)); }
-inline void oopDesc::release_address_field_put(int offset, address contents) { OrderAccess::release_store_ptr(address_field_addr(offset), contents); }
-
-inline int oopDesc::size_given_klass(Klass* klass)  {
+int oopDesc::size_given_klass(Klass* klass)  {
   int lh = klass->layout_helper();
   int s;
 
@@ -461,59 +263,133 @@
   return s;
 }
 
+bool oopDesc::is_instance()  const { return klass()->is_instance_klass();  }
+bool oopDesc::is_array()     const { return klass()->is_array_klass();     }
+bool oopDesc::is_objArray()  const { return klass()->is_objArray_klass();  }
+bool oopDesc::is_typeArray() const { return klass()->is_typeArray_klass(); }
 
-inline int oopDesc::size()  {
-  return size_given_klass(klass());
+void*      oopDesc::field_base(int offset)          const { return (void*)&((char*)this)[offset]; }
+
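+// The typed *_field_addr accessors below are thin casts of field_base() to
+// the corresponding field type.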
+jbyte*     oopDesc::byte_field_addr(int offset)     const { return (jbyte*)    field_base(offset); }
+jchar*     oopDesc::char_field_addr(int offset)     const { return (jchar*)    field_base(offset); }
+jboolean*  oopDesc::bool_field_addr(int offset)     const { return (jboolean*) field_base(offset); }
+jint*      oopDesc::int_field_addr(int offset)      const { return (jint*)     field_base(offset); }
+jshort*    oopDesc::short_field_addr(int offset)    const { return (jshort*)   field_base(offset); }
+jlong*     oopDesc::long_field_addr(int offset)     const { return (jlong*)    field_base(offset); }
+jfloat*    oopDesc::float_field_addr(int offset)    const { return (jfloat*)   field_base(offset); }
+jdouble*   oopDesc::double_field_addr(int offset)   const { return (jdouble*)  field_base(offset); }
+Metadata** oopDesc::metadata_field_addr(int offset) const { return (Metadata**)field_base(offset); }
+
+template <class T> T* oopDesc::obj_field_addr(int offset) const { return (T*)  field_base(offset); }
+address*   oopDesc::address_field_addr(int offset)  const { return (address*)  field_base(offset); }
+
+
+// Functions for getting and setting oops within instance objects.
+// If the oops are compressed, the type passed to these overloaded functions
+// is narrowOop.  All functions are overloaded so they can be called by
+// template functions without conditionals (the compiler instantiates via
+// the right type and inlines the appropriate code).
+
+// Algorithm for encoding and decoding oops from 64 bit pointers to 32 bit
+// offset from the heap base.  Saving the check for null can save instructions
+// in inner GC loops so these are separated.
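+//
+// For example, with heap base B = Universe::narrow_oop_base() and shift 3
+// (the usual 8-byte object alignment), an object at address B + 0x40 encodes
+// to narrowOop 0x8, and decoding computes B + ((uintptr_t)0x8 << 3) to
+// recover the original address.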
+
+inline bool check_obj_alignment(oop obj) {
+  return cast_from_oop<intptr_t>(obj) % MinObjAlignmentInBytes == 0;
 }
 
-inline void update_barrier_set(void* p, oop v, bool release = false) {
-  assert(oopDesc::bs() != NULL, "Uninitialized bs in oop!");
-  oopDesc::bs()->write_ref_field(p, v, release);
+oop oopDesc::decode_heap_oop_not_null(narrowOop v) {
+  assert(!is_null(v), "narrow oop value can never be zero");
+  address base = Universe::narrow_oop_base();
+  int    shift = Universe::narrow_oop_shift();
+  oop result = (oop)(void*)((uintptr_t)base + ((uintptr_t)v << shift));
+  assert(check_obj_alignment(result), "address not aligned: " INTPTR_FORMAT, p2i((void*) result));
+  return result;
+}
+
+oop oopDesc::decode_heap_oop(narrowOop v) {
+  return is_null(v) ? (oop)NULL : decode_heap_oop_not_null(v);
 }
 
-template <class T> inline void update_barrier_set_pre(T* p, oop v) {
-  oopDesc::bs()->write_ref_field_pre(p, v);
+narrowOop oopDesc::encode_heap_oop_not_null(oop v) {
+  assert(!is_null(v), "oop value can never be zero");
+  assert(check_obj_alignment(v), "Address not aligned");
+  assert(Universe::heap()->is_in_reserved(v), "Address not in heap");
+  address base = Universe::narrow_oop_base();
+  int    shift = Universe::narrow_oop_shift();
+  uint64_t  pd = (uint64_t)(pointer_delta((void*)v, (void*)base, 1));
+  assert(OopEncodingHeapMax > pd, "change encoding max if new encoding");
+  uint64_t result = pd >> shift;
+  assert((result & CONST64(0xffffffff00000000)) == 0, "narrow oop overflow");
+  assert(decode_heap_oop(result) == v, "reversibility");
+  return (narrowOop)result;
+}
+
+narrowOop oopDesc::encode_heap_oop(oop v) {
+  return (is_null(v)) ? (narrowOop)0 : encode_heap_oop_not_null(v);
+}
+
+// Load and decode an oop out of the Java heap into a wide oop.
+oop oopDesc::load_decode_heap_oop_not_null(narrowOop* p) {
+  return decode_heap_oop_not_null(*p);
+}
+
+// Load and decode an oop out of the heap accepting null
+oop oopDesc::load_decode_heap_oop(narrowOop* p) {
+  return decode_heap_oop(*p);
+}
+
+// Encode and store a heap oop.
+void oopDesc::encode_store_heap_oop_not_null(narrowOop* p, oop v) {
+  *p = encode_heap_oop_not_null(v);
 }
 
-template <class T> inline void oop_store(T* p, oop v) {
-  if (always_do_update_barrier) {
-    oop_store((volatile T*)p, v);
+// Encode and store a heap oop allowing for null.
+void oopDesc::encode_store_heap_oop(narrowOop* p, oop v) {
+  *p = encode_heap_oop(v);
+}
+
+// Store heap oop as is for volatile fields.
+void oopDesc::release_store_heap_oop(volatile oop* p, oop v) {
+  OrderAccess::release_store_ptr(p, v);
+}
+void oopDesc::release_store_heap_oop(volatile narrowOop* p, narrowOop v) {
+  OrderAccess::release_store(p, v);
+}
+
+void oopDesc::release_encode_store_heap_oop_not_null(volatile narrowOop* p, oop v) {
+  // heap oop is not pointer sized.
+  OrderAccess::release_store(p, encode_heap_oop_not_null(v));
+}
+void oopDesc::release_encode_store_heap_oop_not_null(volatile oop* p, oop v) {
+  OrderAccess::release_store_ptr(p, v);
+}
+
+void oopDesc::release_encode_store_heap_oop(volatile oop* p, oop v) {
+  OrderAccess::release_store_ptr(p, v);
+}
+void oopDesc::release_encode_store_heap_oop(volatile narrowOop* p, oop v) {
+  OrderAccess::release_store(p, encode_heap_oop(v));
+}
+
+// These functions are only used to exchange oop fields in instances,
+// not headers.
+oop oopDesc::atomic_exchange_oop(oop exchange_value, volatile HeapWord *dest) {
+  if (UseCompressedOops) {
+    // encode exchange value from oop to T
+    narrowOop val = encode_heap_oop(exchange_value);
+    narrowOop old = (narrowOop)Atomic::xchg(val, (narrowOop*)dest);
+    // decode old from T to oop
+    return decode_heap_oop(old);
   } else {
-    update_barrier_set_pre(p, v);
-    oopDesc::encode_store_heap_oop(p, v);
-    // always_do_update_barrier == false =>
-    // Either we are at a safepoint (in GC) or CMS is not used. In both
-    // cases it's unnecessary to mark the card as dirty with release sematics.
-    update_barrier_set((void*)p, v, false /* release */);  // cast away type
+    return (oop)Atomic::xchg_ptr(exchange_value, (oop*)dest);
   }
 }
 
-template <class T> inline void oop_store(volatile T* p, oop v) {
-  update_barrier_set_pre((T*)p, v);   // cast away volatile
-  // Used by release_obj_field_put, so use release_store_ptr.
-  oopDesc::release_encode_store_heap_oop(p, v);
-  // When using CMS we must mark the card corresponding to p as dirty
-  // with release sematics to prevent that CMS sees the dirty card but
-  // not the new value v at p due to reordering of the two
-  // stores. Note that CMS has a concurrent precleaning phase, where
-  // it reads the card table while the Java threads are running.
-  update_barrier_set((void*)p, v, true /* release */);    // cast away type
-}
-
-// Should replace *addr = oop assignments where addr type depends on UseCompressedOops
-// (without having to remember the function name this calls).
-inline void oop_store_raw(HeapWord* addr, oop value) {
-  if (UseCompressedOops) {
-    oopDesc::encode_store_heap_oop((narrowOop*)addr, value);
-  } else {
-    oopDesc::encode_store_heap_oop((oop*)addr, value);
-  }
-}
-
-inline oop oopDesc::atomic_compare_exchange_oop(oop exchange_value,
-                                                volatile HeapWord *dest,
-                                                oop compare_value,
-                                                bool prebarrier) {
+oop oopDesc::atomic_compare_exchange_oop(oop exchange_value,
+                                         volatile HeapWord *dest,
+                                         oop compare_value,
+                                         bool prebarrier) {
   if (UseCompressedOops) {
     if (prebarrier) {
       update_barrier_set_pre((narrowOop*)dest, exchange_value);
@@ -533,24 +409,112 @@
   }
 }
 
-// Used only for markSweep, scavenging
-inline bool oopDesc::is_gc_marked() const {
-  return mark()->is_marked();
+// In order to put or get a field out of an instance, must first check
+// if the field has been compressed and uncompress it.
+oop oopDesc::obj_field(int offset) const {
+  return UseCompressedOops ?
+    load_decode_heap_oop(obj_field_addr<narrowOop>(offset)) :
+    load_decode_heap_oop(obj_field_addr<oop>(offset));
+}
+
+void oopDesc::obj_field_put(int offset, oop value) {
+  UseCompressedOops ? oop_store(obj_field_addr<narrowOop>(offset), value) :
+                      oop_store(obj_field_addr<oop>(offset),       value);
+}
+
+void oopDesc::obj_field_put_raw(int offset, oop value) {
+  UseCompressedOops ?
+    encode_store_heap_oop(obj_field_addr<narrowOop>(offset), value) :
+    encode_store_heap_oop(obj_field_addr<oop>(offset),       value);
+}
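+// Volatile oop store: a release barrier before the store and a full fence
+// after it keep the store ordered with the surrounding accesses.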
+void oopDesc::obj_field_put_volatile(int offset, oop value) {
+  OrderAccess::release();
+  obj_field_put(offset, value);
+  OrderAccess::fence();
 }
 
-inline bool oopDesc::is_locked() const {
+Metadata* oopDesc::metadata_field(int offset) const           { return *metadata_field_addr(offset);   }
+void oopDesc::metadata_field_put(int offset, Metadata* value) { *metadata_field_addr(offset) = value;  }
+
+jbyte oopDesc::byte_field(int offset) const                   { return (jbyte) *byte_field_addr(offset);    }
+void oopDesc::byte_field_put(int offset, jbyte contents)      { *byte_field_addr(offset) = (jint) contents; }
+
+jchar oopDesc::char_field(int offset) const                   { return (jchar) *char_field_addr(offset);    }
+void oopDesc::char_field_put(int offset, jchar contents)      { *char_field_addr(offset) = (jint) contents; }
+
+jboolean oopDesc::bool_field(int offset) const                { return (jboolean) *bool_field_addr(offset); }
+void oopDesc::bool_field_put(int offset, jboolean contents)   { *bool_field_addr(offset) = (jint) contents; }
+
+jint oopDesc::int_field(int offset) const                     { return *int_field_addr(offset);        }
+void oopDesc::int_field_put(int offset, jint contents)        { *int_field_addr(offset) = contents;    }
+
+jshort oopDesc::short_field(int offset) const                 { return (jshort) *short_field_addr(offset);  }
+void oopDesc::short_field_put(int offset, jshort contents)    { *short_field_addr(offset) = (jint) contents;}
+
+jlong oopDesc::long_field(int offset) const                   { return *long_field_addr(offset);       }
+void oopDesc::long_field_put(int offset, jlong contents)      { *long_field_addr(offset) = contents;   }
+
+jfloat oopDesc::float_field(int offset) const                 { return *float_field_addr(offset);      }
+void oopDesc::float_field_put(int offset, jfloat contents)    { *float_field_addr(offset) = contents;  }
+
+jdouble oopDesc::double_field(int offset) const               { return *double_field_addr(offset);     }
+void oopDesc::double_field_put(int offset, jdouble contents)  { *double_field_addr(offset) = contents; }
+
+address oopDesc::address_field(int offset) const              { return *address_field_addr(offset);     }
+void oopDesc::address_field_put(int offset, address contents) { *address_field_addr(offset) = contents; }
+
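+// Acquire/release variants of the oop field accessors: obj_field_acquire()
+// loads with acquire ordering; release_obj_field_put() stores through the
+// volatile oop_store() overload, which uses release ordering for both the
+// field store and the card mark.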
+oop oopDesc::obj_field_acquire(int offset) const {
+  return UseCompressedOops ?
+             decode_heap_oop((narrowOop)
+               OrderAccess::load_acquire(obj_field_addr<narrowOop>(offset)))
+           : decode_heap_oop((oop)
+               OrderAccess::load_ptr_acquire(obj_field_addr<oop>(offset)));
+}
+void oopDesc::release_obj_field_put(int offset, oop value) {
+  UseCompressedOops ?
+    oop_store((volatile narrowOop*)obj_field_addr<narrowOop>(offset), value) :
+    oop_store((volatile oop*)      obj_field_addr<oop>(offset),       value);
+}
+
+jbyte oopDesc::byte_field_acquire(int offset) const                   { return OrderAccess::load_acquire(byte_field_addr(offset));     }
+void oopDesc::release_byte_field_put(int offset, jbyte contents)      { OrderAccess::release_store(byte_field_addr(offset), contents); }
+
+jchar oopDesc::char_field_acquire(int offset) const                   { return OrderAccess::load_acquire(char_field_addr(offset));     }
+void oopDesc::release_char_field_put(int offset, jchar contents)      { OrderAccess::release_store(char_field_addr(offset), contents); }
+
+jboolean oopDesc::bool_field_acquire(int offset) const                { return OrderAccess::load_acquire(bool_field_addr(offset));     }
+void oopDesc::release_bool_field_put(int offset, jboolean contents)   { OrderAccess::release_store(bool_field_addr(offset), contents); }
+
+jint oopDesc::int_field_acquire(int offset) const                     { return OrderAccess::load_acquire(int_field_addr(offset));      }
+void oopDesc::release_int_field_put(int offset, jint contents)        { OrderAccess::release_store(int_field_addr(offset), contents);  }
+
+jshort oopDesc::short_field_acquire(int offset) const                 { return (jshort)OrderAccess::load_acquire(short_field_addr(offset)); }
+void oopDesc::release_short_field_put(int offset, jshort contents)    { OrderAccess::release_store(short_field_addr(offset), contents);     }
+
+jlong oopDesc::long_field_acquire(int offset) const                   { return OrderAccess::load_acquire(long_field_addr(offset));       }
+void oopDesc::release_long_field_put(int offset, jlong contents)      { OrderAccess::release_store(long_field_addr(offset), contents);   }
+
+jfloat oopDesc::float_field_acquire(int offset) const                 { return OrderAccess::load_acquire(float_field_addr(offset));      }
+void oopDesc::release_float_field_put(int offset, jfloat contents)    { OrderAccess::release_store(float_field_addr(offset), contents);  }
+
+jdouble oopDesc::double_field_acquire(int offset) const               { return OrderAccess::load_acquire(double_field_addr(offset));     }
+void oopDesc::release_double_field_put(int offset, jdouble contents)  { OrderAccess::release_store(double_field_addr(offset), contents); }
+
+address oopDesc::address_field_acquire(int offset) const              { return (address) OrderAccess::load_ptr_acquire(address_field_addr(offset)); }
+void oopDesc::release_address_field_put(int offset, address contents) { OrderAccess::release_store_ptr(address_field_addr(offset), contents); }
+
+bool oopDesc::is_locked() const {
   return mark()->is_locked();
 }
 
-inline bool oopDesc::is_unlocked() const {
+bool oopDesc::is_unlocked() const {
   return mark()->is_unlocked();
 }
 
-inline bool oopDesc::has_bias_pattern() const {
+bool oopDesc::has_bias_pattern() const {
   return mark()->has_bias_pattern();
 }
 
-
 // used only for asserts
 inline bool oopDesc::is_oop(bool ignore_mark_word) const {
   oop obj = (oop) this;
@@ -580,25 +544,30 @@
 
 #ifndef PRODUCT
 // used only for asserts
-inline bool oopDesc::is_unlocked_oop() const {
+bool oopDesc::is_unlocked_oop() const {
   if (!Universe::heap()->is_in_reserved(this)) return false;
   return mark()->is_unlocked();
 }
 #endif // PRODUCT
 
-inline bool oopDesc::is_scavengable() const {
+// Used only for markSweep, scavenging
+bool oopDesc::is_gc_marked() const {
+  return mark()->is_marked();
+}
+
+bool oopDesc::is_scavengable() const {
   return Universe::heap()->is_scavengable(this);
 }
 
 // Used by scavengers
-inline bool oopDesc::is_forwarded() const {
+bool oopDesc::is_forwarded() const {
   // The extra heap check is needed since the obj might be locked, in which case the
   // mark would point to a stack location and have the sentinel bit cleared
   return mark()->is_marked();
 }
 
 // Used by scavengers
-inline void oopDesc::forward_to(oop p) {
+void oopDesc::forward_to(oop p) {
   assert(check_obj_alignment(p),
          "forwarding to something not aligned");
   assert(Universe::heap()->is_in_reserved(p),
@@ -609,7 +578,7 @@
 }
 
 // Used by parallel scavengers
-inline bool oopDesc::cas_forward_to(oop p, markOop compare) {
+bool oopDesc::cas_forward_to(oop p, markOop compare) {
   assert(check_obj_alignment(p),
          "forwarding to something not aligned");
   assert(Universe::heap()->is_in_reserved(p),
@@ -620,7 +589,7 @@
 }
 
 #if INCLUDE_ALL_GCS
-inline oop oopDesc::forward_to_atomic(oop p) {
+oop oopDesc::forward_to_atomic(oop p) {
   markOop oldMark = mark();
   markOop forwardPtrMark = markOopDesc::encode_pointer_as_mark(p);
   markOop curMark;
@@ -646,22 +615,10 @@
 // Note that the forwardee is not the same thing as the displaced_mark.
 // The forwardee is used when copying during scavenge and mark-sweep.
 // It does need to clear the low two locking- and GC-related bits.
-inline oop oopDesc::forwardee() const {
+oop oopDesc::forwardee() const {
   return (oop) mark()->decode_pointer();
 }
 
-inline bool oopDesc::has_displaced_mark() const {
-  return mark()->has_displaced_mark_helper();
-}
-
-inline markOop oopDesc::displaced_mark() const {
-  return mark()->displaced_mark_helper();
-}
-
-inline void oopDesc::set_displaced_mark(markOop m) {
-  mark()->set_displaced_mark_helper(m);
-}
-
 // The following method needs to be MT safe.
 inline uint oopDesc::age() const {
   assert(!is_forwarded(), "Attempt to read age from forwarded mark");
@@ -672,7 +629,7 @@
   }
 }
 
-inline void oopDesc::incr_age() {
+void oopDesc::incr_age() {
   assert(!is_forwarded(), "Attempt to increment age of forwarded mark");
   if (has_displaced_mark()) {
     set_displaced_mark(displaced_mark()->incr_age());
@@ -681,8 +638,94 @@
   }
 }
 
+int oopDesc::ms_adjust_pointers() {
+  debug_only(int check_size = size());
+  int s = klass()->oop_ms_adjust_pointers(this);
+  assert(s == check_size, "should be the same");
+  return s;
+}
 
-inline intptr_t oopDesc::identity_hash() {
+#if INCLUDE_ALL_GCS
+void oopDesc::pc_follow_contents(ParCompactionManager* cm) {
+  klass()->oop_pc_follow_contents(this, cm);
+}
+
+void oopDesc::pc_update_contents() {
+  Klass* k = klass();
+  if (!k->is_typeArray_klass()) {
+    // It might contain oops beyond the header, so take the virtual call.
+    k->oop_pc_update_pointers(this);
+  }
+  // Else skip it.  The TypeArrayKlass in the header never needs scavenging.
+}
+
+void oopDesc::ps_push_contents(PSPromotionManager* pm) {
+  Klass* k = klass();
+  if (!k->is_typeArray_klass()) {
+    // It might contain oops beyond the header, so take the virtual call.
+    k->oop_ps_push_contents(this, pm);
+  }
+  // Else skip it.  The TypeArrayKlass in the header never needs scavenging.
+}
+#endif // INCLUDE_ALL_GCS
+
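+// The OOP_ITERATE_* macros below generate the oop_iterate, oop_iterate_size
+// and (with INCLUDE_ALL_GCS) oop_iterate_backwards member definitions for
+// every closure type listed in ALL_OOP_OOP_ITERATE_CLOSURES_1/2, forwarding
+// to the corresponding Klass::oop_oop_iterate* methods.
+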
+#define OOP_ITERATE_DEFN(OopClosureType, nv_suffix)                 \
+                                                                    \
+void oopDesc::oop_iterate(OopClosureType* blk) {                    \
+  klass()->oop_oop_iterate##nv_suffix(this, blk);                   \
+}                                                                   \
+                                                                    \
+void oopDesc::oop_iterate(OopClosureType* blk, MemRegion mr) {      \
+  klass()->oop_oop_iterate_bounded##nv_suffix(this, blk, mr);       \
+}
+
+#define OOP_ITERATE_SIZE_DEFN(OopClosureType, nv_suffix)            \
+                                                                    \
+int oopDesc::oop_iterate_size(OopClosureType* blk) {                \
+  Klass* k = klass();                                               \
+  int size = size_given_klass(k);                                   \
+  k->oop_oop_iterate##nv_suffix(this, blk);                         \
+  return size;                                                      \
+}                                                                   \
+                                                                    \
+int oopDesc::oop_iterate_size(OopClosureType* blk, MemRegion mr) {  \
+  Klass* k = klass();                                               \
+  int size = size_given_klass(k);                                   \
+  k->oop_oop_iterate_bounded##nv_suffix(this, blk, mr);             \
+  return size;                                                      \
+}
+
+int oopDesc::oop_iterate_no_header(OopClosure* blk) {
+  // The NoHeaderExtendedOopClosure wraps the OopClosure and proxies all
+  // the do_oop calls, but turns off all other features in ExtendedOopClosure.
+  NoHeaderExtendedOopClosure cl(blk);
+  return oop_iterate_size(&cl);
+}
+
+int oopDesc::oop_iterate_no_header(OopClosure* blk, MemRegion mr) {
+  NoHeaderExtendedOopClosure cl(blk);
+  return oop_iterate_size(&cl, mr);
+}
+
+#if INCLUDE_ALL_GCS
+#define OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)       \
+                                                                    \
+inline void oopDesc::oop_iterate_backwards(OopClosureType* blk) {   \
+  klass()->oop_oop_iterate_backwards##nv_suffix(this, blk);         \
+}
+#else
+#define OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
+#endif // INCLUDE_ALL_GCS
+
+#define ALL_OOPDESC_OOP_ITERATE(OopClosureType, nv_suffix)  \
+  OOP_ITERATE_DEFN(OopClosureType, nv_suffix)               \
+  OOP_ITERATE_SIZE_DEFN(OopClosureType, nv_suffix)          \
+  OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
+
+ALL_OOP_OOP_ITERATE_CLOSURES_1(ALL_OOPDESC_OOP_ITERATE)
+ALL_OOP_OOP_ITERATE_CLOSURES_2(ALL_OOPDESC_OOP_ITERATE)
+
+intptr_t oopDesc::identity_hash() {
   // Fast case; if the object is unlocked and the hash value is set, no locking is needed
   // Note: The mark must be read into local variable to avoid concurrent updates.
   markOop mrk = mark();
@@ -695,92 +738,16 @@
   }
 }
 
-inline int oopDesc::ms_adjust_pointers() {
-  debug_only(int check_size = size());
-  int s = klass()->oop_ms_adjust_pointers(this);
-  assert(s == check_size, "should be the same");
-  return s;
-}
-
-#if INCLUDE_ALL_GCS
-inline void oopDesc::pc_follow_contents(ParCompactionManager* cm) {
-  klass()->oop_pc_follow_contents(this, cm);
-}
-
-inline void oopDesc::pc_update_contents() {
-  Klass* k = klass();
-  if (!k->is_typeArray_klass()) {
-    // It might contain oops beyond the header, so take the virtual call.
-    k->oop_pc_update_pointers(this);
-  }
-  // Else skip it.  The TypeArrayKlass in the header never needs scavenging.
-}
-
-inline void oopDesc::ps_push_contents(PSPromotionManager* pm) {
-  Klass* k = klass();
-  if (!k->is_typeArray_klass()) {
-    // It might contain oops beyond the header, so take the virtual call.
-    k->oop_ps_push_contents(this, pm);
-  }
-  // Else skip it.  The TypeArrayKlass in the header never needs scavenging.
-}
-#endif
-
-#define OOP_ITERATE_DEFN(OopClosureType, nv_suffix)                    \
-                                                                       \
-inline void oopDesc::oop_iterate(OopClosureType* blk) {                \
-  klass()->oop_oop_iterate##nv_suffix(this, blk);                      \
-}                                                                      \
-                                                                       \
-inline void oopDesc::oop_iterate(OopClosureType* blk, MemRegion mr) {  \
-  klass()->oop_oop_iterate_bounded##nv_suffix(this, blk, mr);          \
+bool oopDesc::has_displaced_mark() const {
+  return mark()->has_displaced_mark_helper();
 }
 
-#define OOP_ITERATE_SIZE_DEFN(OopClosureType, nv_suffix)               \
-                                                                       \
-inline int oopDesc::oop_iterate_size(OopClosureType* blk) {            \
-  Klass* k = klass();                                                  \
-  int size = size_given_klass(k);                                      \
-  k->oop_oop_iterate##nv_suffix(this, blk);                            \
-  return size;                                                         \
-}                                                                      \
-                                                                       \
-inline int oopDesc::oop_iterate_size(OopClosureType* blk,              \
-                                     MemRegion mr) {                   \
-  Klass* k = klass();                                                  \
-  int size = size_given_klass(k);                                      \
-  k->oop_oop_iterate_bounded##nv_suffix(this, blk, mr);                \
-  return size;                                                         \
-}
-
-inline int oopDesc::oop_iterate_no_header(OopClosure* blk) {
-  // The NoHeaderExtendedOopClosure wraps the OopClosure and proxies all
-  // the do_oop calls, but turns off all other features in ExtendedOopClosure.
-  NoHeaderExtendedOopClosure cl(blk);
-  return oop_iterate_size(&cl);
+markOop oopDesc::displaced_mark() const {
+  return mark()->displaced_mark_helper();
 }
 
-inline int oopDesc::oop_iterate_no_header(OopClosure* blk, MemRegion mr) {
-  NoHeaderExtendedOopClosure cl(blk);
-  return oop_iterate_size(&cl, mr);
+void oopDesc::set_displaced_mark(markOop m) {
+  mark()->set_displaced_mark_helper(m);
 }
 
-#if INCLUDE_ALL_GCS
-#define OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)       \
-                                                                    \
-inline void oopDesc::oop_iterate_backwards(OopClosureType* blk) {   \
-  klass()->oop_oop_iterate_backwards##nv_suffix(this, blk);         \
-}
-#else
-#define OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
-#endif
-
-#define ALL_OOPDESC_OOP_ITERATE(OopClosureType, nv_suffix)  \
-  OOP_ITERATE_DEFN(OopClosureType, nv_suffix)               \
-  OOP_ITERATE_SIZE_DEFN(OopClosureType, nv_suffix)          \
-  OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
-
-ALL_OOP_OOP_ITERATE_CLOSURES_1(ALL_OOPDESC_OOP_ITERATE)
-ALL_OOP_OOP_ITERATE_CLOSURES_2(ALL_OOPDESC_OOP_ITERATE)
-
 #endif // SHARE_VM_OOPS_OOP_INLINE_HPP