8129417: Oop iteration clean-up to remove oop_ms_follow_contents
author sjohanss
Wed, 02 Sep 2015 09:14:04 +0200
changeset 32606 fdaa30d06ada
parent 32605 dab0de4ff7ff
child 32607 c69a7b61ab02
child 32608 ef2ec6fac731
8129417: Oop iteration clean-up to remove oop_ms_follow_contents
Reviewed-by: pliden, ehelin
hotspot/src/share/vm/gc/cms/cmsOopClosures.hpp
hotspot/src/share/vm/gc/cms/cmsOopClosures.inline.hpp
hotspot/src/share/vm/gc/cms/compactibleFreeListSpace.cpp
hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp
hotspot/src/share/vm/gc/g1/concurrentMark.cpp
hotspot/src/share/vm/gc/g1/g1MarkSweep.cpp
hotspot/src/share/vm/gc/g1/heapRegion.cpp
hotspot/src/share/vm/gc/parallel/immutableSpace.cpp
hotspot/src/share/vm/gc/parallel/mutableSpace.cpp
hotspot/src/share/vm/gc/parallel/mutableSpace.hpp
hotspot/src/share/vm/gc/parallel/psMarkSweep.cpp
hotspot/src/share/vm/gc/serial/genMarkSweep.cpp
hotspot/src/share/vm/gc/serial/markSweep.cpp
hotspot/src/share/vm/gc/serial/markSweep.hpp
hotspot/src/share/vm/gc/serial/markSweep.inline.hpp
hotspot/src/share/vm/gc/shared/space.cpp
hotspot/src/share/vm/gc/shared/specialized_oop_closures.hpp
hotspot/src/share/vm/gc/shared/taskqueue.cpp
hotspot/src/share/vm/memory/iterator.hpp
hotspot/src/share/vm/memory/iterator.inline.hpp
hotspot/src/share/vm/oops/arrayKlass.hpp
hotspot/src/share/vm/oops/instanceClassLoaderKlass.hpp
hotspot/src/share/vm/oops/instanceClassLoaderKlass.inline.hpp
hotspot/src/share/vm/oops/instanceKlass.hpp
hotspot/src/share/vm/oops/instanceKlass.inline.hpp
hotspot/src/share/vm/oops/instanceMirrorKlass.hpp
hotspot/src/share/vm/oops/instanceMirrorKlass.inline.hpp
hotspot/src/share/vm/oops/instanceRefKlass.hpp
hotspot/src/share/vm/oops/instanceRefKlass.inline.hpp
hotspot/src/share/vm/oops/klass.hpp
hotspot/src/share/vm/oops/objArrayKlass.hpp
hotspot/src/share/vm/oops/objArrayKlass.inline.hpp
hotspot/src/share/vm/oops/objArrayOop.cpp
hotspot/src/share/vm/oops/objArrayOop.hpp
hotspot/src/share/vm/oops/oop.hpp
hotspot/src/share/vm/oops/oop.inline.hpp
hotspot/src/share/vm/oops/typeArrayKlass.hpp
hotspot/src/share/vm/oops/typeArrayKlass.inline.hpp
hotspot/src/share/vm/utilities/stack.inline.hpp
--- a/hotspot/src/share/vm/gc/cms/cmsOopClosures.hpp	Tue Sep 01 14:01:18 2015 -0700
+++ b/hotspot/src/share/vm/gc/cms/cmsOopClosures.hpp	Wed Sep 02 09:14:04 2015 +0200
@@ -66,7 +66,8 @@
   virtual void do_klass(Klass* k);
   void do_klass_nv(Klass* k);
 
-  virtual void do_class_loader_data(ClassLoaderData* cld);
+  virtual void do_cld(ClassLoaderData* cld) { do_cld_nv(cld); }
+  void do_cld_nv(ClassLoaderData* cld);
 };
 
 class MarkRefsIntoClosure: public MetadataAwareOopsInGenClosure {
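
Note on the hunk above: closures in this change pair a virtual entry point with a non-virtual "_nv" twin so templated iterators can bypass virtual dispatch while generic code still works through the virtual call. A minimal sketch of the pairing, using a hypothetical ExampleClosure (not part of the patch):

    // The virtual method forwards to the non-virtual one, keeping the two in
    // sync; specialized iterators call do_cld_nv() directly (statically
    // bound), while generic iterators call the virtual do_cld().
    class ExampleClosure : public ExtendedOopClosure {
     public:
      void do_cld_nv(ClassLoaderData* cld);
      virtual void do_cld(ClassLoaderData* cld) { do_cld_nv(cld); }
    };
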
--- a/hotspot/src/share/vm/gc/cms/cmsOopClosures.inline.hpp	Tue Sep 01 14:01:18 2015 -0700
+++ b/hotspot/src/share/vm/gc/cms/cmsOopClosures.inline.hpp	Wed Sep 02 09:14:04 2015 +0200
@@ -50,11 +50,11 @@
 
 inline void MetadataAwareOopsInGenClosure::do_klass_nv(Klass* k) {
   ClassLoaderData* cld = k->class_loader_data();
-  do_class_loader_data(cld);
+  do_cld_nv(cld);
 }
 inline void MetadataAwareOopsInGenClosure::do_klass(Klass* k) { do_klass_nv(k); }
 
-inline void MetadataAwareOopsInGenClosure::do_class_loader_data(ClassLoaderData* cld) {
+inline void MetadataAwareOopsInGenClosure::do_cld_nv(ClassLoaderData* cld) {
   assert(_klass_closure._oop_closure == this, "Must be");
 
   bool claim = true;  // Must claim the class loader data before processing.
--- a/hotspot/src/share/vm/gc/cms/compactibleFreeListSpace.cpp	Tue Sep 01 14:01:18 2015 -0700
+++ b/hotspot/src/share/vm/gc/cms/compactibleFreeListSpace.cpp	Wed Sep 02 09:14:04 2015 +0200
@@ -702,7 +702,7 @@
         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
                     oop(bottom)) &&                                             \
         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
-      size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
+      size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr);                   \
       bottom += _cfls->adjustObjectSize(word_sz);                               \
     } else {                                                                    \
       bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
@@ -729,7 +729,7 @@
         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
                     oop(bottom)) &&                                             \
         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
-      size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
+      size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr);                   \
       bottom += _cfls->adjustObjectSize(word_sz);                               \
     } else {                                                                    \
       bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
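
The renames above reflect the central API change of this cleanup: the oop_iterate family no longer returns the object size, so call sites that advance a scan pointer switch to the new oop_iterate_size variant. A minimal usage sketch, assuming a hypothetical helper that walks a contiguous range of objects:

    // scan_objects is illustrative, not from the patch: oop_iterate_size()
    // applies the closure to the object's fields and returns the object's
    // size in words, which a linear scan uses to step to the next object.
    void scan_objects(HeapWord* bottom, HeapWord* top, ExtendedOopClosure* cl) {
      while (bottom < top) {
        bottom += oop(bottom)->oop_iterate_size(cl);
      }
    }
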
--- a/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp	Tue Sep 01 14:01:18 2015 -0700
+++ b/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp	Wed Sep 02 09:14:04 2015 +0200
@@ -4623,7 +4623,7 @@
     ResourceMark rm;
     GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
     for (int i = 0; i < array->length(); i++) {
-      par_mrias_cl.do_class_loader_data(array->at(i));
+      par_mrias_cl.do_cld_nv(array->at(i));
     }
 
     // We don't need to keep track of new CLDs anymore.
@@ -5199,7 +5199,7 @@
     ResourceMark rm;
     GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
     for (int i = 0; i < array->length(); i++) {
-      mrias_cl.do_class_loader_data(array->at(i));
+      mrias_cl.do_cld_nv(array->at(i));
     }
 
     // We don't need to keep track of new CLDs anymore.
@@ -6324,12 +6324,12 @@
           // objArrays are precisely marked; restrict scanning
           // to dirty cards only.
           size = CompactibleFreeListSpace::adjustObjectSize(
-                   p->oop_iterate(_scanningClosure, mr));
+                   p->oop_iterate_size(_scanningClosure, mr));
         } else {
           // A non-array may have been imprecisely marked; we need
           // to scan object in its entirety.
           size = CompactibleFreeListSpace::adjustObjectSize(
-                   p->oop_iterate(_scanningClosure));
+                   p->oop_iterate_size(_scanningClosure));
         }
         #ifdef ASSERT
           size_t direct_size =
@@ -6417,7 +6417,7 @@
   // Note that we do not yield while we iterate over
   // the interior oops of p, pushing the relevant ones
   // on our marking stack.
-  size_t size = p->oop_iterate(_scanning_closure);
+  size_t size = p->oop_iterate_size(_scanning_closure);
   do_yield_check();
   // Observe that below, we do not abandon the preclean
   // phase as soon as we should; rather we empty the
--- a/hotspot/src/share/vm/gc/g1/concurrentMark.cpp	Tue Sep 01 14:01:18 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/concurrentMark.cpp	Wed Sep 02 09:14:04 2015 +0200
@@ -1143,7 +1143,7 @@
   while (curr < end) {
     Prefetch::read(curr, interval);
     oop obj = oop(curr);
-    int size = obj->oop_iterate(&cl);
+    int size = obj->oop_iterate_size(&cl);
     assert(size == obj->size(), "sanity");
     curr += size;
   }
--- a/hotspot/src/share/vm/gc/g1/g1MarkSweep.cpp	Tue Sep 01 14:01:18 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1MarkSweep.cpp	Wed Sep 02 09:14:04 2015 +0200
@@ -74,7 +74,7 @@
   assert(rp != NULL, "should be non-NULL");
   assert(rp == G1CollectedHeap::heap()->ref_processor_stw(), "Precondition");
 
-  GenMarkSweep::_ref_processor = rp;
+  GenMarkSweep::set_ref_processor(rp);
   rp->setup_policy(clear_all_softrefs);
 
   // When collecting the permanent generation Method*s may be moving,
@@ -108,7 +108,7 @@
   JvmtiExport::gc_epilogue();
 
   // refs processing: clean slate
-  GenMarkSweep::_ref_processor = NULL;
+  GenMarkSweep::set_ref_processor(NULL);
 }
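
The switch from assigning GenMarkSweep::_ref_processor directly to calling set_ref_processor matters because the setter, added in markSweep.cpp below, also propagates the reference processor to the shared marking closure:

    // From the markSweep.cpp hunk in this change: keeping the closure's
    // reference processor in sync with MarkSweep's.
    void MarkSweep::set_ref_processor(ReferenceProcessor* rp) {
      _ref_processor = rp;
      mark_and_push_closure.set_ref_processor(_ref_processor);
    }
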
 
 
--- a/hotspot/src/share/vm/gc/g1/heapRegion.cpp	Tue Sep 01 14:01:18 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/heapRegion.cpp	Wed Sep 02 09:14:04 2015 +0200
@@ -68,7 +68,7 @@
   // or it was allocated after marking finished, then we add it. Otherwise
   // we can safely ignore the object.
   if (!g1h->is_obj_dead(oop(cur), _hr)) {
-    oop_size = oop(cur)->oop_iterate(_rs_scan, mr);
+    oop_size = oop(cur)->oop_iterate_size(_rs_scan, mr);
   } else {
     oop_size = _hr->block_size(cur);
   }
--- a/hotspot/src/share/vm/gc/parallel/immutableSpace.cpp	Tue Sep 01 14:01:18 2015 -0700
+++ b/hotspot/src/share/vm/gc/parallel/immutableSpace.cpp	Wed Sep 02 09:14:04 2015 +0200
@@ -44,7 +44,7 @@
   HeapWord* t = end();
   // Could call objects iterate, but this is easier.
   while (obj_addr < t) {
-    obj_addr += oop(obj_addr)->oop_iterate(cl);
+    obj_addr += oop(obj_addr)->oop_iterate_size(cl);
   }
 }
 
--- a/hotspot/src/share/vm/gc/parallel/mutableSpace.cpp	Tue Sep 01 14:01:18 2015 -0700
+++ b/hotspot/src/share/vm/gc/parallel/mutableSpace.cpp	Wed Sep 02 09:14:04 2015 +0200
@@ -213,15 +213,6 @@
   return (HeapWord*)Atomic::cmpxchg_ptr(obj, top_addr(), expected_top) == expected_top;
 }
 
-void MutableSpace::oop_iterate(ExtendedOopClosure* cl) {
-  HeapWord* obj_addr = bottom();
-  HeapWord* t = top();
-  // Could call objects iterate, but this is easier.
-  while (obj_addr < t) {
-    obj_addr += oop(obj_addr)->oop_iterate(cl);
-  }
-}
-
 void MutableSpace::oop_iterate_no_header(OopClosure* cl) {
   HeapWord* obj_addr = bottom();
   HeapWord* t = top();
--- a/hotspot/src/share/vm/gc/parallel/mutableSpace.hpp	Tue Sep 01 14:01:18 2015 -0700
+++ b/hotspot/src/share/vm/gc/parallel/mutableSpace.hpp	Wed Sep 02 09:14:04 2015 +0200
@@ -134,7 +134,6 @@
   bool cas_deallocate(HeapWord *obj, size_t size);
 
   // Iteration.
-  void oop_iterate(ExtendedOopClosure* cl);
   void oop_iterate_no_header(OopClosure* cl);
   void object_iterate(ObjectClosure* cl);
 
--- a/hotspot/src/share/vm/gc/parallel/psMarkSweep.cpp	Tue Sep 01 14:01:18 2015 -0700
+++ b/hotspot/src/share/vm/gc/parallel/psMarkSweep.cpp	Wed Sep 02 09:14:04 2015 +0200
@@ -60,7 +60,7 @@
 
 void PSMarkSweep::initialize() {
   MemRegion mr = ParallelScavengeHeap::heap()->reserved_region();
-  _ref_processor = new ReferenceProcessor(mr);     // a vanilla ref proc
+  set_ref_processor(new ReferenceProcessor(mr));     // a vanilla ref proc
   _counters = new CollectorCounters("PSMarkSweep", 1);
 }
 
--- a/hotspot/src/share/vm/gc/serial/genMarkSweep.cpp	Tue Sep 01 14:01:18 2015 -0700
+++ b/hotspot/src/share/vm/gc/serial/genMarkSweep.cpp	Wed Sep 02 09:14:04 2015 +0200
@@ -67,7 +67,7 @@
   // hook up weak ref data so it can be used during Mark-Sweep
   assert(ref_processor() == NULL, "no stomping");
   assert(rp != NULL, "should be non-NULL");
-  _ref_processor = rp;
+  set_ref_processor(rp);
   rp->setup_policy(clear_all_softrefs);
 
   GCTraceTime t1(GCCauseString("Full GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, _gc_tracer->gc_id());
@@ -136,7 +136,7 @@
   }
 
   // refs processing: clean slate
-  _ref_processor = NULL;
+  set_ref_processor(NULL);
 
   // Update heap occupancy information which is used as
   // input to soft ref clearing policy at the next gc.
--- a/hotspot/src/share/vm/gc/serial/markSweep.cpp	Tue Sep 01 14:01:18 2015 -0700
+++ b/hotspot/src/share/vm/gc/serial/markSweep.cpp	Wed Sep 02 09:14:04 2015 +0200
@@ -28,11 +28,20 @@
 #include "gc/shared/collectedHeap.inline.hpp"
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTrace.hpp"
+#include "gc/shared/specialized_oop_closures.hpp"
+#include "memory/iterator.inline.hpp"
+#include "oops/instanceClassLoaderKlass.inline.hpp"
 #include "oops/instanceKlass.inline.hpp"
 #include "oops/instanceMirrorKlass.inline.hpp"
+#include "oops/instanceRefKlass.inline.hpp"
 #include "oops/methodData.hpp"
 #include "oops/objArrayKlass.inline.hpp"
 #include "oops/oop.inline.hpp"
+#include "utilities/macros.hpp"
+#include "utilities/stack.inline.hpp"
+#if INCLUDE_ALL_GCS
+#include "gc/g1/g1StringDedup.hpp"
+#endif // INCLUDE_ALL_GCS
 
 uint                    MarkSweep::_total_invocations = 0;
 
@@ -50,176 +59,101 @@
 
 MarkSweep::FollowRootClosure  MarkSweep::follow_root_closure;
 
-void MarkSweep::FollowRootClosure::do_oop(oop* p)       { follow_root(p); }
-void MarkSweep::FollowRootClosure::do_oop(narrowOop* p) { follow_root(p); }
-
-MarkSweep::MarkAndPushClosure MarkSweep::mark_and_push_closure;
+MarkAndPushClosure            MarkSweep::mark_and_push_closure;
 CLDToOopClosure               MarkSweep::follow_cld_closure(&mark_and_push_closure);
 CLDToOopClosure               MarkSweep::adjust_cld_closure(&adjust_pointer_closure);
 
-template <typename T>
-void MarkSweep::MarkAndPushClosure::do_oop_nv(T* p)       { mark_and_push(p); }
-void MarkSweep::MarkAndPushClosure::do_oop(oop* p)        { do_oop_nv(p); }
-void MarkSweep::MarkAndPushClosure::do_oop(narrowOop* p)  { do_oop_nv(p); }
+inline void MarkSweep::mark_object(oop obj) {
+#if INCLUDE_ALL_GCS
+  if (G1StringDedup::is_enabled()) {
+    // We must enqueue the object before it is marked
+    // as we otherwise can't read the object's age.
+    G1StringDedup::enqueue_from_mark(obj);
+  }
+#endif
+  // some marks may contain information we need to preserve so we store them away
+  // and overwrite the mark.  We'll restore it at the end of markSweep.
+  markOop mark = obj->mark();
+  obj->set_mark(markOopDesc::prototype()->set_marked());
 
-void MarkSweep::follow_class_loader(ClassLoaderData* cld) {
-  MarkSweep::follow_cld_closure.do_cld(cld);
-}
-
-void InstanceKlass::oop_ms_follow_contents(oop obj) {
-  assert(obj != NULL, "can't follow the content of NULL object");
-  MarkSweep::follow_klass(this);
-
-  oop_oop_iterate_oop_maps<true>(obj, &MarkSweep::mark_and_push_closure);
+  if (mark->must_be_preserved(obj)) {
+    preserve_mark(obj, mark);
+  }
 }
 
-void InstanceMirrorKlass::oop_ms_follow_contents(oop obj) {
-  InstanceKlass::oop_ms_follow_contents(obj);
-
-  // Follow the klass field in the mirror
-  Klass* klass = java_lang_Class::as_Klass(obj);
-  if (klass != NULL) {
-    // An anonymous class doesn't have its own class loader, so the call
-    // to follow_klass will mark and push its java mirror instead of the
-    // class loader. When handling the java mirror for an anonymous class
-    // we need to make sure its class loader data is claimed, this is done
-    // by calling follow_class_loader explicitly. For non-anonymous classes
-    // the call to follow_class_loader is made when the class loader itself
-    // is handled.
-    if (klass->oop_is_instance() && InstanceKlass::cast(klass)->is_anonymous()) {
-      MarkSweep::follow_class_loader(klass->class_loader_data());
-    } else {
-      MarkSweep::follow_klass(klass);
+template <class T> inline void MarkSweep::mark_and_push(T* p) {
+  T heap_oop = oopDesc::load_heap_oop(p);
+  if (!oopDesc::is_null(heap_oop)) {
+    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    if (!obj->mark()->is_marked() &&
+        !is_archive_object(obj)) {
+      mark_object(obj);
+      _marking_stack.push(obj);
     }
-  } else {
-    // If klass is NULL then this a mirror for a primitive type.
-    // We don't have to follow them, since they are handled as strong
-    // roots in Universe::oops_do.
-    assert(java_lang_Class::is_primitive(obj), "Sanity check");
-  }
-
-  oop_oop_iterate_statics<true>(obj, &MarkSweep::mark_and_push_closure);
-}
-
-void InstanceClassLoaderKlass::oop_ms_follow_contents(oop obj) {
-  InstanceKlass::oop_ms_follow_contents(obj);
-
-  ClassLoaderData * const loader_data = java_lang_ClassLoader::loader_data(obj);
-
-  // We must NULL check here, since the class loader
-  // can be found before the loader data has been set up.
-  if(loader_data != NULL) {
-    MarkSweep::follow_class_loader(loader_data);
   }
 }
 
-template <class T>
-static void oop_ms_follow_contents_specialized(InstanceRefKlass* klass, oop obj) {
-  T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
-  T heap_oop = oopDesc::load_heap_oop(referent_addr);
-  debug_only(
-    if(TraceReferenceGC && PrintGCDetails) {
-      gclog_or_tty->print_cr("InstanceRefKlass::oop_ms_follow_contents_specialized " PTR_FORMAT, p2i(obj));
-    }
-  )
-  if (!oopDesc::is_null(heap_oop)) {
-    oop referent = oopDesc::decode_heap_oop_not_null(heap_oop);
-    if (!referent->is_gc_marked() &&
-        MarkSweep::ref_processor()->discover_reference(obj, klass->reference_type())) {
-      // reference was discovered, referent will be traversed later
-      klass->InstanceKlass::oop_ms_follow_contents(obj);
-      debug_only(
-        if(TraceReferenceGC && PrintGCDetails) {
-          gclog_or_tty->print_cr("       Non NULL enqueued " PTR_FORMAT, p2i(obj));
-        }
-      )
-      return;
-    } else {
-      // treat referent as normal oop
-      debug_only(
-        if(TraceReferenceGC && PrintGCDetails) {
-          gclog_or_tty->print_cr("       Non NULL normal " PTR_FORMAT, p2i(obj));
-        }
-      )
-      MarkSweep::mark_and_push(referent_addr);
-    }
-  }
-  T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
-  // Treat discovered as normal oop, if ref is not "active",
-  // i.e. if next is non-NULL.
-  T  next_oop = oopDesc::load_heap_oop(next_addr);
-  if (!oopDesc::is_null(next_oop)) { // i.e. ref is not "active"
-    T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
-    debug_only(
-      if(TraceReferenceGC && PrintGCDetails) {
-        gclog_or_tty->print_cr("   Process discovered as normal "
-                               PTR_FORMAT, p2i(discovered_addr));
-      }
-    )
-    MarkSweep::mark_and_push(discovered_addr);
-  }
-  // treat next as normal oop.  next is a link in the reference queue.
-  debug_only(
-    if(TraceReferenceGC && PrintGCDetails) {
-      gclog_or_tty->print_cr("   Process next as normal " PTR_FORMAT, p2i(next_addr));
-    }
-  )
-  MarkSweep::mark_and_push(next_addr);
-  klass->InstanceKlass::oop_ms_follow_contents(obj);
+inline void MarkSweep::follow_klass(Klass* klass) {
+  oop op = klass->klass_holder();
+  MarkSweep::mark_and_push(&op);
+}
+
+inline void MarkSweep::follow_cld(ClassLoaderData* cld) {
+  MarkSweep::follow_cld_closure.do_cld(cld);
 }
 
-void InstanceRefKlass::oop_ms_follow_contents(oop obj) {
-  if (UseCompressedOops) {
-    oop_ms_follow_contents_specialized<narrowOop>(this, obj);
-  } else {
-    oop_ms_follow_contents_specialized<oop>(this, obj);
+template <typename T>
+inline void MarkAndPushClosure::do_oop_nv(T* p)                 { MarkSweep::mark_and_push(p); }
+void MarkAndPushClosure::do_oop(oop* p)                         { do_oop_nv(p); }
+void MarkAndPushClosure::do_oop(narrowOop* p)                   { do_oop_nv(p); }
+inline bool MarkAndPushClosure::do_metadata_nv()                { return true; }
+bool MarkAndPushClosure::do_metadata()                          { return do_metadata_nv(); }
+inline void MarkAndPushClosure::do_klass_nv(Klass* k)           { MarkSweep::follow_klass(k); }
+void MarkAndPushClosure::do_klass(Klass* k)                     { do_klass_nv(k); }
+inline void MarkAndPushClosure::do_cld_nv(ClassLoaderData* cld) { MarkSweep::follow_cld(cld); }
+void MarkAndPushClosure::do_cld(ClassLoaderData* cld)           { do_cld_nv(cld); }
+
+template <class T> inline void MarkSweep::KeepAliveClosure::do_oop_work(T* p) {
+  mark_and_push(p);
+}
+
+void MarkSweep::push_objarray(oop obj, size_t index) {
+  ObjArrayTask task(obj, index);
+  assert(task.is_valid(), "bad ObjArrayTask");
+  _objarray_stack.push(task);
+}
+
+inline void MarkSweep::follow_array(objArrayOop array) {
+  MarkSweep::follow_klass(array->klass());
+  // Don't push empty arrays to avoid unnecessary work.
+  if (array->length() > 0) {
+    MarkSweep::push_objarray(array, 0);
   }
 }
 
-template <class T>
-static void oop_ms_follow_contents_specialized(oop obj, int index) {
-  objArrayOop a = objArrayOop(obj);
-  const size_t len = size_t(a->length());
-  const size_t beg_index = size_t(index);
-  assert(beg_index < len || len == 0, "index too large");
-
-  const size_t stride = MIN2(len - beg_index, ObjArrayMarkingStride);
-  const size_t end_index = beg_index + stride;
-  T* const base = (T*)a->base();
-  T* const beg = base + beg_index;
-  T* const end = base + end_index;
-
-  // Push the non-NULL elements of the next stride on the marking stack.
-  for (T* e = beg; e < end; e++) {
-    MarkSweep::mark_and_push<T>(e);
-  }
-
-  if (end_index < len) {
-    MarkSweep::push_objarray(a, end_index); // Push the continuation.
+inline void MarkSweep::follow_object(oop obj) {
+  assert(obj->is_gc_marked(), "should be marked");
+  if (obj->is_objArray()) {
+    // Handle object arrays explicitly to allow them to
+    // be split into chunks if needed.
+    MarkSweep::follow_array((objArrayOop)obj);
+  } else {
+    obj->oop_iterate(&mark_and_push_closure);
   }
 }
 
-void ObjArrayKlass::oop_ms_follow_contents(oop obj) {
-  assert (obj->is_array(), "obj must be array");
-  MarkSweep::follow_klass(this);
-  if (UseCompressedOops) {
-    oop_ms_follow_contents_specialized<narrowOop>(obj, 0);
-  } else {
-    oop_ms_follow_contents_specialized<oop>(obj, 0);
-  }
-}
+void MarkSweep::follow_array_chunk(objArrayOop array, int index) {
+  const int len = array->length();
+  const int beg_index = index;
+  assert(beg_index < len || len == 0, "index too large");
 
-void TypeArrayKlass::oop_ms_follow_contents(oop obj) {
-  assert(obj->is_typeArray(),"must be a type array");
-  // Performance tweak: We skip iterating over the klass pointer since we
-  // know that Universe::TypeArrayKlass never moves.
-}
+  const int stride = MIN2(len - beg_index, (int) ObjArrayMarkingStride);
+  const int end_index = beg_index + stride;
 
-void MarkSweep::follow_array(objArrayOop array, int index) {
-  if (UseCompressedOops) {
-    oop_ms_follow_contents_specialized<narrowOop>(array, index);
-  } else {
-    oop_ms_follow_contents_specialized<oop>(array, index);
+  array->oop_iterate_range(&mark_and_push_closure, beg_index, end_index);
+
+  if (end_index < len) {
+    MarkSweep::push_objarray(array, end_index); // Push the continuation.
   }
 }
 
@@ -233,7 +167,7 @@
     // Process ObjArrays one at a time to avoid marking stack bloat.
     if (!_objarray_stack.is_empty()) {
       ObjArrayTask task = _objarray_stack.pop();
-      follow_array(objArrayOop(task.obj()), task.index());
+      follow_array_chunk(objArrayOop(task.obj()), task.index());
     }
   } while (!_marking_stack.is_empty() || !_objarray_stack.is_empty());
 }
@@ -242,6 +176,24 @@
 
 void MarkSweep::FollowStackClosure::do_void() { follow_stack(); }
 
+template <class T> inline void MarkSweep::follow_root(T* p) {
+  assert(!Universe::heap()->is_in_reserved(p),
+         "roots shouldn't be things within the heap");
+  T heap_oop = oopDesc::load_heap_oop(p);
+  if (!oopDesc::is_null(heap_oop)) {
+    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    if (!obj->mark()->is_marked() &&
+        !is_archive_object(obj)) {
+      mark_object(obj);
+      follow_object(obj);
+    }
+  }
+  follow_stack();
+}
+
+void MarkSweep::FollowRootClosure::do_oop(oop* p)       { follow_root(p); }
+void MarkSweep::FollowRootClosure::do_oop(narrowOop* p) { follow_root(p); }
+
 void PreservedMark::adjust_pointer() {
   MarkSweep::adjust_pointer(&_obj);
 }
@@ -266,6 +218,11 @@
   }
 }
 
+void MarkSweep::set_ref_processor(ReferenceProcessor* rp) {
+  _ref_processor = rp;
+  mark_and_push_closure.set_ref_processor(_ref_processor);
+}
+
 MarkSweep::AdjustPointerClosure MarkSweep::adjust_pointer_closure;
 
 template <typename T>
@@ -405,3 +362,6 @@
   // know that Universe::TypeArrayKlass never moves.
   return t->object_size();
 }
+
+// Generate MS specialized oop_oop_iterate functions.
+SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_MS(ALL_KLASS_OOP_OOP_ITERATE_DEFN)
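
The follow_array/follow_array_chunk split above is what bounds marking-stack growth for large arrays: an array is queued as an ObjArrayTask starting at index 0, and each pop processes at most ObjArrayMarkingStride elements before queuing a continuation. A condensed control-flow sketch, summarizing the hunks above:

    // follow_array(a):            push_objarray(a, 0);  // queue whole array
    // follow_array_chunk(a, i):
    //   stride = MIN2(len - i, (int) ObjArrayMarkingStride);
    //   a->oop_iterate_range(&mark_and_push_closure, i, i + stride);
    //   if (i + stride < len) push_objarray(a, i + stride);  // continuation
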
--- a/hotspot/src/share/vm/gc/serial/markSweep.hpp	Tue Sep 01 14:01:18 2015 -0700
+++ b/hotspot/src/share/vm/gc/serial/markSweep.hpp	Wed Sep 02 09:14:04 2015 +0200
@@ -49,6 +49,7 @@
 
 // declared at end
 class PreservedMark;
+class MarkAndPushClosure;
 
 class MarkSweep : AllStatic {
   //
@@ -60,13 +61,6 @@
     virtual void do_oop(narrowOop* p);
   };
 
-  class MarkAndPushClosure: public ExtendedOopClosure {
-   public:
-    template <typename T> void do_oop_nv(T* p);
-    virtual void do_oop(oop* p);
-    virtual void do_oop(narrowOop* p);
-  };
-
   class FollowStackClosure: public VoidClosure {
    public:
     virtual void do_void();
@@ -146,6 +140,7 @@
 
   // Reference Processing
   static ReferenceProcessor* const ref_processor() { return _ref_processor; }
+  static void set_ref_processor(ReferenceProcessor* rp);
 
   // Archive Object handling
   static inline bool is_archive_object(oop object);
@@ -153,34 +148,55 @@
   static STWGCTimer* gc_timer() { return _gc_timer; }
   static SerialOldTracer* gc_tracer() { return _gc_tracer; }
 
+  static void preserve_mark(oop p, markOop mark);
+                                // Save the mark word so it can be restored later
+  static void adjust_marks();   // Adjust the pointers in the preserved marks table
+  static void restore_marks();  // Restore the marks that we saved in preserve_mark
+
+  static int adjust_pointers(oop obj);
+
+  static void follow_stack();   // Empty marking stack.
+
+  static void follow_klass(Klass* klass);
+
+  static void follow_cld(ClassLoaderData* cld);
+
+  template <class T> static inline void adjust_pointer(T* p);
+
+  // Check mark and maybe push on marking stack
+  template <class T> static void mark_and_push(T* p);
+
+ private:
   // Call backs for marking
   static void mark_object(oop obj);
   // Mark pointer and follow contents.  Empty marking stack afterwards.
   template <class T> static inline void follow_root(T* p);
 
-  // Check mark and maybe push on marking stack
-  template <class T> static void mark_and_push(T* p);
-
   static inline void push_objarray(oop obj, size_t index);
 
-  static void follow_stack();   // Empty marking stack.
-
   static void follow_object(oop obj);
 
-  static void follow_array(objArrayOop array, int index);
+  static void follow_array(objArrayOop array);
+
+  static void follow_array_chunk(objArrayOop array, int index);
+};
 
-  static void follow_klass(Klass* klass);
-
-  static void follow_class_loader(ClassLoaderData* cld);
+class MarkAndPushClosure: public ExtendedOopClosure {
+public:
+  template <typename T> void do_oop_nv(T* p);
+  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p);
 
-  static int adjust_pointers(oop obj);
+  virtual bool do_metadata();
+  bool do_metadata_nv();
 
-  static void preserve_mark(oop p, markOop mark);
-                                // Save the mark word so it can be restored later
-  static void adjust_marks();   // Adjust the pointers in the preserved marks table
-  static void restore_marks();  // Restore the marks that we saved in preserve_mark
+  virtual void do_klass(Klass* k);
+  void do_klass_nv(Klass* k);
 
-  template <class T> static inline void adjust_pointer(T* p);
+  virtual void do_cld(ClassLoaderData* cld);
+  void do_cld_nv(ClassLoaderData* cld);
+
+  void set_ref_processor(ReferenceProcessor* rp) { _ref_processor = rp; }
 };
 
 class PreservedMark VALUE_OBJ_CLASS_SPEC {
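
MarkAndPushClosure moves from a nested class inside MarkSweep to namespace scope because the closure-specialization machinery forward-declares it (see specialized_oop_closures.hpp below), and C++ does not allow a nested class to be forward-declared from outside its enclosing class:

    class MarkAndPushClosure;                // fine at namespace scope
    // class MarkSweep::MarkAndPushClosure;  // ill-formed as a declaration
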
--- a/hotspot/src/share/vm/gc/serial/markSweep.inline.hpp	Tue Sep 01 14:01:18 2015 -0700
+++ b/hotspot/src/share/vm/gc/serial/markSweep.inline.hpp	Wed Sep 02 09:14:04 2015 +0200
@@ -26,38 +26,13 @@
 #define SHARE_VM_GC_SERIAL_MARKSWEEP_INLINE_HPP
 
 #include "gc/serial/markSweep.hpp"
-#include "gc/shared/collectedHeap.hpp"
-#include "oops/instanceClassLoaderKlass.inline.hpp"
-#include "oops/instanceKlass.inline.hpp"
-#include "oops/instanceMirrorKlass.inline.hpp"
-#include "oops/instanceRefKlass.inline.hpp"
+#include "memory/universe.hpp"
 #include "oops/markOop.inline.hpp"
-#include "oops/objArrayKlass.inline.hpp"
-#include "utilities/macros.hpp"
-#include "utilities/stack.inline.hpp"
+#include "oops/oop.inline.hpp"
 #if INCLUDE_ALL_GCS
-#include "gc/g1/g1StringDedup.hpp"
 #include "gc/g1/g1MarkSweep.hpp"
 #endif // INCLUDE_ALL_GCS
 
-inline void MarkSweep::mark_object(oop obj) {
-#if INCLUDE_ALL_GCS
-  if (G1StringDedup::is_enabled()) {
-    // We must enqueue the object before it is marked
-    // as we otherwise can't read the object's age.
-    G1StringDedup::enqueue_from_mark(obj);
-  }
-#endif
-  // some marks may contain information we need to preserve so we store them away
-  // and overwrite the mark.  We'll restore it at the end of markSweep.
-  markOop mark = obj->mark();
-  obj->set_mark(markOopDesc::prototype()->set_marked());
-
-  if (mark->must_be_preserved(obj)) {
-    preserve_mark(obj, mark);
-  }
-}
-
 inline bool MarkSweep::is_archive_object(oop object) {
 #if INCLUDE_ALL_GCS
   return (G1MarkSweep::archive_check_enabled() &&
@@ -67,51 +42,6 @@
 #endif
 }
 
-inline void MarkSweep::follow_klass(Klass* klass) {
-  oop op = klass->klass_holder();
-  MarkSweep::mark_and_push(&op);
-}
-
-inline void MarkSweep::follow_object(oop obj) {
-  assert(obj->is_gc_marked(), "should be marked");
-
-  obj->ms_follow_contents();
-}
-
-template <class T> inline void MarkSweep::follow_root(T* p) {
-  assert(!Universe::heap()->is_in_reserved(p),
-         "roots shouldn't be things within the heap");
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-    if (!obj->mark()->is_marked() &&
-        !is_archive_object(obj)) {
-      mark_object(obj);
-      follow_object(obj);
-    }
-  }
-  follow_stack();
-}
-
-template <class T> inline void MarkSweep::mark_and_push(T* p) {
-//  assert(Universe::heap()->is_in_reserved(p), "should be in object space");
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-    if (!obj->mark()->is_marked() &&
-        !is_archive_object(obj)) {
-      mark_object(obj);
-      _marking_stack.push(obj);
-    }
-  }
-}
-
-void MarkSweep::push_objarray(oop obj, size_t index) {
-  ObjArrayTask task(obj, index);
-  assert(task.is_valid(), "bad ObjArrayTask");
-  _objarray_stack.push(task);
-}
-
 inline int MarkSweep::adjust_pointers(oop obj) {
   return obj->ms_adjust_pointers();
 }
@@ -139,8 +69,4 @@
   }
 }
 
-template <class T> inline void MarkSweep::KeepAliveClosure::do_oop_work(T* p) {
-  mark_and_push(p);
-}
-
 #endif // SHARE_VM_GC_SERIAL_MARKSWEEP_INLINE_HPP
--- a/hotspot/src/share/vm/gc/shared/space.cpp	Tue Sep 01 14:01:18 2015 -0700
+++ b/hotspot/src/share/vm/gc/shared/space.cpp	Wed Sep 02 09:14:04 2015 +0200
@@ -237,7 +237,7 @@
                                                    HeapWord* bottom,    \
                                                    HeapWord* top,       \
                                                    ClosureType* cl) {   \
-  bottom += oop(bottom)->oop_iterate(cl, mr);                           \
+  bottom += oop(bottom)->oop_iterate_size(cl, mr);                      \
   if (bottom < top) {                                                   \
     HeapWord* next_obj = bottom + oop(bottom)->size();                  \
     while (next_obj < top) {                                            \
@@ -508,7 +508,7 @@
     HeapWord* t = mr.end();                                                 \
     while (obj_addr < t) {                                                  \
       assert(oop(obj_addr)->is_oop(), "Should be an oop");                  \
-      obj_addr += oop(obj_addr)->oop_iterate(blk);                          \
+      obj_addr += oop(obj_addr)->oop_iterate_size(blk);                     \
     }                                                                       \
   }
 
@@ -523,7 +523,7 @@
   HeapWord* t = top();
   // Could call objects iterate, but this is easier.
   while (obj_addr < t) {
-    obj_addr += oop(obj_addr)->oop_iterate(blk);
+    obj_addr += oop(obj_addr)->oop_iterate_size(blk);
   }
 }
 
@@ -578,7 +578,7 @@
       Prefetch::write(p, interval);                                       \
       debug_only(HeapWord* prev = p);                                     \
       oop m = oop(p);                                                     \
-      p += m->oop_iterate(blk);                                           \
+      p += m->oop_iterate_size(blk);                                      \
     }                                                                     \
   } while (t < top());                                                    \
                                                                           \
--- a/hotspot/src/share/vm/gc/shared/specialized_oop_closures.hpp	Tue Sep 01 14:01:18 2015 -0700
+++ b/hotspot/src/share/vm/gc/shared/specialized_oop_closures.hpp	Wed Sep 02 09:14:04 2015 +0200
@@ -42,6 +42,8 @@
 class ScanClosure;
 class FastScanClosure;
 class FilteringClosure;
+// MarkSweep
+class MarkAndPushClosure;
 // ParNew
 class ParScanWithBarrierClosure;
 class ParScanWithoutBarrierClosure;
@@ -87,6 +89,9 @@
   SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_S(f)             \
   SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_P(f)
 
+#define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_MS(f)      \
+  f(MarkAndPushClosure,_nv)
+
 #if INCLUDE_ALL_GCS
 #define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_CMS(f)     \
   f(MarkRefsIntoAndScanClosure,_nv)                     \
@@ -101,10 +106,12 @@
 
 #if INCLUDE_ALL_GCS
 #define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_2(f)       \
+  SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_MS(f)            \
   SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_CMS(f)           \
   SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_G1(f)
 #else  // INCLUDE_ALL_GCS
-#define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_2(f)
+#define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_2(f)       \
+  SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_MS(f)
 #endif // INCLUDE_ALL_GCS
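
What the new macro buys: SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_MS(f) applies f to (MarkAndPushClosure, _nv), so when markSweep.cpp invokes it with ALL_KLASS_OOP_OOP_ITERATE_DEFN, each Klass subtype gains a specialized iterate function for the mark-sweep closure. A hand-expanded sketch of one resulting definition (the exact expansion is driven by the OOP_OOP_ITERATE_DEFN macros in klass.hpp; return types are void after this change):

    void InstanceKlass::oop_oop_iterate_nv(oop obj, MarkAndPushClosure* closure) {
      oop_oop_iterate<true>(obj, closure);  // the _nv suffix maps to nv == true
    }
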
 
 
--- a/hotspot/src/share/vm/gc/shared/taskqueue.cpp	Tue Sep 01 14:01:18 2015 -0700
+++ b/hotspot/src/share/vm/gc/shared/taskqueue.cpp	Wed Sep 02 09:14:04 2015 +0200
@@ -258,8 +258,8 @@
 
 #ifdef ASSERT
 bool ObjArrayTask::is_valid() const {
-  return _obj != NULL && _obj->is_objArray() && _index > 0 &&
-    _index < objArrayOop(_obj)->length();
+  return _obj != NULL && _obj->is_objArray() && _index >= 0 &&
+      _index < objArrayOop(_obj)->length();
 }
 #endif // ASSERT
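
The relaxed assertion (_index >= 0 instead of _index > 0) matches the new marking scheme: follow_array now pushes the initial chunk as ObjArrayTask(array, 0), whereas the removed oop_ms_follow_contents code marked the first stride inline and only ever pushed continuations with a positive index.
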
 
--- a/hotspot/src/share/vm/memory/iterator.hpp	Tue Sep 01 14:01:18 2015 -0700
+++ b/hotspot/src/share/vm/memory/iterator.hpp	Wed Sep 02 09:14:04 2015 +0200
@@ -61,7 +61,7 @@
   //
   // 1) do_klass on the header klass pointer.
   // 2) do_klass on the klass pointer in the mirrors.
-  // 3) do_class_loader_data on the class loader data in class loaders.
+  // 3) do_cld   on the class loader data in class loaders.
   //
   // The virtual (without suffix) and the non-virtual (with _nv suffix) need
   // to be updated together, or else the devirtualization will break.
@@ -71,13 +71,14 @@
   // ExtendedOopClosures that don't need to walk the metadata.
   // Currently, only CMS and G1 need these.
 
+  bool do_metadata_nv()      { return false; }
   virtual bool do_metadata() { return do_metadata_nv(); }
-  bool do_metadata_nv()      { return false; }
 
-  virtual void do_klass(Klass* k)   { do_klass_nv(k); }
-  void do_klass_nv(Klass* k)        { ShouldNotReachHere(); }
+  void do_klass_nv(Klass* k)      { ShouldNotReachHere(); }
+  virtual void do_klass(Klass* k) { do_klass_nv(k); }
 
-  virtual void do_class_loader_data(ClassLoaderData* cld) { ShouldNotReachHere(); }
+  void do_cld_nv(ClassLoaderData* cld)      { ShouldNotReachHere(); }
+  virtual void do_cld(ClassLoaderData* cld) { do_cld_nv(cld); }
 
   // True iff this closure may be safely applied more than once to an oop
   // location without an intervening "major reset" (like the end of a GC).
@@ -180,13 +181,14 @@
     _klass_closure.initialize(this);
   }
 
-  virtual bool do_metadata()    { return do_metadata_nv(); }
-  inline  bool do_metadata_nv() { return true; }
+  bool do_metadata_nv()      { return true; }
+  virtual bool do_metadata() { return do_metadata_nv(); }
 
-  virtual void do_klass(Klass* k);
   void do_klass_nv(Klass* k);
+  virtual void do_klass(Klass* k) { do_klass_nv(k); }
 
-  virtual void do_class_loader_data(ClassLoaderData* cld);
+  void do_cld_nv(ClassLoaderData* cld);
+  virtual void do_cld(ClassLoaderData* cld) { do_cld_nv(cld); }
 };
 
 // ObjectClosure is used for iterating through an object space
@@ -370,6 +372,7 @@
  public:
   template <class OopClosureType, typename T> static void do_oop(OopClosureType* closure, T* p);
   template <class OopClosureType>             static void do_klass(OopClosureType* closure, Klass* k);
+  template <class OopClosureType>             static void do_cld(OopClosureType* closure, ClassLoaderData* cld);
   template <class OopClosureType>             static bool do_metadata(OopClosureType* closure);
 };
 
@@ -378,6 +381,7 @@
  public:
   template <class OopClosureType, typename T> static void do_oop(OopClosureType* closure, T* p);
   template <class OopClosureType>             static void do_klass(OopClosureType* closure, Klass* k);
+  template <class OopClosureType>             static void do_cld(OopClosureType* closure, ClassLoaderData* cld);
   template <class OopClosureType>             static bool do_metadata(OopClosureType* closure);
 };
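
The new Devirtualizer::do_cld entry point mirrors do_oop/do_klass/do_metadata: with nv == true it calls the closure's do_cld_nv directly (statically bound), and with nv == false it goes through the virtual do_cld, as the iterator.inline.hpp hunks below show. Usage sketch, with a hypothetical example_visit simplified from the InstanceClassLoaderKlass changes in this patch:

    template <bool nv, class OopClosureType>
    void example_visit(oop obj, OopClosureType* closure) {
      if (Devirtualizer<nv>::do_metadata(closure)) {
        ClassLoaderData* cld = java_lang_ClassLoader::loader_data(obj);
        if (cld != NULL) {  // can be null for a non-registered class loader
          Devirtualizer<nv>::do_cld(closure, cld);
        }
      }
    }
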
 
--- a/hotspot/src/share/vm/memory/iterator.inline.hpp	Tue Sep 01 14:01:18 2015 -0700
+++ b/hotspot/src/share/vm/memory/iterator.inline.hpp	Wed Sep 02 09:14:04 2015 +0200
@@ -36,7 +36,7 @@
 #include "oops/typeArrayKlass.inline.hpp"
 #include "utilities/debug.hpp"
 
-inline void MetadataAwareOopClosure::do_class_loader_data(ClassLoaderData* cld) {
+inline void MetadataAwareOopClosure::do_cld_nv(ClassLoaderData* cld) {
   assert(_klass_closure._oop_closure == this, "Must be");
 
   bool claim = true;  // Must claim the class loader data before processing.
@@ -45,11 +45,9 @@
 
 inline void MetadataAwareOopClosure::do_klass_nv(Klass* k) {
   ClassLoaderData* cld = k->class_loader_data();
-  do_class_loader_data(cld);
+  do_cld_nv(cld);
 }
 
-inline void MetadataAwareOopClosure::do_klass(Klass* k)       { do_klass_nv(k); }
-
 #ifdef ASSERT
 // This verification is applied to all visited oops.
 // The closures can turn is off by overriding should_verify_oops().
@@ -78,6 +76,10 @@
   closure->do_klass_nv(k);
 }
 template <class OopClosureType>
+void Devirtualizer<true>::do_cld(OopClosureType* closure, ClassLoaderData* cld) {
+  closure->do_cld_nv(cld);
+}
+template <class OopClosureType>
 inline bool Devirtualizer<true>::do_metadata(OopClosureType* closure) {
   // Make sure the non-virtual and the virtual versions match.
   assert(closure->do_metadata_nv() == closure->do_metadata(), "Inconsistency in do_metadata");
@@ -96,6 +98,10 @@
   closure->do_klass(k);
 }
 template <class OopClosureType>
+void Devirtualizer<false>::do_cld(OopClosureType* closure, ClassLoaderData* cld) {
+  closure->do_cld(cld);
+}
+template <class OopClosureType>
 bool Devirtualizer<false>::do_metadata(OopClosureType* closure) {
   return closure->do_metadata();
 }
--- a/hotspot/src/share/vm/oops/arrayKlass.hpp	Tue Sep 01 14:01:18 2015 -0700
+++ b/hotspot/src/share/vm/oops/arrayKlass.hpp	Wed Sep 02 09:14:04 2015 +0200
@@ -147,30 +147,30 @@
 // Array oop iteration macros for declarations.
 // Used to generate the declarations in the *ArrayKlass header files.
 
-#define OOP_OOP_ITERATE_DECL_RANGE(OopClosureType, nv_suffix)                                  \
-  int oop_oop_iterate_range##nv_suffix(oop obj, OopClosureType* closure, int start, int end);
+#define OOP_OOP_ITERATE_DECL_RANGE(OopClosureType, nv_suffix)                                   \
+  void oop_oop_iterate_range##nv_suffix(oop obj, OopClosureType* closure, int start, int end);
 
 #if INCLUDE_ALL_GCS
 // Named NO_BACKWARDS because the definition used by *ArrayKlass isn't reversed, see below.
-#define OOP_OOP_ITERATE_DECL_NO_BACKWARDS(OopClosureType, nv_suffix)           \
-  int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure);
+#define OOP_OOP_ITERATE_DECL_NO_BACKWARDS(OopClosureType, nv_suffix)            \
+  void oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure);
 #endif // INCLUDE_ALL_GCS
 
 
 // Array oop iteration macros for definitions.
 // Used to generate the definitions in the *ArrayKlass.inline.hpp files.
 
-#define OOP_OOP_ITERATE_DEFN_RANGE(KlassType, OopClosureType, nv_suffix)                                 \
-                                                                                                         \
-int KlassType::oop_oop_iterate_range##nv_suffix(oop obj, OopClosureType* closure, int start, int end) {  \
-  return oop_oop_iterate_range<nvs_to_bool(nv_suffix)>(obj, closure, start, end);                        \
+#define OOP_OOP_ITERATE_DEFN_RANGE(KlassType, OopClosureType, nv_suffix)                                  \
+                                                                                                          \
+void KlassType::oop_oop_iterate_range##nv_suffix(oop obj, OopClosureType* closure, int start, int end) {  \
+  oop_oop_iterate_range<nvs_to_bool(nv_suffix)>(obj, closure, start, end);                                \
 }
 
 #if INCLUDE_ALL_GCS
-#define OOP_OOP_ITERATE_DEFN_NO_BACKWARDS(KlassType, OopClosureType, nv_suffix)          \
-int KlassType::oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) {  \
-  /* No reverse implementation ATM. */                                                   \
-  return oop_oop_iterate<nvs_to_bool(nv_suffix)>(obj, closure);                          \
+#define OOP_OOP_ITERATE_DEFN_NO_BACKWARDS(KlassType, OopClosureType, nv_suffix)           \
+void KlassType::oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) {  \
+  /* No reverse implementation ATM. */                                                    \
+  oop_oop_iterate<nvs_to_bool(nv_suffix)>(obj, closure);                                  \
 }
 #else
 #define OOP_OOP_ITERATE_DEFN_NO_BACKWARDS(KlassType, OopClosureType, nv_suffix)
--- a/hotspot/src/share/vm/oops/instanceClassLoaderKlass.hpp	Tue Sep 01 14:01:18 2015 -0700
+++ b/hotspot/src/share/vm/oops/instanceClassLoaderKlass.hpp	Wed Sep 02 09:14:04 2015 +0200
@@ -51,7 +51,6 @@
   // GC specific object visitors
   //
   // Mark Sweep
-  void oop_ms_follow_contents(oop obj);
   int  oop_ms_adjust_pointers(oop obj);
 #if INCLUDE_ALL_GCS
   // Parallel Scavenge
@@ -71,19 +70,19 @@
   // Forward iteration
   // Iterate over the oop fields and metadata.
   template <bool nv, class OopClosureType>
-  inline int oop_oop_iterate(oop obj, OopClosureType* closure);
+  inline void oop_oop_iterate(oop obj, OopClosureType* closure);
 
 #if INCLUDE_ALL_GCS
   // Reverse iteration
   // Iterate over the oop fields and metadata.
   template <bool nv, class OopClosureType>
-  inline int oop_oop_iterate_reverse(oop obj, OopClosureType* closure);
+  inline void oop_oop_iterate_reverse(oop obj, OopClosureType* closure);
 #endif
 
   // Bounded range iteration
   // Iterate over the oop fields and metadata.
   template <bool nv, class OopClosureType>
-  inline int oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr);
+  inline void oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr);
 
  public:
 
--- a/hotspot/src/share/vm/oops/instanceClassLoaderKlass.inline.hpp	Tue Sep 01 14:01:18 2015 -0700
+++ b/hotspot/src/share/vm/oops/instanceClassLoaderKlass.inline.hpp	Wed Sep 02 09:14:04 2015 +0200
@@ -26,6 +26,7 @@
 #define SHARE_VM_OOPS_INSTANCECLASSLOADERKLASS_INLINE_HPP
 
 #include "classfile/javaClasses.hpp"
+#include "memory/iterator.inline.hpp"
 #include "oops/instanceClassLoaderKlass.hpp"
 #include "oops/instanceKlass.inline.hpp"
 #include "oops/oop.inline.hpp"
@@ -34,48 +35,42 @@
 #include "utilities/macros.hpp"
 
 template <bool nv, class OopClosureType>
-inline int InstanceClassLoaderKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
-  int size = InstanceKlass::oop_oop_iterate<nv>(obj, closure);
+inline void InstanceClassLoaderKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
+  InstanceKlass::oop_oop_iterate<nv>(obj, closure);
 
   if (Devirtualizer<nv>::do_metadata(closure)) {
     ClassLoaderData* cld = java_lang_ClassLoader::loader_data(obj);
     // cld can be null if we have a non-registered class loader.
     if (cld != NULL) {
-      closure->do_class_loader_data(cld);
+      Devirtualizer<nv>::do_cld(closure, cld);
     }
   }
-
-  return size;
 }
 
 #if INCLUDE_ALL_GCS
 template <bool nv, class OopClosureType>
-inline int InstanceClassLoaderKlass::oop_oop_iterate_reverse(oop obj, OopClosureType* closure) {
-  int size = InstanceKlass::oop_oop_iterate_reverse<nv>(obj, closure);
+inline void InstanceClassLoaderKlass::oop_oop_iterate_reverse(oop obj, OopClosureType* closure) {
+  InstanceKlass::oop_oop_iterate_reverse<nv>(obj, closure);
 
   assert(!Devirtualizer<nv>::do_metadata(closure),
       "Code to handle metadata is not implemented");
-
-  return size;
 }
 #endif // INCLUDE_ALL_GCS
 
 
 template <bool nv, class OopClosureType>
-inline int InstanceClassLoaderKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
-  int size = InstanceKlass::oop_oop_iterate_bounded<nv>(obj, closure, mr);
+inline void InstanceClassLoaderKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
+  InstanceKlass::oop_oop_iterate_bounded<nv>(obj, closure, mr);
 
   if (Devirtualizer<nv>::do_metadata(closure)) {
     if (mr.contains(obj)) {
       ClassLoaderData* cld = java_lang_ClassLoader::loader_data(obj);
       // cld can be null if we have a non-registered class loader.
       if (cld != NULL) {
-        closure->do_class_loader_data(cld);
+        Devirtualizer<nv>::do_cld(closure, cld);
       }
     }
   }
-
-  return size;
 }
 
 #define ALL_INSTANCE_CLASS_LOADER_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)  \
--- a/hotspot/src/share/vm/oops/instanceKlass.hpp	Tue Sep 01 14:01:18 2015 -0700
+++ b/hotspot/src/share/vm/oops/instanceKlass.hpp	Wed Sep 02 09:14:04 2015 +0200
@@ -1014,7 +1014,6 @@
   // GC specific object visitors
   //
   // Mark Sweep
-  void oop_ms_follow_contents(oop obj);
   int  oop_ms_adjust_pointers(oop obj);
 #if INCLUDE_ALL_GCS
   // Parallel Scavenge
--- a/hotspot/src/share/vm/oops/instanceKlass.inline.hpp	Tue Sep 01 14:01:18 2015 -0700
+++ b/hotspot/src/share/vm/oops/instanceKlass.inline.hpp	Wed Sep 02 09:14:04 2015 +0200
@@ -38,6 +38,8 @@
 // as the previous macro based implementation.
 #ifdef TARGET_COMPILER_visCPP
 #define INLINE __forceinline
+#elif defined(TARGET_COMPILER_sparcWorks)
+#define INLINE __attribute__((always_inline))
 #else
 #define INLINE inline
 #endif
--- a/hotspot/src/share/vm/oops/instanceMirrorKlass.hpp	Tue Sep 01 14:01:18 2015 -0700
+++ b/hotspot/src/share/vm/oops/instanceMirrorKlass.hpp	Wed Sep 02 09:14:04 2015 +0200
@@ -91,7 +91,6 @@
   // GC specific object visitors
   //
   // Mark Sweep
-  void oop_ms_follow_contents(oop obj);
   int  oop_ms_adjust_pointers(oop obj);
 #if INCLUDE_ALL_GCS
   // Parallel Scavenge
@@ -121,21 +120,21 @@
   // Forward iteration
   // Iterate over the oop fields and metadata.
   template <bool nv, class OopClosureType>
-  inline int oop_oop_iterate(oop obj, OopClosureType* closure);
+  inline void oop_oop_iterate(oop obj, OopClosureType* closure);
 
 
   // Reverse iteration
 #if INCLUDE_ALL_GCS
   // Iterate over the oop fields and metadata.
   template <bool nv, class OopClosureType>
-  inline int oop_oop_iterate_reverse(oop obj, OopClosureType* closure);
+  inline void oop_oop_iterate_reverse(oop obj, OopClosureType* closure);
 #endif
 
 
   // Bounded range iteration
   // Iterate over the oop fields and metadata.
   template <bool nv, class OopClosureType>
-  inline int oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr);
+  inline void oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr);
 
   // Iterate over the static fields.
   template <bool nv, class OopClosureType>
--- a/hotspot/src/share/vm/oops/instanceMirrorKlass.inline.hpp	Tue Sep 01 14:01:18 2015 -0700
+++ b/hotspot/src/share/vm/oops/instanceMirrorKlass.inline.hpp	Wed Sep 02 09:14:04 2015 +0200
@@ -53,30 +53,40 @@
 }
 
 template <bool nv, class OopClosureType>
-int InstanceMirrorKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
+void InstanceMirrorKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
   InstanceKlass::oop_oop_iterate<nv>(obj, closure);
 
   if (Devirtualizer<nv>::do_metadata(closure)) {
     Klass* klass = java_lang_Class::as_Klass(obj);
     // We'll get NULL for primitive mirrors.
     if (klass != NULL) {
-      Devirtualizer<nv>::do_klass(closure, klass);
+      if (klass->oop_is_instance() && InstanceKlass::cast(klass)->is_anonymous()) {
+        // An anonymous class doesn't have its own class loader, so when handling
+        // the java mirror for an anonymous class we need to make sure its class
+        // loader data is claimed, this is done by calling do_cld explicitly.
+        // For non-anonymous classes the call to do_cld is made when the class
+        // loader itself is handled.
+        Devirtualizer<nv>::do_cld(closure, klass->class_loader_data());
+      } else {
+        Devirtualizer<nv>::do_klass(closure, klass);
+      }
+    } else {
+      // If klass is NULL then this is a mirror for a primitive type.
+      // We don't have to follow them, since they are handled as strong
+      // roots in Universe::oops_do.
+      assert(java_lang_Class::is_primitive(obj), "Sanity check");
     }
   }
 
   oop_oop_iterate_statics<nv>(obj, closure);
-
-  return oop_size(obj);
 }
 
 #if INCLUDE_ALL_GCS
 template <bool nv, class OopClosureType>
-int InstanceMirrorKlass::oop_oop_iterate_reverse(oop obj, OopClosureType* closure) {
+void InstanceMirrorKlass::oop_oop_iterate_reverse(oop obj, OopClosureType* closure) {
   InstanceKlass::oop_oop_iterate_reverse<nv>(obj, closure);
 
   InstanceMirrorKlass::oop_oop_iterate_statics<nv>(obj, closure);
-
-  return oop_size(obj);
 }
 #endif
 
@@ -115,7 +125,7 @@
 }
 
 template <bool nv, class OopClosureType>
-int InstanceMirrorKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
+void InstanceMirrorKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
   InstanceKlass::oop_oop_iterate_bounded<nv>(obj, closure, mr);
 
   if (Devirtualizer<nv>::do_metadata(closure)) {
@@ -129,8 +139,6 @@
   }
 
   oop_oop_iterate_statics_bounded<nv>(obj, closure, mr);
-
-  return oop_size(obj);
 }
 
 #define ALL_INSTANCE_MIRROR_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)  \
--- a/hotspot/src/share/vm/oops/instanceRefKlass.hpp	Tue Sep 01 14:01:18 2015 -0700
+++ b/hotspot/src/share/vm/oops/instanceRefKlass.hpp	Wed Sep 02 09:14:04 2015 +0200
@@ -67,7 +67,6 @@
   // GC specific object visitors
   //
   // Mark Sweep
-  void oop_ms_follow_contents(oop obj);
   int  oop_ms_adjust_pointers(oop obj);
 #if INCLUDE_ALL_GCS
   // Parallel Scavenge
@@ -88,19 +87,19 @@
 private:
   // Iterate over all oop fields and metadata.
   template <bool nv, class OopClosureType>
-  inline int oop_oop_iterate(oop obj, OopClosureType* closure);
+  inline void oop_oop_iterate(oop obj, OopClosureType* closure);
 
   // Reverse iteration
 #if INCLUDE_ALL_GCS
   // Iterate over all oop fields and metadata.
   template <bool nv, class OopClosureType>
-  inline int oop_oop_iterate_reverse(oop obj, OopClosureType* closure);
+  inline void oop_oop_iterate_reverse(oop obj, OopClosureType* closure);
 #endif // INCLUDE_ALL_GCS
 
   // Bounded range iteration
   // Iterate over all oop fields and metadata.
   template <bool nv, class OopClosureType>
-  inline int oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr);
+  inline void oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr);
 
   // Reference processing part of the iterators.
 
--- a/hotspot/src/share/vm/oops/instanceRefKlass.inline.hpp	Tue Sep 01 14:01:18 2015 -0700
+++ b/hotspot/src/share/vm/oops/instanceRefKlass.inline.hpp	Wed Sep 02 09:14:04 2015 +0200
@@ -106,37 +106,27 @@
 }
 
 template <bool nv, class OopClosureType>
-int InstanceRefKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
-  // Get size before changing pointers
-  int size = InstanceKlass::oop_oop_iterate<nv>(obj, closure);
+void InstanceRefKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
+  InstanceKlass::oop_oop_iterate<nv>(obj, closure);
 
   oop_oop_iterate_ref_processing<nv>(obj, closure);
-
-  return size;
 }
 
 #if INCLUDE_ALL_GCS
 template <bool nv, class OopClosureType>
-int InstanceRefKlass::
-oop_oop_iterate_reverse(oop obj, OopClosureType* closure) {
-  // Get size before changing pointers
-  int size = InstanceKlass::oop_oop_iterate_reverse<nv>(obj, closure);
+void InstanceRefKlass::oop_oop_iterate_reverse(oop obj, OopClosureType* closure) {
+  InstanceKlass::oop_oop_iterate_reverse<nv>(obj, closure);
 
   oop_oop_iterate_ref_processing<nv>(obj, closure);
-
-  return size;
 }
 #endif // INCLUDE_ALL_GCS
 
 
 template <bool nv, class OopClosureType>
-int InstanceRefKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
-  // Get size before changing pointers
-  int size = InstanceKlass::oop_oop_iterate_bounded<nv>(obj, closure, mr);
+void InstanceRefKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
+  InstanceKlass::oop_oop_iterate_bounded<nv>(obj, closure, mr);
 
   oop_oop_iterate_ref_processing_bounded<nv>(obj, closure, mr);
-
-  return size;
 }
 
 // Macro to define InstanceRefKlass::oop_oop_iterate for virtual/nonvirtual for
--- a/hotspot/src/share/vm/oops/klass.hpp	Tue Sep 01 14:01:18 2015 -0700
+++ b/hotspot/src/share/vm/oops/klass.hpp	Wed Sep 02 09:14:04 2015 +0200
@@ -572,7 +572,6 @@
   // GC specific object visitors
   //
   // Mark Sweep
-  virtual void oop_ms_follow_contents(oop obj) = 0;
   virtual int  oop_ms_adjust_pointers(oop obj) = 0;
 #if INCLUDE_ALL_GCS
   // Parallel Scavenge
@@ -584,17 +583,17 @@
 
   // Iterators specialized to particular subtypes
   // of ExtendedOopClosure, to avoid closure virtual calls.
-#define Klass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix)                                          \
-  virtual int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) = 0;                        \
-  /* Iterates "closure" over all the oops in "obj" (of type "this") within "mr". */                    \
-  virtual int oop_oop_iterate_bounded##nv_suffix(oop obj, OopClosureType* closure, MemRegion mr) = 0;
+#define Klass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix)                                           \
+  virtual void oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) = 0;                        \
+  /* Iterates "closure" over all the oops in "obj" (of type "this") within "mr". */                     \
+  virtual void oop_oop_iterate_bounded##nv_suffix(oop obj, OopClosureType* closure, MemRegion mr) = 0;
 
   ALL_OOP_OOP_ITERATE_CLOSURES_1(Klass_OOP_OOP_ITERATE_DECL)
   ALL_OOP_OOP_ITERATE_CLOSURES_2(Klass_OOP_OOP_ITERATE_DECL)
 
 #if INCLUDE_ALL_GCS
-#define Klass_OOP_OOP_ITERATE_DECL_BACKWARDS(OopClosureType, nv_suffix)                    \
-  virtual int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) = 0;
+#define Klass_OOP_OOP_ITERATE_DECL_BACKWARDS(OopClosureType, nv_suffix)                     \
+  virtual void oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) = 0;
 
   ALL_OOP_OOP_ITERATE_CLOSURES_1(Klass_OOP_OOP_ITERATE_DECL_BACKWARDS)
   ALL_OOP_OOP_ITERATE_CLOSURES_2(Klass_OOP_OOP_ITERATE_DECL_BACKWARDS)
@@ -661,35 +660,35 @@
 // Used to generate declarations in the *Klass header files.
 
 #define OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix)                                    \
-  int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure);                        \
-  int oop_oop_iterate_bounded##nv_suffix(oop obj, OopClosureType* closure, MemRegion mr);
+  void oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure);                        \
+  void oop_oop_iterate_bounded##nv_suffix(oop obj, OopClosureType* closure, MemRegion mr);
 
 #if INCLUDE_ALL_GCS
-#define OOP_OOP_ITERATE_DECL_BACKWARDS(OopClosureType, nv_suffix)              \
-  int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure);
+#define OOP_OOP_ITERATE_DECL_BACKWARDS(OopClosureType, nv_suffix)               \
+  void oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure);
 #endif // INCLUDE_ALL_GCS
 
 
 // Oop iteration macros for definitions.
 // Used to generate definitions in the *Klass.inline.hpp files.
 
-#define OOP_OOP_ITERATE_DEFN(KlassType, OopClosureType, nv_suffix)             \
-int KlassType::oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) {  \
-  return oop_oop_iterate<nvs_to_bool(nv_suffix)>(obj, closure);                \
+#define OOP_OOP_ITERATE_DEFN(KlassType, OopClosureType, nv_suffix)              \
+void KlassType::oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) {  \
+  oop_oop_iterate<nvs_to_bool(nv_suffix)>(obj, closure);                        \
 }
 
 #if INCLUDE_ALL_GCS
-#define OOP_OOP_ITERATE_DEFN_BACKWARDS(KlassType, OopClosureType, nv_suffix)             \
-int KlassType::oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) {  \
-  return oop_oop_iterate_reverse<nvs_to_bool(nv_suffix)>(obj, closure);                  \
+#define OOP_OOP_ITERATE_DEFN_BACKWARDS(KlassType, OopClosureType, nv_suffix)              \
+void KlassType::oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) {  \
+  oop_oop_iterate_reverse<nvs_to_bool(nv_suffix)>(obj, closure);                          \
 }
 #else
 #define OOP_OOP_ITERATE_DEFN_BACKWARDS(KlassType, OopClosureType, nv_suffix)
 #endif
 
-#define OOP_OOP_ITERATE_DEFN_BOUNDED(KlassType, OopClosureType, nv_suffix)                           \
-int KlassType::oop_oop_iterate_bounded##nv_suffix(oop obj, OopClosureType* closure, MemRegion mr) {  \
-  return oop_oop_iterate_bounded<nvs_to_bool(nv_suffix)>(obj, closure, mr);                          \
+#define OOP_OOP_ITERATE_DEFN_BOUNDED(KlassType, OopClosureType, nv_suffix)                            \
+void KlassType::oop_oop_iterate_bounded##nv_suffix(oop obj, OopClosureType* closure, MemRegion mr) {  \
+  oop_oop_iterate_bounded<nvs_to_bool(nv_suffix)>(obj, closure, mr);                                  \
 }
 
 #endif // SHARE_VM_OOPS_KLASS_HPP
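
For reference, a sketch of what these macros expand to for one specialized
closure; ScanClosure and the _nv suffix are assumed here for illustration and
are not part of the patch:

   // Declared in Klass by Klass_OOP_OOP_ITERATE_DECL(ScanClosure, _nv):
   virtual void oop_oop_iterate_nv(oop obj, ScanClosure* closure) = 0;
   virtual void oop_oop_iterate_bounded_nv(oop obj, ScanClosure* closure, MemRegion mr) = 0;

   // Defined in a subclass by OOP_OOP_ITERATE_DEFN(KlassType, ScanClosure, _nv);
   // nvs_to_bool(_nv) selects the statically dispatched template instance:
   void InstanceKlass::oop_oop_iterate_nv(oop obj, ScanClosure* closure) {
     oop_oop_iterate<true>(obj, closure);
   }

The suffixed virtuals let callers that know the concrete closure type avoid
the ExtendedOopClosure virtual calls; the only change here is the return
type, from int to void.
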
--- a/hotspot/src/share/vm/oops/objArrayKlass.hpp	Tue Sep 01 14:01:18 2015 -0700
+++ b/hotspot/src/share/vm/oops/objArrayKlass.hpp	Wed Sep 02 09:14:04 2015 +0200
@@ -105,7 +105,6 @@
   // GC specific object visitors
   //
   // Mark Sweep
-  void oop_ms_follow_contents(oop obj);
   int  oop_ms_adjust_pointers(oop obj);
 #if INCLUDE_ALL_GCS
   // Parallel Scavenge
@@ -125,15 +124,15 @@
 
   // Iterate over oop elements and metadata.
   template <bool nv, typename OopClosureType>
-  inline int oop_oop_iterate(oop obj, OopClosureType* closure);
+  inline void oop_oop_iterate(oop obj, OopClosureType* closure);
 
   // Iterate over oop elements within mr, and metadata.
   template <bool nv, typename OopClosureType>
-  inline int oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr);
+  inline void oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr);
 
   // Iterate over oop elements with indices within [start, end), and metadata.
   template <bool nv, class OopClosureType>
-  inline int oop_oop_iterate_range(oop obj, OopClosureType* closure, int start, int end);
+  inline void oop_oop_iterate_range(oop obj, OopClosureType* closure, int start, int end);
 
   // Iterate over oop elements within [start, end), and metadata.
   // Specialized for [T = oop] or [T = narrowOop].
--- a/hotspot/src/share/vm/oops/objArrayKlass.inline.hpp	Tue Sep 01 14:01:18 2015 -0700
+++ b/hotspot/src/share/vm/oops/objArrayKlass.inline.hpp	Wed Sep 02 09:14:04 2015 +0200
@@ -85,46 +85,31 @@
 }
 
 template <bool nv, typename OopClosureType>
-int ObjArrayKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
+void ObjArrayKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
   assert (obj->is_array(), "obj must be array");
   objArrayOop a = objArrayOop(obj);
 
-  // Get size before changing pointers.
-  // Don't call size() or oop_size() since that is a virtual call.
-  int size = a->object_size();
   if (Devirtualizer<nv>::do_metadata(closure)) {
     Devirtualizer<nv>::do_klass(closure, obj->klass());
   }
 
   oop_oop_iterate_elements<nv>(a, closure);
-
-  return size;
 }
 
 template <bool nv, typename OopClosureType>
-int ObjArrayKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
+void ObjArrayKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
   assert(obj->is_array(), "obj must be array");
   objArrayOop a  = objArrayOop(obj);
 
-  // Get size before changing pointers.
-  // Don't call size() or oop_size() since that is a virtual call
-  int size = a->object_size();
-
   if (Devirtualizer<nv>::do_metadata(closure)) {
     Devirtualizer<nv>::do_klass(closure, a->klass());
   }
 
   oop_oop_iterate_elements_bounded<nv>(a, closure, mr);
-
-  return size;
 }
 
 template <bool nv, typename T, class OopClosureType>
 void ObjArrayKlass::oop_oop_iterate_range_specialized(objArrayOop a, OopClosureType* closure, int start, int end) {
-  if (Devirtualizer<nv>::do_metadata(closure)) {
-    Devirtualizer<nv>::do_klass(closure, a->klass());
-  }
-
   T* low = start == 0 ? cast_from_oop<T*>(a) : a->obj_at_addr<T>(start);
   T* high = (T*)a->base() + end;
 
@@ -134,21 +119,15 @@
 // Like oop_oop_iterate but only iterates over a specified range and only used
 // for objArrayOops.
 template <bool nv, class OopClosureType>
-int ObjArrayKlass::oop_oop_iterate_range(oop obj, OopClosureType* closure, int start, int end) {
+void ObjArrayKlass::oop_oop_iterate_range(oop obj, OopClosureType* closure, int start, int end) {
   assert(obj->is_array(), "obj must be array");
   objArrayOop a  = objArrayOop(obj);
 
-  // Get size before changing pointers.
-  // Don't call size() or oop_size() since that is a virtual call
-  int size = a->object_size();
-
   if (UseCompressedOops) {
     oop_oop_iterate_range_specialized<nv, narrowOop>(a, closure, start, end);
   } else {
     oop_oop_iterate_range_specialized<nv, oop>(a, closure, start, end);
   }
-
-  return size;
 }
 
 #define ALL_OBJ_ARRAY_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)    \
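
A note on the do_klass call dropped from oop_oop_iterate_range_specialized
above: range iteration is the building block for scanning large arrays in
chunks, and visiting the array's klass once per chunk would repeat metadata
work that the whole-object iterators already do once. A hypothetical chunked
scan, with the stride and closure assumed for illustration only:

   objArrayOop a = ...;                      // some large object array
   const int stride = 1024;                  // assumed chunk size
   for (int start = 0; start < a->length(); start += stride) {
     int end = MIN2(start + stride, a->length());
     a->oop_iterate_range(&cl, start, end);  // elements only, no per-chunk do_klass
   }
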
--- a/hotspot/src/share/vm/oops/objArrayOop.cpp	Tue Sep 01 14:01:18 2015 -0700
+++ b/hotspot/src/share/vm/oops/objArrayOop.cpp	Wed Sep 02 09:14:04 2015 +0200
@@ -46,8 +46,8 @@
 
 #define ObjArrayOop_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)                    \
                                                                                    \
-int objArrayOopDesc::oop_iterate_range(OopClosureType* blk, int start, int end) {  \
-  return ((ObjArrayKlass*)klass())->oop_oop_iterate_range##nv_suffix(this, blk, start, end); \
+void objArrayOopDesc::oop_iterate_range(OopClosureType* blk, int start, int end) {  \
+  ((ObjArrayKlass*)klass())->oop_oop_iterate_range##nv_suffix(this, blk, start, end); \
 }
 
 ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayOop_OOP_ITERATE_DEFN)
--- a/hotspot/src/share/vm/oops/objArrayOop.hpp	Tue Sep 01 14:01:18 2015 -0700
+++ b/hotspot/src/share/vm/oops/objArrayOop.hpp	Wed Sep 02 09:14:04 2015 +0200
@@ -106,7 +106,7 @@
 
-  // special iterators for index ranges, returns size of object
+  // special iterators for index ranges
 #define ObjArrayOop_OOP_ITERATE_DECL(OopClosureType, nv_suffix)     \
-  int oop_iterate_range(OopClosureType* blk, int start, int end);
+  void oop_iterate_range(OopClosureType* blk, int start, int end);
 
   ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayOop_OOP_ITERATE_DECL)
   ALL_OOP_OOP_ITERATE_CLOSURES_2(ObjArrayOop_OOP_ITERATE_DECL)
--- a/hotspot/src/share/vm/oops/oop.hpp	Tue Sep 01 14:01:18 2015 -0700
+++ b/hotspot/src/share/vm/oops/oop.hpp	Wed Sep 02 09:14:04 2015 +0200
@@ -330,7 +330,6 @@
   // Garbage Collection support
 
   // Mark Sweep
-  void ms_follow_contents();
-  // Adjust all pointers in this object to point at it's forwarded location and
+  // Adjust all pointers in this object to point at its forwarded location and
   // return the size of this oop.  This is used by the MarkSweep collector.
   int  ms_adjust_pointers();
@@ -344,17 +343,27 @@


-  // iterators, returns size of object
+  // iterators
-#define OOP_ITERATE_DECL(OopClosureType, nv_suffix)                      \
-  int oop_iterate(OopClosureType* blk);                                  \
-  int oop_iterate(OopClosureType* blk, MemRegion mr);  // Only in mr.
+#define OOP_ITERATE_DECL(OopClosureType, nv_suffix)                     \
+  void oop_iterate(OopClosureType* blk);                                \
+  void oop_iterate(OopClosureType* blk, MemRegion mr);  // Only in mr.
 
   ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_DECL)
   ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_DECL)
 
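+  // As above, but also return the size of the object iterated over.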
+#define OOP_ITERATE_SIZE_DECL(OopClosureType, nv_suffix)                    \
+  int oop_iterate_size(OopClosureType* blk);                                \
+  int oop_iterate_size(OopClosureType* blk, MemRegion mr);  // Only in mr.
+
+  ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_SIZE_DECL)
+  ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_SIZE_DECL)
+
+
 #if INCLUDE_ALL_GCS
 
-#define OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix)            \
-  int oop_iterate_backwards(OopClosureType* blk);
+#define OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix)  \
+  void oop_iterate_backwards(OopClosureType* blk);
 
   ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_BACKWARDS_DECL)
   ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_BACKWARDS_DECL)
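
The practical effect of the split, as a hedged usage sketch (closure names
and bounds are assumed): marking-style callers that never needed the size
use oop_iterate, while sequential heap walkers use oop_iterate_size to
advance from object to object:

   obj->oop_iterate(&mark_closure);          // size is not needed

   HeapWord* cur = bottom;                   // linear walk over a space
   while (cur < top) {
     cur += oop(cur)->oop_iterate_size(&scan_closure);
   }
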
--- a/hotspot/src/share/vm/oops/oop.inline.hpp	Tue Sep 01 14:01:18 2015 -0700
+++ b/hotspot/src/share/vm/oops/oop.inline.hpp	Wed Sep 02 09:14:04 2015 +0200
@@ -695,10 +695,6 @@
   }
 }
 
-inline void oopDesc::ms_follow_contents() {
-  klass()->oop_ms_follow_contents(this);
-}
-
 inline int oopDesc::ms_adjust_pointers() {
   debug_only(int check_size = size());
   int s = klass()->oop_ms_adjust_pointers(this);
@@ -730,34 +726,50 @@
 }
 #endif
 
-#define OOP_ITERATE_DEFN(OopClosureType, nv_suffix)                   \
-                                                                      \
-inline int oopDesc::oop_iterate(OopClosureType* blk) {                \
-  return klass()->oop_oop_iterate##nv_suffix(this, blk);              \
-}                                                                     \
-                                                                      \
-inline int oopDesc::oop_iterate(OopClosureType* blk, MemRegion mr) {  \
-  return klass()->oop_oop_iterate_bounded##nv_suffix(this, blk, mr);  \
+#define OOP_ITERATE_DEFN(OopClosureType, nv_suffix)                    \
+                                                                       \
+inline void oopDesc::oop_iterate(OopClosureType* blk) {                \
+  klass()->oop_oop_iterate##nv_suffix(this, blk);                      \
+}                                                                      \
+                                                                       \
+inline void oopDesc::oop_iterate(OopClosureType* blk, MemRegion mr) {  \
+  klass()->oop_oop_iterate_bounded##nv_suffix(this, blk, mr);          \
 }
 
+#define OOP_ITERATE_SIZE_DEFN(OopClosureType, nv_suffix)               \
+                                                                       \
+inline int oopDesc::oop_iterate_size(OopClosureType* blk) {            \
+  Klass* k = klass();                                                  \
+  int size = size_given_klass(k);                                      \
+  k->oop_oop_iterate##nv_suffix(this, blk);                            \
+  return size;                                                         \
+}                                                                      \
+                                                                       \
+inline int oopDesc::oop_iterate_size(OopClosureType* blk,              \
+                                     MemRegion mr) {                   \
+  Klass* k = klass();                                                  \
+  int size = size_given_klass(k);                                      \
+  k->oop_oop_iterate_bounded##nv_suffix(this, blk, mr);                \
+  return size;                                                         \
+}
 
 inline int oopDesc::oop_iterate_no_header(OopClosure* blk) {
   // The NoHeaderExtendedOopClosure wraps the OopClosure and proxies all
   // the do_oop calls, but turns off all other features in ExtendedOopClosure.
   NoHeaderExtendedOopClosure cl(blk);
-  return oop_iterate(&cl);
+  return oop_iterate_size(&cl);
 }
 
 inline int oopDesc::oop_iterate_no_header(OopClosure* blk, MemRegion mr) {
   NoHeaderExtendedOopClosure cl(blk);
-  return oop_iterate(&cl, mr);
+  return oop_iterate_size(&cl, mr);
 }
 
 #if INCLUDE_ALL_GCS
 #define OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)       \
                                                                     \
-inline int oopDesc::oop_iterate_backwards(OopClosureType* blk) {    \
-  return klass()->oop_oop_iterate_backwards##nv_suffix(this, blk);  \
+inline void oopDesc::oop_iterate_backwards(OopClosureType* blk) {   \
+  klass()->oop_oop_iterate_backwards##nv_suffix(this, blk);         \
 }
 #else
 #define OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
@@ -765,6 +777,7 @@
 
 #define ALL_OOPDESC_OOP_ITERATE(OopClosureType, nv_suffix)  \
   OOP_ITERATE_DEFN(OopClosureType, nv_suffix)               \
+  OOP_ITERATE_SIZE_DEFN(OopClosureType, nv_suffix)          \
   OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
 
 ALL_OOP_OOP_ITERATE_CLOSURES_1(ALL_OOPDESC_OOP_ITERATE)
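
Substituting the _v suffix, the new size macro expands roughly to the sketch
below. The key point is that size_given_klass runs before the closure does,
matching the old "Get size before changing pointers" comments: a closure may
update fields the size computation could otherwise depend on, and reusing k
in size_given_klass avoids the virtual size()/oop_size() call those comments
warned about:

   inline int oopDesc::oop_iterate_size(ExtendedOopClosure* blk) {
     Klass* k = klass();
     int size = size_given_klass(k);   // compute size before any fields change
     k->oop_oop_iterate_v(this, blk);  // then visit the oops
     return size;
   }

This is what lets every oop_oop_iterate implementation in this change drop
its own object_size() bookkeeping and return void.
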
--- a/hotspot/src/share/vm/oops/typeArrayKlass.hpp	Tue Sep 01 14:01:18 2015 -0700
+++ b/hotspot/src/share/vm/oops/typeArrayKlass.hpp	Wed Sep 02 09:14:04 2015 +0200
@@ -75,7 +75,6 @@
   // GC specific object visitors
   //
   // Mark Sweep
-  void oop_ms_follow_contents(oop obj);
   int  oop_ms_adjust_pointers(oop obj);
 #if INCLUDE_ALL_GCS
   // Parallel Scavenge
@@ -90,15 +89,15 @@
 
  private:
   // The implementation used by all oop_oop_iterate functions in TypeArrayKlasses.
-  inline int oop_oop_iterate_impl(oop obj, ExtendedOopClosure* closure);
+  inline void oop_oop_iterate_impl(oop obj, ExtendedOopClosure* closure);
 
   // Wraps oop_oop_iterate_impl to conform to macros.
   template <bool nv, typename OopClosureType>
-  inline int oop_oop_iterate(oop obj, OopClosureType* closure);
+  inline void oop_oop_iterate(oop obj, OopClosureType* closure);
 
   // Wraps oop_oop_iterate_impl to conform to macros.
   template <bool nv, typename OopClosureType>
-  inline int oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr);
+  inline void oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr);
 
  public:
 
--- a/hotspot/src/share/vm/oops/typeArrayKlass.inline.hpp	Tue Sep 01 14:01:18 2015 -0700
+++ b/hotspot/src/share/vm/oops/typeArrayKlass.inline.hpp	Wed Sep 02 09:14:04 2015 +0200
@@ -33,22 +33,20 @@
 
 class ExtendedOopClosure;
 
-inline int TypeArrayKlass::oop_oop_iterate_impl(oop obj, ExtendedOopClosure* closure) {
+inline void TypeArrayKlass::oop_oop_iterate_impl(oop obj, ExtendedOopClosure* closure) {
   assert(obj->is_typeArray(),"must be a type array");
-  typeArrayOop t = typeArrayOop(obj);
   // Performance tweak: We skip iterating over the klass pointer since we
   // know that Universe::TypeArrayKlass never moves.
-  return t->object_size();
 }
 
 template <bool nv, typename OopClosureType>
-int TypeArrayKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
-  return oop_oop_iterate_impl(obj, closure);
+void TypeArrayKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
+  oop_oop_iterate_impl(obj, closure);
 }
 
 template <bool nv, typename OopClosureType>
-int TypeArrayKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
-  return oop_oop_iterate_impl(obj, closure);
+void TypeArrayKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
+  oop_oop_iterate_impl(obj, closure);
 }
 
 #define ALL_TYPE_ARRAY_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)    \
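
Type arrays contain only primitives, so the impl above has no oop fields to
visit and, with the klass pointer deliberately skipped, now does nothing but
assert. Callers that need a size get it from the generic path instead; a
minimal sketch, assuming a typeArrayOop t and a closure cl:

   int words = t->oop_iterate_size(&cl);  // visits no oops, still returns the size
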
--- a/hotspot/src/share/vm/utilities/stack.inline.hpp	Tue Sep 01 14:01:18 2015 -0700
+++ b/hotspot/src/share/vm/utilities/stack.inline.hpp	Wed Sep 02 09:14:04 2015 +0200
@@ -27,6 +27,17 @@
 
 #include "utilities/stack.hpp"
 
+// Stack is used by the GC code and in some hot paths a lot of the Stack
+// code gets inlined. This is generally good, but when too much code has
+// been inlined, no further inlining is allowed by GCC. Therefore we need
+// to prevent parts of the Stack slow path from being inlined so that
+// other code still can be.
+#if defined(TARGET_COMPILER_gcc)
+#define NOINLINE __attribute__((noinline))
+#else
+#define NOINLINE
+#endif
+
 template <MEMFLAGS F> StackBase<F>::StackBase(size_t segment_size, size_t max_cache_size,
                      size_t max_size):
   _seg_size(segment_size),
@@ -141,7 +152,7 @@
 }
 
 template <class E, MEMFLAGS F>
-void Stack<E, F>::push_segment()
+NOINLINE void Stack<E, F>::push_segment()
 {
   assert(this->_cur_seg_size == this->_seg_size, "current segment is not full");
   E* next;
@@ -269,4 +280,6 @@
   return _cur_seg + --_cur_seg_size;
 }
 
+#undef NOINLINE
+
 #endif // SHARE_VM_UTILITIES_STACK_INLINE_HPP
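
The shape of the resulting fast path, as an illustrative sketch only (not
the actual Stack::push body): keeping the rarely taken segment overflow out
of line leaves the common case small enough for GCC to keep inlining it at
the call sites that matter.

   template <class E, MEMFLAGS F>
   inline void Stack<E, F>::push(E item) {
     if (this->_cur_seg_size == this->_seg_size) {
       push_segment();                              // cold path, now NOINLINE
     }
     this->_cur_seg[this->_cur_seg_size++] = item;  // hot path stays small
   }
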