8043243: convert SCAN_AND_FORWARD, SCAN_AND_ADJUST_POINTERS, SCAN_AND_COMPACT macros to methods
author mlarsson
Thu, 30 Oct 2014 12:45:22 +0100
changeset 27624 fe43edc5046d
parent 27623 3f190bd7182f
child 27625 07829380b8cd
8043243: convert SCAN_AND_FORWARD, SCAN_AND_ADJUST_POINTERS, SCAN_AND_COMPACT macros to methods
Reviewed-by: mgerdin, kbarrett
hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp
hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp
hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp
hotspot/src/share/vm/gc_implementation/shared/markSweep.inline.hpp
hotspot/src/share/vm/memory/space.cpp
hotspot/src/share/vm/memory/space.hpp
hotspot/src/share/vm/memory/space.inline.hpp
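The change follows one pattern throughout: each SCAN_AND_* macro becomes a static function template on CompactibleSpace, parameterized on the concrete space type, so the helper calls bind at compile time instead of by textual substitution. A standalone sketch of the idiom (hypothetical names, not HotSpot code):

#include <cstdio>
#include <cstddef>

// Before: the algorithm lived in a macro and was textually expanded into
// each caller, picking up whatever block_size() meant at that site:
//
//   #define SCAN_BLOCKS(block_size) { std::size_t s = block_size(p); ... }
//
// After: a static function template. The compiler stamps out one copy per
// concrete space type and binds the helper calls statically.
class BaseSpace {
 public:
  virtual ~BaseSpace() {}
 protected:
  template <class SpaceType>
  static void scan_blocks(SpaceType* space) {
    // space->block_size() resolves at compile time; no virtual dispatch.
    std::printf("block size: %zu\n", space->block_size());
  }
};

class WordSpace : public BaseSpace {
 public:
  std::size_t block_size() const { return 8; }  // non-virtual helper
  void compact() { scan_blocks(this); }  // instantiates scan_blocks<WordSpace>
};

int main() {
  WordSpace s;
  s.compact();  // prints "block size: 8"
  return 0;
}

The virtual-call cost the macros used to dodge is dodged the same way here, but with type checking and debuggable, steppable code.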
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Thu Oct 30 10:51:06 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Thu Oct 30 12:45:22 2014 +0100
@@ -2083,17 +2083,13 @@
 }
 
 // Support for compaction
-
 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
-  SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
+  scan_and_forward(this, cp);
   // Prepare_for_compaction() uses the space between live objects
   // so that later phase can skip dead space quickly.  So verification
   // of the free lists doesn't work after.
 }
 
-#define obj_size(q) adjustObjectSize(oop(q)->size())
-#define adjust_obj_size(s) adjustObjectSize(s)
-
 void CompactibleFreeListSpace::adjust_pointers() {
   // In other versions of adjust_pointers(), a bail out
   // based on the amount of live data in the generation
@@ -2101,12 +2097,12 @@
   // Cannot test used() == 0 here because the free lists have already
   // been mangled by the compaction.
 
-  SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
+  scan_and_adjust_pointers(this);
   // See note about verification in prepare_for_compaction().
 }
 
 void CompactibleFreeListSpace::compact() {
-  SCAN_AND_COMPACT(obj_size);
+  scan_and_compact(this);
 }
 
 // Fragmentation metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
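With the macros gone, the CMS-specific obj_size/adjust_obj_size definitions move from #defines into private member functions (next hunk), and the shared templates pick them up through the space argument. A compilable sketch of how a per-space size hook composes with a shared scan, assuming nothing about the real adjustObjectSize rule:

#include <cstdio>
#include <cstddef>

// Shared algorithm: asks the concrete space how to adjust each raw size.
template <class SpaceType>
std::size_t adjusted_total(SpaceType* space, const std::size_t* sizes, int n) {
  std::size_t total = 0;
  for (int i = 0; i < n; i++) {
    total += space->adjust_obj_size(sizes[i]);  // statically bound hook
  }
  return total;
}

struct PlainSpace {
  // Default behavior: sizes pass through untouched (as in CompactibleSpace).
  std::size_t adjust_obj_size(std::size_t s) const { return s; }
};

struct FreeListSpace {
  // CMS-style stand-in for adjustObjectSize: round up to a minimum chunk.
  std::size_t adjust_obj_size(std::size_t s) const { return s < 4 ? 4 : s; }
};

int main() {
  const std::size_t sizes[] = {1, 2, 8};
  PlainSpace p;
  FreeListSpace f;
  std::printf("%zu %zu\n",
              adjusted_total(&p, sizes, 3),   // 11
              adjusted_total(&f, sizes, 3));  // 16
  return 0;
}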
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Thu Oct 30 10:51:06 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Thu Oct 30 12:45:22 2014 +0100
@@ -73,6 +73,13 @@
   friend class CMSCollector;
   // Local alloc buffer for promotion into this space.
   friend class CFLS_LAB;
+  // Allow scan_and_* functions to call (private) overrides of the auxiliary functions on this class
+  template <typename SpaceType>
+  friend void CompactibleSpace::scan_and_adjust_pointers(SpaceType* space);
+  template <typename SpaceType>
+  friend void CompactibleSpace::scan_and_compact(SpaceType* space);
+  template <typename SpaceType>
+  friend void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp);
 
   // "Size" of chunks of work (executed during parallel remark phases
   // of CMS collection); this probably belongs in CMSCollector, although
@@ -288,6 +295,28 @@
     _bt.freed(start, size);
   }
 
+  // Auxiliary functions for scan_and_{forward,adjust_pointers,compact} support.
+  // See comments for CompactibleSpace for more information.
+  inline HeapWord* scan_limit() const {
+    return end();
+  }
+
+  inline bool scanned_block_is_obj(const HeapWord* addr) const {
+    return CompactibleFreeListSpace::block_is_obj(addr); // Avoid virtual call
+  }
+
+  inline size_t scanned_block_size(const HeapWord* addr) const {
+    return CompactibleFreeListSpace::block_size(addr); // Avoid virtual call
+  }
+
+  inline size_t adjust_obj_size(size_t size) const {
+    return adjustObjectSize(size);
+  }
+
+  inline size_t obj_size(const HeapWord* addr) const {
+    return adjustObjectSize(oop(addr)->size());
+  }
+
  protected:
   // Reset the indexed free list to its initial empty condition.
   void resetIndexedFreeListArray();
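The friend declarations in this hunk are what let a static member template of the base class call the private helpers above. The same access pattern in isolation (hypothetical names):

#include <cstdio>

class Base {
 public:
  template <class SpaceType>
  static void run(SpaceType* space);
  virtual ~Base() {}
};

class Derived : public Base {
  // Grant exactly one function template of the base class access to the
  // private helper below (same shape as the friend declarations above).
  template <class SpaceType>
  friend void Base::run(SpaceType* space);

  int scan_limit() const { return 42; }  // deliberately private
};

template <class SpaceType>
void Base::run(SpaceType* space) {
  std::printf("limit: %d\n", space->scan_limit());  // OK: run() is a friend
}

int main() {
  Derived d;
  Base::run(&d);  // prints "limit: 42"
  return 0;
}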
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Thu Oct 30 10:51:06 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Thu Oct 30 12:45:22 2014 +0100
@@ -960,6 +960,10 @@
   verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
 }
 
+void HeapRegion::prepare_for_compaction(CompactPoint* cp) {
+  scan_and_forward(this, cp);
+}
+
 // G1OffsetTableContigSpace code; copied from space.cpp.  Hope this can go
 // away eventually.
 
@@ -1043,12 +1047,6 @@
   }
 }
 
-#define block_is_always_obj(q) true
-void G1OffsetTableContigSpace::prepare_for_compaction(CompactPoint* cp) {
-  SCAN_AND_FORWARD(cp, top, block_is_always_obj, block_size);
-}
-#undef block_is_always_obj
-
 G1OffsetTableContigSpace::
 G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
                          MemRegion mr) :
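HeapRegion's override instantiates scan_and_forward<HeapRegion>, a copy of the algorithm specialized to scan up to top() rather than end(). A sketch of how each instantiation picks up its own scan_limit() (hypothetical names):

#include <cstdio>

// One template, two instantiations: each copy inlines its own scan_limit().
template <class SpaceType>
void scan(SpaceType* space) {
  for (int q = 0; q < space->scan_limit(); q++) {
    std::printf("visit %d\n", q);
  }
}

struct WholeSpace {
  int scan_limit() const { return 3; }  // plays the role of end()
};

struct RegionSpace {
  int top_;
  int scan_limit() const { return top_; }  // plays the role of top()
};

int main() {
  WholeSpace w;
  RegionSpace r = { 2 };
  scan(&w);  // visits 0, 1, 2
  scan(&r);  // visits 0, 1 -- stops at the region's top
  return 0;
}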
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp	Thu Oct 30 10:51:06 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp	Thu Oct 30 12:45:22 2014 +0100
@@ -187,8 +187,6 @@
   HeapWord* block_start(const void* p);
   HeapWord* block_start_const(const void* p) const;
 
-  void prepare_for_compaction(CompactPoint* cp);
-
   // Add offset table update.
   virtual HeapWord* allocate(size_t word_size);
   HeapWord* par_allocate(size_t word_size);
@@ -210,6 +208,9 @@
 
 class HeapRegion: public G1OffsetTableContigSpace {
   friend class VMStructs;
+  // Allow scan_and_forward to call (private) overrides for auxiliary functions on this class
+  template <typename SpaceType>
+  friend void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp);
  private:
 
   // The remembered set for this region.
@@ -219,6 +220,20 @@
 
   G1BlockOffsetArrayContigSpace* offsets() { return &_offsets; }
 
+  // Auxiliary functions for scan_and_forward support.
+  // See comments for CompactibleSpace for more information.
+  inline HeapWord* scan_limit() const {
+    return top();
+  }
+
+  inline bool scanned_block_is_obj(const HeapWord* addr) const {
+    return true; // Always true, since scan_limit is top
+  }
+
+  inline size_t scanned_block_size(const HeapWord* addr) const {
+    return HeapRegion::block_size(addr); // Avoid virtual call
+  }
+
  protected:
   // The index of this region in the heap region sequence.
   uint  _hrm_index;
@@ -340,6 +355,9 @@
   // and the amount of unallocated words if called on top()
   size_t block_size(const HeapWord* p) const;
 
+  // Override for scan_and_forward support.
+  void prepare_for_compaction(CompactPoint* cp);
+
   inline HeapWord* par_allocate_no_bot_updates(size_t word_size);
   inline HeapWord* allocate_no_bot_updates(size_t word_size);
 
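The class-qualified calls in the helpers above (HeapRegion::block_size(addr)) are what suppress virtual dispatch: naming the class pins the call to that exact definition at compile time. A minimal demonstration (hypothetical classes):

#include <cstdio>

struct Space {
  virtual int block_size() const { return 1; }
  virtual ~Space() {}
};

struct Region : Space {
  virtual int block_size() const { return 8; }

  int via_virtual() const {
    return block_size();          // goes through the vtable
  }
  int via_qualified() const {
    return Region::block_size();  // class-qualified: bound at compile time
  }
};

int main() {
  Region r;
  // Both return 8 here; the difference is that via_qualified() needs no
  // vtable lookup and can be inlined, which is the point of the helpers.
  std::printf("%d %d\n", r.via_virtual(), r.via_qualified());
  return 0;
}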
--- a/hotspot/src/share/vm/gc_implementation/shared/markSweep.inline.hpp	Thu Oct 30 10:51:06 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/shared/markSweep.inline.hpp	Thu Oct 30 12:45:22 2014 +0100
@@ -27,6 +27,7 @@
 
 #include "gc_implementation/shared/markSweep.hpp"
 #include "gc_interface/collectedHeap.hpp"
+#include "oops/markOop.inline.hpp"
 #include "utilities/stack.inline.hpp"
 #include "utilities/macros.hpp"
 #if INCLUDE_ALL_GCS
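This include becomes necessary because the scan_and_* bodies now live in a header and call markOop methods such as mark()->is_marked(); template code is compiled at each point of instantiation, so the inline definitions must be visible there. In miniature (hypothetical stand-ins for markOop/oop):

#include <cstdio>

// A template body is compiled wherever it is instantiated, so every inline
// function it calls must be fully visible at that point -- hence the include.
struct Mark {
  bool is_marked() const { return true; }  // would live in an .inline.hpp
};

struct Obj {
  Mark mark() const { return Mark(); }
};

template <class ObjType>
inline bool scan_one(ObjType* obj) {
  return obj->mark().is_marked();  // needs Mark's definition right here
}

int main() {
  Obj o;
  std::printf("%d\n", (int)scan_one(&o));  // prints 1
  return 0;
}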
--- a/hotspot/src/share/vm/memory/space.cpp	Thu Oct 30 10:51:06 2014 +0100
+++ b/hotspot/src/share/vm/memory/space.cpp	Thu Oct 30 12:45:22 2014 +0100
@@ -438,52 +438,8 @@
   }
 }
 
-#define block_is_always_obj(q) true
-#define obj_size(q) oop(q)->size()
-#define adjust_obj_size(s) s
-
-void CompactibleSpace::prepare_for_compaction(CompactPoint* cp) {
-  SCAN_AND_FORWARD(cp, end, block_is_obj, block_size);
-}
-
-// Faster object search.
 void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
-  SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size);
-}
-
-void Space::adjust_pointers() {
-  // adjust all the interior pointers to point at the new locations of objects
-  // Used by MarkSweep::mark_sweep_phase3()
-
-  // First check to see if there is any work to be done.
-  if (used() == 0) {
-    return;  // Nothing to do.
-  }
-
-  // Otherwise...
-  HeapWord* q = bottom();
-  HeapWord* t = end();
-
-  debug_only(HeapWord* prev_q = NULL);
-  while (q < t) {
-    if (oop(q)->is_gc_marked()) {
-      // q is alive
-
-      // point all the oops to the new location
-      size_t size = oop(q)->adjust_pointers();
-
-      debug_only(prev_q = q);
-
-      q += size;
-    } else {
-      // q is not a live object.  But we're not in a compactible space,
-      // So we don't have live ranges.
-      debug_only(prev_q = q);
-      q += block_size(q);
-      assert(q > prev_q, "we should be moving forward through memory");
-    }
-  }
-  assert(q == t, "just checking");
+  scan_and_forward(this, cp);
 }
 
 void CompactibleSpace::adjust_pointers() {
@@ -492,11 +448,11 @@
     return;   // Nothing to do.
   }
 
-  SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
+  scan_and_adjust_pointers(this);
 }
 
 void CompactibleSpace::compact() {
-  SCAN_AND_COMPACT(obj_size);
+  scan_and_compact(this);
 }
 
 void Space::print_short() const { print_short_on(tty); }
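Note that the generic Space::adjust_pointers() is deleted outright and, in the space.hpp hunk below, declared pure virtual: every concrete space must now route through a specialized template instantiation rather than a generic virtual-call loop. The shape of that arrangement (reusing the HotSpot names for readability; not the real code):

#include <cstdio>

class Space {
 public:
  virtual void adjust_pointers() = 0;  // no generic fallback any more
  virtual ~Space() {}
};

class CompactibleSpace : public Space {
 public:
  int adjustment() const { return 2; }
  virtual void adjust_pointers() {
    scan_and_adjust_pointers(this);  // instantiated for CompactibleSpace
  }
 protected:
  template <class SpaceType>
  static void scan_and_adjust_pointers(SpaceType* space) {
    std::printf("adjusting by %d\n", space->adjustment());
  }
};

int main() {
  CompactibleSpace s;
  Space* base = &s;
  base->adjust_pointers();  // prints "adjusting by 2"
  return 0;
}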
--- a/hotspot/src/share/vm/memory/space.hpp	Thu Oct 30 10:51:06 2014 +0100
+++ b/hotspot/src/share/vm/memory/space.hpp	Thu Oct 30 12:45:22 2014 +0100
@@ -46,6 +46,7 @@
 // - Space               -- an abstract base class describing a heap area
 //   - CompactibleSpace  -- a space supporting compaction
 //     - CompactibleFreeListSpace -- (used for CMS generation)
+//     - G1OffsetTableContigSpace -- G1 version of OffsetTableContigSpace
 //     - ContiguousSpace -- a compactible space in which all free space
 //                          is contiguous
 //       - EdenSpace     -- contiguous space used as nursery
@@ -238,7 +239,7 @@
 
   // Mark-sweep-compact support: all spaces can update pointers to objects
   // moving as a part of compaction.
-  virtual void adjust_pointers();
+  virtual void adjust_pointers() = 0;
 
   // PrintHeapAtGC support
   virtual void print() const;
@@ -339,7 +340,36 @@
 // necessarily, a space that is normally contiguous.  But, for example, a
 // free-list-based space whose normal collection is a mark-sweep without
 // compaction could still support compaction in full GC's.
-
+//
+// The compaction operations are implemented by the
+// scan_and_{adjust_pointers,compact,forward} function templates.
+// The following non-virtual auxiliary functions are used by these function templates:
+// - scan_limit()
+// - scanned_block_is_obj()
+// - scanned_block_size()
+// - adjust_obj_size()
+// - obj_size()
+// These functions are to be used exclusively by the scan_and_* function templates,
+// and must be defined for all (non-abstract) subclasses of CompactibleSpace.
+//
+// NOTE: Any subclass of CompactibleSpace wanting to change/define the behavior
+// of any of the auxiliary functions must also override the corresponding
+// prepare_for_compaction/adjust_pointers/compact functions using them.
+// Otherwise, such changes will not be used by, and will have no effect on, the compaction operations.
+//
+// This translates to the following dependencies:
+// Overrides/definitions of
+//  - scan_limit
+//  - scanned_block_is_obj
+//  - scanned_block_size
+// require override/definition of prepare_for_compaction().
+// Similar dependencies exist between
+//  - adjust_obj_size  and adjust_pointers()
+//  - obj_size         and compact().
+//
+// This also means that changes to block_size() or block_is_obj() that should be
+// effective during the compaction operations must provide a corresponding
+// definition of scanned_block_size()/scanned_block_is_obj(), respectively.
 class CompactibleSpace: public Space {
   friend class VMStructs;
   friend class CompactibleFreeListSpace;
@@ -347,6 +377,15 @@
   HeapWord* _compaction_top;
   CompactibleSpace* _next_compaction_space;
 
+  // Auxiliary functions for scan_and_{forward,adjust_pointers,compact} support.
+  inline size_t adjust_obj_size(size_t size) const {
+    return size;
+  }
+
+  inline size_t obj_size(const HeapWord* addr) const {
+    return oop(addr)->size();
+  }
+
 public:
   CompactibleSpace() :
    _compaction_top(NULL), _next_compaction_space(NULL) {}
@@ -390,7 +429,7 @@
   // "cp->compaction_space" up-to-date.  Offset tables may be updated in
   // this phase as if the final copy had occurred; if so, "cp->threshold"
   // indicates when the next such action should be taken.
-  virtual void prepare_for_compaction(CompactPoint* cp);
+  virtual void prepare_for_compaction(CompactPoint* cp) = 0;
   // MarkSweep support phase3
   virtual void adjust_pointers();
   // MarkSweep support phase4
@@ -449,6 +488,25 @@
   // words remaining after this operation.
   bool insert_deadspace(size_t& allowed_deadspace_words, HeapWord* q,
                         size_t word_len);
+
+  // Below are template functions for scan_and_* algorithms (avoiding virtual calls).
+  // The space argument should be a subclass of CompactibleSpace, implementing
+  // scan_limit(), scanned_block_is_obj(), and scanned_block_size(),
+  // and possibly also overriding obj_size() and adjust_obj_size().
+  // These functions should avoid virtual calls whenever possible.
+
+  // Frequently calls adjust_obj_size().
+  template <class SpaceType>
+  static inline void scan_and_adjust_pointers(SpaceType* space);
+
+  // Frequently calls obj_size().
+  template <class SpaceType>
+  static inline void scan_and_compact(SpaceType* space);
+
+  // Frequently calls scanned_block_is_obj() and scanned_block_size().
+  // Requires the scan_limit() function.
+  template <class SpaceType>
+  static inline void scan_and_forward(SpaceType* space, CompactPoint* cp);
 };
 
 class GenSpaceMangler;
@@ -458,6 +516,25 @@
 class ContiguousSpace: public CompactibleSpace {
   friend class OneContigSpaceCardGeneration;
   friend class VMStructs;
+  // Allow scan_and_forward function to call (private) overrides for auxiliary functions on this class
+  template <typename SpaceType>
+  friend void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp);
+
+ private:
+  // Auxiliary functions for scan_and_forward support.
+  // See comments for CompactibleSpace for more information.
+  inline HeapWord* scan_limit() const {
+    return top();
+  }
+
+  inline bool scanned_block_is_obj(const HeapWord* addr) const {
+    return true; // Always true, since scan_limit is top
+  }
+
+  inline size_t scanned_block_size(const HeapWord* addr) const {
+    return oop(addr)->size();
+  }
+
  protected:
   HeapWord* _top;
   HeapWord* _concurrent_iteration_safe_limit;
@@ -622,7 +699,6 @@
   // Used to increase collection frequency.  "factor" of 0 means entire
   // space.
   void allocate_temporary_filler(int factor);
-
 };
 
 
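The dependency rule spelled out in the CompactibleSpace class comment is easy to get wrong, so here it is as code: redefining a scanned_* helper without also overriding the driver that instantiates the template means the new helper is silently never used. A compilable sketch (hypothetical names, simplified signatures):

#include <cstdio>

class Base {
 public:
  virtual ~Base() {}
  virtual void prepare() { forward(this); }  // instantiates forward<Base>
  int scan_limit() const { return 4; }

 protected:
  template <class SpaceType>
  static void forward(SpaceType* space) {
    std::printf("scan limit %d\n", space->scan_limit());
  }
};

class Sub : public Base {
 public:
  int scan_limit() const { return 2; }  // hides Base::scan_limit()
  // Required by the contract: without this override, Base::prepare()
  // would run forward<Base> and the redefined helper above would
  // silently never be called.
  virtual void prepare() { forward(this); }  // instantiates forward<Sub>
};

int main() {
  Sub s;
  Base* b = &s;
  b->prepare();  // prints "scan limit 2" -- only because Sub overrode prepare()
  return 0;
}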
--- a/hotspot/src/share/vm/memory/space.inline.hpp	Thu Oct 30 10:51:06 2014 +0100
+++ b/hotspot/src/share/vm/memory/space.inline.hpp	Thu Oct 30 12:45:22 2014 +0100
@@ -25,6 +25,9 @@
 #ifndef SHARE_VM_MEMORY_SPACE_INLINE_HPP
 #define SHARE_VM_MEMORY_SPACE_INLINE_HPP
 
+#include "gc_implementation/shared/liveRange.hpp"
+#include "gc_implementation/shared/markSweep.inline.hpp"
+#include "gc_implementation/shared/spaceDecorator.hpp"
 #include "gc_interface/collectedHeap.hpp"
 #include "memory/space.hpp"
 #include "memory/universe.hpp"
@@ -35,272 +38,6 @@
   return block_start_const(p);
 }
 
-#define SCAN_AND_FORWARD(cp,scan_limit,block_is_obj,block_size) {            \
-  /* Compute the new addresses for the live objects and store it in the mark \
-   * Used by universe::mark_sweep_phase2()                                   \
-   */                                                                        \
-  HeapWord* compact_top; /* This is where we are currently compacting to. */ \
-                                                                             \
-  /* We're sure to be here before any objects are compacted into this        \
-   * space, so this is a good time to initialize this:                       \
-   */                                                                        \
-  set_compaction_top(bottom());                                              \
-                                                                             \
-  if (cp->space == NULL) {                                                   \
-    assert(cp->gen != NULL, "need a generation");                            \
-    assert(cp->threshold == NULL, "just checking");                          \
-    assert(cp->gen->first_compaction_space() == this, "just checking");      \
-    cp->space = cp->gen->first_compaction_space();                           \
-    compact_top = cp->space->bottom();                                       \
-    cp->space->set_compaction_top(compact_top);                              \
-    cp->threshold = cp->space->initialize_threshold();                       \
-  } else {                                                                   \
-    compact_top = cp->space->compaction_top();                               \
-  }                                                                          \
-                                                                             \
-  /* We allow some amount of garbage towards the bottom of the space, so     \
-   * we don't start compacting before there is a significant gain to be made.\
-   * Occasionally, we want to ensure a full compaction, which is determined  \
-   * by the MarkSweepAlwaysCompactCount parameter.                           \
-   */                                                                        \
-  uint invocations = MarkSweep::total_invocations();                         \
-  bool skip_dead = ((invocations % MarkSweepAlwaysCompactCount) != 0);       \
-                                                                             \
-  size_t allowed_deadspace = 0;                                              \
-  if (skip_dead) {                                                           \
-    const size_t ratio = allowed_dead_ratio();                               \
-    allowed_deadspace = (capacity() * ratio / 100) / HeapWordSize;           \
-  }                                                                          \
-                                                                             \
-  HeapWord* q = bottom();                                                    \
-  HeapWord* t = scan_limit();                                                \
-                                                                             \
-  HeapWord*  end_of_live= q;    /* One byte beyond the last byte of the last \
-                                   live object. */                           \
-  HeapWord*  first_dead = end();/* The first dead object. */                 \
-  LiveRange* liveRange  = NULL; /* The current live range, recorded in the   \
-                                   first header of preceding free area. */   \
-  _first_dead = first_dead;                                                  \
-                                                                             \
-  const intx interval = PrefetchScanIntervalInBytes;                         \
-                                                                             \
-  while (q < t) {                                                            \
-    assert(!block_is_obj(q) ||                                               \
-           oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() ||   \
-           oop(q)->mark()->has_bias_pattern(),                               \
-           "these are the only valid states during a mark sweep");           \
-    if (block_is_obj(q) && oop(q)->is_gc_marked()) {                         \
-      /* prefetch beyond q */                                                \
-      Prefetch::write(q, interval);                                          \
-      size_t size = block_size(q);                                           \
-      compact_top = cp->space->forward(oop(q), size, cp, compact_top);       \
-      q += size;                                                             \
-      end_of_live = q;                                                       \
-    } else {                                                                 \
-      /* run over all the contiguous dead objects */                         \
-      HeapWord* end = q;                                                     \
-      do {                                                                   \
-        /* prefetch beyond end */                                            \
-        Prefetch::write(end, interval);                                      \
-        end += block_size(end);                                              \
-      } while (end < t && (!block_is_obj(end) || !oop(end)->is_gc_marked()));\
-                                                                             \
-      /* see if we might want to pretend this object is alive so that        \
-       * we don't have to compact quite as often.                            \
-       */                                                                    \
-      if (allowed_deadspace > 0 && q == compact_top) {                       \
-        size_t sz = pointer_delta(end, q);                                   \
-        if (insert_deadspace(allowed_deadspace, q, sz)) {                    \
-          compact_top = cp->space->forward(oop(q), sz, cp, compact_top);     \
-          q = end;                                                           \
-          end_of_live = end;                                                 \
-          continue;                                                          \
-        }                                                                    \
-      }                                                                      \
-                                                                             \
-      /* otherwise, it really is a free region. */                           \
-                                                                             \
-      /* for the previous LiveRange, record the end of the live objects. */  \
-      if (liveRange) {                                                       \
-        liveRange->set_end(q);                                               \
-      }                                                                      \
-                                                                             \
-      /* record the current LiveRange object.                                \
-       * liveRange->start() is overlaid on the mark word.                    \
-       */                                                                    \
-      liveRange = (LiveRange*)q;                                             \
-      liveRange->set_start(end);                                             \
-      liveRange->set_end(end);                                               \
-                                                                             \
-      /* see if this is the first dead region. */                            \
-      if (q < first_dead) {                                                  \
-        first_dead = q;                                                      \
-      }                                                                      \
-                                                                             \
-      /* move on to the next object */                                       \
-      q = end;                                                               \
-    }                                                                        \
-  }                                                                          \
-                                                                             \
-  assert(q == t, "just checking");                                           \
-  if (liveRange != NULL) {                                                   \
-    liveRange->set_end(q);                                                   \
-  }                                                                          \
-  _end_of_live = end_of_live;                                                \
-  if (end_of_live < first_dead) {                                            \
-    first_dead = end_of_live;                                                \
-  }                                                                          \
-  _first_dead = first_dead;                                                  \
-                                                                             \
-  /* save the compaction_top of the compaction space. */                     \
-  cp->space->set_compaction_top(compact_top);                                \
-}
-
-#define SCAN_AND_ADJUST_POINTERS(adjust_obj_size) {                             \
-  /* adjust all the interior pointers to point at the new locations of objects  \
-   * Used by MarkSweep::mark_sweep_phase3() */                                  \
-                                                                                \
-  HeapWord* q = bottom();                                                       \
-  HeapWord* t = _end_of_live;  /* Established by "prepare_for_compaction". */   \
-                                                                                \
-  assert(_first_dead <= _end_of_live, "Stands to reason, no?");                 \
-                                                                                \
-  if (q < t && _first_dead > q &&                                               \
-      !oop(q)->is_gc_marked()) {                                                \
-    /* we have a chunk of the space which hasn't moved and we've                \
-     * reinitialized the mark word during the previous pass, so we can't        \
-     * use is_gc_marked for the traversal. */                                   \
-    HeapWord* end = _first_dead;                                                \
-                                                                                \
-    while (q < end) {                                                           \
-      /* I originally tried to conjoin "block_start(q) == q" to the             \
-       * assertion below, but that doesn't work, because you can't              \
-       * accurately traverse previous objects to get to the current one         \
-       * after their pointers have been                                         \
-       * updated, until the actual compaction is done.  dld, 4/00 */            \
-      assert(block_is_obj(q),                                                   \
-             "should be at block boundaries, and should be looking at objs");   \
-                                                                                \
-      /* point all the oops to the new location */                              \
-      size_t size = oop(q)->adjust_pointers();                                  \
-      size = adjust_obj_size(size);                                             \
-                                                                                \
-      q += size;                                                                \
-    }                                                                           \
-                                                                                \
-    if (_first_dead == t) {                                                     \
-      q = t;                                                                    \
-    } else {                                                                    \
-      /* $$$ This is funky.  Using this to read the previously written          \
-       * LiveRange.  See also use below. */                                     \
-      q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer();                \
-    }                                                                           \
-  }                                                                             \
-                                                                                \
-  const intx interval = PrefetchScanIntervalInBytes;                            \
-                                                                                \
-  debug_only(HeapWord* prev_q = NULL);                                          \
-  while (q < t) {                                                               \
-    /* prefetch beyond q */                                                     \
-    Prefetch::write(q, interval);                                               \
-    if (oop(q)->is_gc_marked()) {                                               \
-      /* q is alive */                                                          \
-      /* point all the oops to the new location */                              \
-      size_t size = oop(q)->adjust_pointers();                                  \
-      size = adjust_obj_size(size);                                             \
-      debug_only(prev_q = q);                                                   \
-      q += size;                                                                \
-    } else {                                                                    \
-      /* q is not a live object, so its mark should point at the next           \
-       * live object */                                                         \
-      debug_only(prev_q = q);                                                   \
-      q = (HeapWord*) oop(q)->mark()->decode_pointer();                         \
-      assert(q > prev_q, "we should be moving forward through memory");         \
-    }                                                                           \
-  }                                                                             \
-                                                                                \
-  assert(q == t, "just checking");                                              \
-}
-
-#define SCAN_AND_COMPACT(obj_size) {                                            \
-  /* Copy all live objects to their new location                                \
-   * Used by MarkSweep::mark_sweep_phase4() */                                  \
-                                                                                \
-  HeapWord*       q = bottom();                                                 \
-  HeapWord* const t = _end_of_live;                                             \
-  debug_only(HeapWord* prev_q = NULL);                                          \
-                                                                                \
-  if (q < t && _first_dead > q &&                                               \
-      !oop(q)->is_gc_marked()) {                                                \
-    debug_only(                                                                 \
-    /* we have a chunk of the space which hasn't moved and we've reinitialized  \
-     * the mark word during the previous pass, so we can't use is_gc_marked for \
-     * the traversal. */                                                        \
-    HeapWord* const end = _first_dead;                                          \
-                                                                                \
-    while (q < end) {                                                           \
-      size_t size = obj_size(q);                                                \
-      assert(!oop(q)->is_gc_marked(),                                           \
-             "should be unmarked (special dense prefix handling)");             \
-      debug_only(prev_q = q);                                                   \
-      q += size;                                                                \
-    }                                                                           \
-    )  /* debug_only */                                                         \
-                                                                                \
-    if (_first_dead == t) {                                                     \
-      q = t;                                                                    \
-    } else {                                                                    \
-      /* $$$ Funky */                                                           \
-      q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer();               \
-    }                                                                           \
-  }                                                                             \
-                                                                                \
-  const intx scan_interval = PrefetchScanIntervalInBytes;                       \
-  const intx copy_interval = PrefetchCopyIntervalInBytes;                       \
-  while (q < t) {                                                               \
-    if (!oop(q)->is_gc_marked()) {                                              \
-      /* mark is pointer to next marked oop */                                  \
-      debug_only(prev_q = q);                                                   \
-      q = (HeapWord*) oop(q)->mark()->decode_pointer();                         \
-      assert(q > prev_q, "we should be moving forward through memory");         \
-    } else {                                                                    \
-      /* prefetch beyond q */                                                   \
-      Prefetch::read(q, scan_interval);                                         \
-                                                                                \
-      /* size and destination */                                                \
-      size_t size = obj_size(q);                                                \
-      HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee();                \
-                                                                                \
-      /* prefetch beyond compaction_top */                                      \
-      Prefetch::write(compaction_top, copy_interval);                           \
-                                                                                \
-      /* copy object and reinit its mark */                                     \
-      assert(q != compaction_top, "everything in this pass should be moving");  \
-      Copy::aligned_conjoint_words(q, compaction_top, size);                    \
-      oop(compaction_top)->init_mark();                                         \
-      assert(oop(compaction_top)->klass() != NULL, "should have a class");      \
-                                                                                \
-      debug_only(prev_q = q);                                                   \
-      q += size;                                                                \
-    }                                                                           \
-  }                                                                             \
-                                                                                \
-  /* Let's remember if we were empty before we did the compaction. */           \
-  bool was_empty = used_region().is_empty();                                    \
-  /* Reset space after compaction is complete */                                \
-  reset_after_compaction();                                                     \
-  /* We do this clear, below, since it has overloaded meanings for some */      \
-  /* space subtypes.  For example, OffsetTableContigSpace's that were   */      \
-  /* compacted into will have had their offset table thresholds updated */      \
-  /* continuously, but those that weren't need to have their thresholds */      \
-  /* re-initialized.  Also mangles unused area for debugging.           */      \
-  if (used_region().is_empty()) {                                               \
-    if (!was_empty) clear(SpaceDecorator::Mangle);                              \
-  } else {                                                                      \
-    if (ZapUnusedHeapArea) mangle_unused_area();                                \
-  }                                                                             \
-}
-
 inline HeapWord* OffsetTableContigSpace::allocate(size_t size) {
   HeapWord* res = ContiguousSpace::allocate(size);
   if (res != NULL) {
@@ -334,4 +71,263 @@
   return _offsets.block_start(p);
 }
 
+template <class SpaceType>
+inline void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp) {
+  // Compute the new addresses for the live objects and store them in the mark
+  // Used by universe::mark_sweep_phase2()
+  HeapWord* compact_top; // This is where we are currently compacting to.
+
+  // We're sure to be here before any objects are compacted into this
+  // space, so this is a good time to initialize this:
+  space->set_compaction_top(space->bottom());
+
+  if (cp->space == NULL) {
+    assert(cp->gen != NULL, "need a generation");
+    assert(cp->threshold == NULL, "just checking");
+    assert(cp->gen->first_compaction_space() == space, "just checking");
+    cp->space = cp->gen->first_compaction_space();
+    compact_top = cp->space->bottom();
+    cp->space->set_compaction_top(compact_top);
+    cp->threshold = cp->space->initialize_threshold();
+  } else {
+    compact_top = cp->space->compaction_top();
+  }
+
+  // We allow some amount of garbage towards the bottom of the space, so
+  // we don't start compacting before there is a significant gain to be made.
+  // Occasionally, we want to ensure a full compaction, which is determined
+  // by the MarkSweepAlwaysCompactCount parameter.
+  uint invocations = MarkSweep::total_invocations();
+  bool skip_dead = ((invocations % MarkSweepAlwaysCompactCount) != 0);
+
+  size_t allowed_deadspace = 0;
+  if (skip_dead) {
+    const size_t ratio = space->allowed_dead_ratio();
+    allowed_deadspace = (space->capacity() * ratio / 100) / HeapWordSize;
+  }
+
+  HeapWord* q = space->bottom();
+  HeapWord* t = space->scan_limit();
+
+  HeapWord*  end_of_live = q;           // One byte beyond the last byte of the last
+                                        // live object.
+  HeapWord*  first_dead = space->end(); // The first dead object.
+  LiveRange* liveRange  = NULL;         // The current live range, recorded in the
+                                        // first header of preceding free area.
+  space->_first_dead = first_dead;
+
+  const intx interval = PrefetchScanIntervalInBytes;
+
+  while (q < t) {
+    assert(!space->scanned_block_is_obj(q) ||
+           oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() ||
+           oop(q)->mark()->has_bias_pattern(),
+           "these are the only valid states during a mark sweep");
+    if (space->scanned_block_is_obj(q) && oop(q)->is_gc_marked()) {
+      // prefetch beyond q
+      Prefetch::write(q, interval);
+      size_t size = space->scanned_block_size(q);
+      compact_top = cp->space->forward(oop(q), size, cp, compact_top);
+      q += size;
+      end_of_live = q;
+    } else {
+      // run over all the contiguous dead objects
+      HeapWord* end = q;
+      do {
+        // prefetch beyond end
+        Prefetch::write(end, interval);
+        end += space->scanned_block_size(end);
+      } while (end < t && (!space->scanned_block_is_obj(end) || !oop(end)->is_gc_marked()));
+
+      // see if we might want to pretend this object is alive so that
+      // we don't have to compact quite as often.
+      if (allowed_deadspace > 0 && q == compact_top) {
+        size_t sz = pointer_delta(end, q);
+        if (space->insert_deadspace(allowed_deadspace, q, sz)) {
+          compact_top = cp->space->forward(oop(q), sz, cp, compact_top);
+          q = end;
+          end_of_live = end;
+          continue;
+        }
+      }
+
+      // otherwise, it really is a free region.
+
+      // for the previous LiveRange, record the end of the live objects.
+      if (liveRange) {
+        liveRange->set_end(q);
+      }
+
+      // record the current LiveRange object.
+      // liveRange->start() is overlaid on the mark word.
+      liveRange = (LiveRange*)q;
+      liveRange->set_start(end);
+      liveRange->set_end(end);
+
+      // see if this is the first dead region.
+      if (q < first_dead) {
+        first_dead = q;
+      }
+
+      // move on to the next object
+      q = end;
+    }
+  }
+
+  assert(q == t, "just checking");
+  if (liveRange != NULL) {
+    liveRange->set_end(q);
+  }
+  space->_end_of_live = end_of_live;
+  if (end_of_live < first_dead) {
+    first_dead = end_of_live;
+  }
+  space->_first_dead = first_dead;
+
+  // save the compaction_top of the compaction space.
+  cp->space->set_compaction_top(compact_top);
+}
+
+template <class SpaceType>
+inline void CompactibleSpace::scan_and_adjust_pointers(SpaceType* space) {
+  // adjust all the interior pointers to point at the new locations of objects
+  // Used by MarkSweep::mark_sweep_phase3()
+
+  HeapWord* q = space->bottom();
+  HeapWord* t = space->_end_of_live;  // Established by "prepare_for_compaction".
+
+  assert(space->_first_dead <= space->_end_of_live, "Stands to reason, no?");
+
+  if (q < t && space->_first_dead > q && !oop(q)->is_gc_marked()) {
+    // we have a chunk of the space which hasn't moved and we've
+    // reinitialized the mark word during the previous pass, so we can't
+    // use is_gc_marked for the traversal.
+    HeapWord* end = space->_first_dead;
+
+    while (q < end) {
+      // I originally tried to conjoin "block_start(q) == q" to the
+      // assertion below, but that doesn't work, because you can't
+      // accurately traverse previous objects to get to the current one
+      // after their pointers have been
+      // updated, until the actual compaction is done.  dld, 4/00
+      assert(space->block_is_obj(q), "should be at block boundaries, and should be looking at objs");
+
+      // point all the oops to the new location
+      size_t size = oop(q)->adjust_pointers();
+      size = space->adjust_obj_size(size);
+
+      q += size;
+    }
+
+    if (space->_first_dead == t) {
+      q = t;
+    } else {
+      // $$$ This is funky.  Using this to read the previously written
+      // LiveRange.  See also use below.
+      q = (HeapWord*)oop(space->_first_dead)->mark()->decode_pointer();
+    }
+  }
+
+  const intx interval = PrefetchScanIntervalInBytes;
+
+  debug_only(HeapWord* prev_q = NULL);
+  while (q < t) {
+    // prefetch beyond q
+    Prefetch::write(q, interval);
+    if (oop(q)->is_gc_marked()) {
+      // q is alive
+      // point all the oops to the new location
+      size_t size = oop(q)->adjust_pointers();
+      size = space->adjust_obj_size(size);
+      debug_only(prev_q = q);
+      q += size;
+    } else {
+      // q is not a live object, so its mark should point at the next
+      // live object
+      debug_only(prev_q = q);
+      q = (HeapWord*) oop(q)->mark()->decode_pointer();
+      assert(q > prev_q, "we should be moving forward through memory");
+    }
+  }
+
+  assert(q == t, "just checking");
+}
+
+template <class SpaceType>
+inline void CompactibleSpace::scan_and_compact(SpaceType* space) {
+  // Copy all live objects to their new location
+  // Used by MarkSweep::mark_sweep_phase4()
+
+  HeapWord*       q = space->bottom();
+  HeapWord* const t = space->_end_of_live;
+  debug_only(HeapWord* prev_q = NULL);
+
+  if (q < t && space->_first_dead > q && !oop(q)->is_gc_marked()) {
+    #ifdef ASSERT // Debug only
+      // we have a chunk of the space which hasn't moved and we've reinitialized
+      // the mark word during the previous pass, so we can't use is_gc_marked for
+      // the traversal.
+      HeapWord* const end = space->_first_dead;
+
+      while (q < end) {
+        size_t size = space->obj_size(q);
+        assert(!oop(q)->is_gc_marked(), "should be unmarked (special dense prefix handling)");
+        prev_q = q;
+        q += size;
+      }
+    #endif
+
+    if (space->_first_dead == t) {
+      q = t;
+    } else {
+      // $$$ Funky
+      q = (HeapWord*) oop(space->_first_dead)->mark()->decode_pointer();
+    }
+  }
+
+  const intx scan_interval = PrefetchScanIntervalInBytes;
+  const intx copy_interval = PrefetchCopyIntervalInBytes;
+  while (q < t) {
+    if (!oop(q)->is_gc_marked()) {
+      // mark is pointer to next marked oop
+      debug_only(prev_q = q);
+      q = (HeapWord*) oop(q)->mark()->decode_pointer();
+      assert(q > prev_q, "we should be moving forward through memory");
+    } else {
+      // prefetch beyond q
+      Prefetch::read(q, scan_interval);
+
+      // size and destination
+      size_t size = space->obj_size(q);
+      HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee();
+
+      // prefetch beyond compaction_top
+      Prefetch::write(compaction_top, copy_interval);
+
+      // copy object and reinit its mark
+      assert(q != compaction_top, "everything in this pass should be moving");
+      Copy::aligned_conjoint_words(q, compaction_top, size);
+      oop(compaction_top)->init_mark();
+      assert(oop(compaction_top)->klass() != NULL, "should have a class");
+
+      debug_only(prev_q = q);
+      q += size;
+    }
+  }
+
+  // Let's remember if we were empty before we did the compaction.
+  bool was_empty = space->used_region().is_empty();
+  // Reset space after compaction is complete
+  space->reset_after_compaction();
+  // We do this clear, below, since it has overloaded meanings for some
+  // space subtypes.  For example, OffsetTableContigSpace's that were
+  // compacted into will have had their offset table thresholds updated
+  // continuously, but those that weren't need to have their thresholds
+  // re-initialized.  Also mangles unused area for debugging.
+  if (space->used_region().is_empty()) {
+    if (!was_empty) space->clear(SpaceDecorator::Mangle);
+  } else {
+    if (ZapUnusedHeapArea) space->mangle_unused_area();
+  }
+}
 #endif // SHARE_VM_MEMORY_SPACE_INLINE_HPP
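One detail of scan_and_forward() worth making concrete is the deadspace budget: every MarkSweepAlwaysCompactCount-th invocation compacts fully, and otherwise up to allowed_dead_ratio() percent of capacity may be retained as pretend-live filler. A worked sketch of that arithmetic (illustrative numbers only; HeapWordSize is 8 bytes on 64-bit):

#include <cstdio>
#include <cstddef>

int main() {
  const std::size_t HeapWordSize = 8;              // bytes per heap word (64-bit)
  const unsigned MarkSweepAlwaysCompactCount = 4;  // illustrative value

  const std::size_t capacity_bytes = 64 * 1024 * 1024;  // a 64M space
  const std::size_t dead_ratio = 5;                     // allowed_dead_ratio(): 5%

  for (unsigned invocations = 1; invocations <= 4; invocations++) {
    bool skip_dead = (invocations % MarkSweepAlwaysCompactCount) != 0;
    std::size_t allowed_deadspace_words =
        skip_dead ? (capacity_bytes * dead_ratio / 100) / HeapWordSize : 0;
    // 5% of 64M is 3355443 bytes = 419430 words; 0 on every 4th invocation,
    // which forces a full compaction.
    std::printf("invocation %u: allowed deadspace = %zu words\n",
                invocations, allowed_deadspace_words);
  }
  return 0;
}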