8038404: Move object_iterate_mem from Space to CMS since it is only ever used by CMS
Reviewed-by: brutisso, tschatzl, stefank
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp Wed Mar 26 16:33:13 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp Wed Mar 12 15:22:45 2014 +0100
@@ -851,7 +851,58 @@
UpwardsObjectClosure* cl) {
assert_locked(freelistLock());
NOT_PRODUCT(verify_objects_initialized());
- Space::object_iterate_mem(mr, cl);
+ assert(!mr.is_empty(), "Should be non-empty");
+ // We use MemRegion(bottom(), end()) rather than used_region() below
+ // because the two are not necessarily equal for some kinds of
+ // spaces, in particular, certain kinds of free list spaces.
+ // We could use the more complicated but more precise:
+ // MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
+ // but the slight imprecision seems acceptable in the assertion check.
+ assert(MemRegion(bottom(), end()).contains(mr),
+ "Should be within used space");
+ HeapWord* prev = cl->previous(); // max address from last time
+ if (prev >= mr.end()) { // nothing to do
+ return;
+ }
+ // This assert will not work when we go from cms space to perm
+ // space, and use same closure. Easy fix deferred for later. XXX YSR
+ // assert(prev == NULL || contains(prev), "Should be within space");
+
+ bool last_was_obj_array = false;
+ HeapWord *blk_start_addr, *region_start_addr;
+ if (prev > mr.start()) {
+ region_start_addr = prev;
+ blk_start_addr = prev;
+ // The previous invocation may have pushed "prev" beyond the
+ // last allocated block yet there may still be blocks
+ // in this region due to a particular coalescing policy.
+ // Relax the assertion so that the case where the unallocated
+ // block is maintained and "prev" is beyond the unallocated
+ // block does not cause the assertion to fire.
+ assert((BlockOffsetArrayUseUnallocatedBlock &&
+ (!is_in(prev))) ||
+ (blk_start_addr == block_start(region_start_addr)), "invariant");
+ } else {
+ region_start_addr = mr.start();
+ blk_start_addr = block_start(region_start_addr);
+ }
+ HeapWord* region_end_addr = mr.end();
+ MemRegion derived_mr(region_start_addr, region_end_addr);
+ while (blk_start_addr < region_end_addr) {
+ const size_t size = block_size(blk_start_addr);
+ if (block_is_obj(blk_start_addr)) {
+ last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
+ } else {
+ last_was_obj_array = false;
+ }
+ blk_start_addr += size;
+ }
+ if (!last_was_obj_array) {
+ assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
+ "Should be within (closed) used space");
+ assert(blk_start_addr > prev, "Invariant");
+ cl->set_previous(blk_start_addr); // min address for next time
+ }
}
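(Reviewer note, not part of the changeset: a minimal sketch of how the moved iterator is typically driven. The closure and driver below are hypothetical; the caller is assumed to hold the freelistLock, as the assert at the top of the method requires, and to pass non-empty MemRegions in ascending address order so the closure's "previous" address keeps an object that straddles a region boundary from being visited twice.)

// Hypothetical example only -- not part of this changeset.
class CountObjectsClosure : public UpwardsObjectClosure {
  size_t _count;
 public:
  CountObjectsClosure() : _count(0) { }
  virtual bool do_object_bm(oop obj, MemRegion mr) {
    _count++;
    return false;   // false: record this object's end in _previous_address
  }
  size_t count() const { return _count; }
};

// Assumes the freelistLock is held and regions[] is sorted by ascending address.
static size_t count_objects_in(CompactibleFreeListSpace* sp,
                               MemRegion* regions, int num_regions) {
  CountObjectsClosure cl;
  for (int i = 0; i < num_regions; i++) {
    sp->object_iterate_mem(regions[i], &cl);
  }
  return cl.count();
}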
// Callers of this iterator beware: The closure application should
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp Wed Mar 26 16:33:13 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp Wed Mar 12 15:22:45 2014 +0100
@@ -363,6 +363,12 @@
// obj_is_alive() to determine whether it is safe to iterate over
// an object.
void safe_object_iterate(ObjectClosure* blk);
+
+ // Iterate over all objects that intersect with mr, calling "cl->do_object_bm"
+ // on each. There is an exception to this: if this closure has already
+ // been invoked on an object, it may skip such objects in some cases. This is
+ // most likely to happen in an "upwards" (ascending address) iteration of
+ // MemRegions.
void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
// Requires that "mr" be entirely within the space.
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp Wed Mar 26 16:33:13 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp Wed Mar 12 15:22:45 2014 +0100
@@ -1498,6 +1498,19 @@
}
};
+// A version of ObjectClosure with "memory" (see _previous_address below)
+class UpwardsObjectClosure: public BoolObjectClosure {
+ HeapWord* _previous_address;
+ public:
+ UpwardsObjectClosure() : _previous_address(NULL) { }
+ void set_previous(HeapWord* addr) { _previous_address = addr; }
+ HeapWord* previous() { return _previous_address; }
+ // A return value of "true" can be used by the caller to decide
+ // if this object's end should *NOT* be recorded in
+ // _previous_address above.
+ virtual bool do_object_bm(oop obj, MemRegion mr) = 0;
+};
+
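(Reviewer note, not part of the changeset: the return-value convention documented above, sketched as a hypothetical subclass. Returning true tells object_iterate_mem not to record this object's end, so a later region may visit it again; the local variable name last_was_obj_array in the moved implementation suggests the existing CMS closure returns true for object arrays that were only partially processed within mr.)

// Hypothetical example only -- not part of this changeset.
class PartialArrayAwareClosure : public UpwardsObjectClosure {
 public:
  virtual bool do_object_bm(oop obj, MemRegion mr) {
    // ... process the portion of obj that intersects mr ...
    // Return true (do not advance _previous_address) when an object
    // array extends past the current region and will be finished later.
    return obj->is_objArray() &&
           ((HeapWord*)obj) + obj->size() > mr.end();
  }
};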
// This closure is used during the second checkpointing phase
// to rescan the marked objects on the dirty cards in the mod
// union table and the card table proper. It's invoked via
--- a/hotspot/src/share/vm/memory/iterator.hpp Wed Mar 26 16:33:13 2014 +0100
+++ b/hotspot/src/share/vm/memory/iterator.hpp Wed Mar 12 15:22:45 2014 +0100
@@ -177,19 +177,6 @@
ObjectToOopClosure(ExtendedOopClosure* cl) : _cl(cl) {}
};
-// A version of ObjectClosure with "memory" (see _previous_address below)
-class UpwardsObjectClosure: public BoolObjectClosure {
- HeapWord* _previous_address;
- public:
- UpwardsObjectClosure() : _previous_address(NULL) { }
- void set_previous(HeapWord* addr) { _previous_address = addr; }
- HeapWord* previous() { return _previous_address; }
- // A return value of "true" can be used by the caller to decide
- // if this object's end should *NOT* be recorded in
- // _previous_address above.
- virtual bool do_object_bm(oop obj, MemRegion mr) = 0;
-};
-
// A version of ObjectClosure that is expected to be robust
// in the face of possibly uninitialized objects.
class ObjectClosureCareful : public ObjectClosure {
--- a/hotspot/src/share/vm/memory/space.cpp Wed Mar 26 16:33:13 2014 +0100
+++ b/hotspot/src/share/vm/memory/space.cpp Wed Mar 12 15:22:45 2014 +0100
@@ -558,104 +558,11 @@
return bottom();
}
-
-void Space::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) {
- assert(!mr.is_empty(), "Should be non-empty");
- // We use MemRegion(bottom(), end()) rather than used_region() below
- // because the two are not necessarily equal for some kinds of
- // spaces, in particular, certain kinds of free list spaces.
- // We could use the more complicated but more precise:
- // MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
- // but the slight imprecision seems acceptable in the assertion check.
- assert(MemRegion(bottom(), end()).contains(mr),
- "Should be within used space");
- HeapWord* prev = cl->previous(); // max address from last time
- if (prev >= mr.end()) { // nothing to do
- return;
- }
- // This assert will not work when we go from cms space to perm
- // space, and use same closure. Easy fix deferred for later. XXX YSR
- // assert(prev == NULL || contains(prev), "Should be within space");
-
- bool last_was_obj_array = false;
- HeapWord *blk_start_addr, *region_start_addr;
- if (prev > mr.start()) {
- region_start_addr = prev;
- blk_start_addr = prev;
- // The previous invocation may have pushed "prev" beyond the
- // last allocated block yet there may be still be blocks
- // in this region due to a particular coalescing policy.
- // Relax the assertion so that the case where the unallocated
- // block is maintained and "prev" is beyond the unallocated
- // block does not cause the assertion to fire.
- assert((BlockOffsetArrayUseUnallocatedBlock &&
- (!is_in(prev))) ||
- (blk_start_addr == block_start(region_start_addr)), "invariant");
- } else {
- region_start_addr = mr.start();
- blk_start_addr = block_start(region_start_addr);
- }
- HeapWord* region_end_addr = mr.end();
- MemRegion derived_mr(region_start_addr, region_end_addr);
- while (blk_start_addr < region_end_addr) {
- const size_t size = block_size(blk_start_addr);
- if (block_is_obj(blk_start_addr)) {
- last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
- } else {
- last_was_obj_array = false;
- }
- blk_start_addr += size;
- }
- if (!last_was_obj_array) {
- assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
- "Should be within (closed) used space");
- assert(blk_start_addr > prev, "Invariant");
- cl->set_previous(blk_start_addr); // min address for next time
- }
-}
-
bool Space::obj_is_alive(const HeapWord* p) const {
assert (block_is_obj(p), "The address should point to an object");
return true;
}
-void ContiguousSpace::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) {
- assert(!mr.is_empty(), "Should be non-empty");
- assert(used_region().contains(mr), "Should be within used space");
- HeapWord* prev = cl->previous(); // max address from last time
- if (prev >= mr.end()) { // nothing to do
- return;
- }
- // See comment above (in more general method above) in case you
- // happen to use this method.
- assert(prev == NULL || is_in_reserved(prev), "Should be within space");
-
- bool last_was_obj_array = false;
- HeapWord *obj_start_addr, *region_start_addr;
- if (prev > mr.start()) {
- region_start_addr = prev;
- obj_start_addr = prev;
- assert(obj_start_addr == block_start(region_start_addr), "invariant");
- } else {
- region_start_addr = mr.start();
- obj_start_addr = block_start(region_start_addr);
- }
- HeapWord* region_end_addr = mr.end();
- MemRegion derived_mr(region_start_addr, region_end_addr);
- while (obj_start_addr < region_end_addr) {
- oop obj = oop(obj_start_addr);
- const size_t size = obj->size();
- last_was_obj_array = cl->do_object_bm(obj, derived_mr);
- obj_start_addr += size;
- }
- if (!last_was_obj_array) {
- assert((bottom() <= obj_start_addr) && (obj_start_addr <= end()),
- "Should be within (closed) used space");
- assert(obj_start_addr > prev, "Invariant");
- cl->set_previous(obj_start_addr); // min address for next time
- }
-}
-
#if INCLUDE_ALL_GCS
#define ContigSpace_PAR_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
\
--- a/hotspot/src/share/vm/memory/space.hpp Wed Mar 26 16:33:13 2014 +0100
+++ b/hotspot/src/share/vm/memory/space.hpp Wed Mar 12 15:22:45 2014 +0100
@@ -204,13 +204,6 @@
// objects whose internal references point to objects in the space.
virtual void safe_object_iterate(ObjectClosure* blk) = 0;
- // Iterate over all objects that intersect with mr, calling "cl->do_object"
- // on each. There is an exception to this: if this closure has already
- // been invoked on an object, it may skip such objects in some cases. This is
- // Most likely to happen in an "upwards" (ascending address) iteration of
- // MemRegions.
- virtual void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
-
// Iterate over as many initialized objects in the space as possible,
// calling "cl.do_object_careful" on each. Return NULL if all objects
// in the space (at the start of the iteration) were iterated over.
@@ -840,7 +833,6 @@
// For contiguous spaces this method will iterate safely over objects
// in the space (i.e., between bottom and top) when at a safepoint.
void safe_object_iterate(ObjectClosure* blk);
- void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
// iterates on objects up to the safe limit
HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
HeapWord* concurrent_iteration_safe_limit() {