--- a/hotspot/src/os/linux/vm/os_linux.cpp Fri Jul 25 11:29:03 2008 -0700
+++ b/hotspot/src/os/linux/vm/os_linux.cpp Mon Jul 28 15:30:23 2008 -0700
@@ -2278,7 +2278,7 @@
dlsym(RTLD_DEFAULT, "sched_getcpu")));
if (sched_getcpu() != -1) { // Does it work?
- void *handle = dlopen("libnuma.so", RTLD_LAZY);
+ void *handle = dlopen("libnuma.so.1", RTLD_LAZY);
if (handle != NULL) {
set_numa_node_to_cpus(CAST_TO_FN_PTR(numa_node_to_cpus_func_t,
dlsym(handle, "numa_node_to_cpus")));
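The hunk above switches the dlopen() target from the unversioned "libnuma.so" to the runtime soname "libnuma.so.1": the unversioned name is usually a symlink shipped only with the development package, so loading the versioned name works on machines that have only the runtime library installed. A minimal standalone sketch of the pattern (not HotSpot code; numa_available is a documented libnuma entry point):

    #include <dlfcn.h>
    #include <stdio.h>

    typedef int (*numa_available_func_t)(void);

    int main() {
      // Prefer the versioned runtime soname; "libnuma.so" typically
      // exists only when the -devel package is installed.
      void* handle = dlopen("libnuma.so.1", RTLD_LAZY);
      if (handle == NULL) {
        printf("libnuma not present; use the non-NUMA path\n");
        return 0;
      }
      numa_available_func_t numa_available =
          (numa_available_func_t) dlsym(handle, "numa_available");
      if (numa_available != NULL && numa_available() != -1) {
        printf("NUMA API is usable\n");
      }
      dlclose(handle);
      return 0;
    }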
--- a/hotspot/src/os/solaris/vm/os_solaris.cpp Fri Jul 25 11:29:03 2008 -0700
+++ b/hotspot/src/os/solaris/vm/os_solaris.cpp Mon Jul 28 15:30:23 2008 -0700
@@ -2658,6 +2658,12 @@
top += r;
cur++;
}
+ if (bottom == 0) {
+ // Handle the situation when the OS reports no memory available.
+ // Assume a UMA architecture.
+ ids[0] = 0;
+ return 1;
+ }
return bottom;
}
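The new block gives the locality-group walk a graceful degenerate case: if the OS reports no memory in any group, return a single group with id 0 so the rest of the VM treats the machine as UMA. A compilable sketch of that fallback shape, with a stub standing in for the platform query (names hypothetical):

    #include <cstdio>

    // Stand-in for a platform query that may legitimately find nothing.
    static int query_platform_nodes(int* /*ids*/, int /*max*/) { return 0; }

    // Mirror of the fallback: report one node (id 0) when enumeration
    // comes back empty, so callers can assume a UMA machine.
    static int get_memory_node_ids(int* ids, int max_count) {
      int found = query_platform_nodes(ids, max_count);
      if (found == 0) {
        ids[0] = 0;  // single node 0: assume UMA
        found = 1;
      }
      return found;
    }

    int main() {
      int ids[8];
      int n = get_memory_node_ids(ids, 8);
      std::printf("nodes: %d (first id %d)\n", n, ids[0]);
      return 0;
    }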
@@ -4581,7 +4587,7 @@
}
void os::Solaris::liblgrp_init() {
- void *handle = dlopen("liblgrp.so", RTLD_LAZY);
+ void *handle = dlopen("liblgrp.so.1", RTLD_LAZY);
if (handle != NULL) {
os::Solaris::set_lgrp_home(CAST_TO_FN_PTR(lgrp_home_func_t, dlsym(handle, "lgrp_home")));
os::Solaris::set_lgrp_init(CAST_TO_FN_PTR(lgrp_init_func_t, dlsym(handle, "lgrp_init")));
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/binaryTreeDictionary.cpp Fri Jul 25 11:29:03 2008 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/binaryTreeDictionary.cpp Mon Jul 28 15:30:23 2008 -0700
@@ -71,8 +71,15 @@
TreeList* TreeList::as_TreeList(HeapWord* addr, size_t size) {
TreeChunk* tc = (TreeChunk*) addr;
assert(size >= sizeof(TreeChunk), "Chunk is too small for a TreeChunk");
- assert(tc->size() == 0 && tc->prev() == NULL && tc->next() == NULL,
- "Space should be clear");
+ // The space in the heap will have been mangled initially but
+ // is not remangled when a free chunk is returned to the free list
+ // (since it is used to maintain the chunk on the free list).
+ assert((ZapUnusedHeapArea &&
+ SpaceMangler::is_mangled((HeapWord*) tc->size_addr()) &&
+ SpaceMangler::is_mangled((HeapWord*) tc->prev_addr()) &&
+ SpaceMangler::is_mangled((HeapWord*) tc->next_addr())) ||
+ (tc->size() == 0 && tc->prev() == NULL && tc->next() == NULL),
+ "Space should be clear or mangled");
tc->setSize(size);
tc->linkPrev(NULL);
tc->linkNext(NULL);
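With ZapUnusedHeapArea, free heap space carries a debug fill pattern, and the header words of a chunk being turned into a TreeList may legitimately still hold that pattern rather than zeros; the widened assert accepts either state. A toy sketch of the acceptance test (the fill value here is illustrative; HotSpot uses its own bad-heap-word pattern):

    #include <cassert>
    #include <cstdint>

    static const uintptr_t kMangleWord = (uintptr_t)0xBAADBABEUL;

    static bool is_mangled(const uintptr_t* p) { return *p == kMangleWord; }

    struct ToyChunk { uintptr_t size, prev, next; };

    static void accept_chunk(const ToyChunk* tc, bool zap_unused) {
      // Header is acceptable if it still carries the debug fill pattern
      // (freshly mangled space) or has been explicitly zeroed.
      assert(((zap_unused &&
               is_mangled(&tc->size) && is_mangled(&tc->prev) &&
               is_mangled(&tc->next)) ||
              (tc->size == 0 && tc->prev == 0 && tc->next == 0)) &&
             "Space should be clear or mangled");
      (void)tc; (void)zap_unused;  // silence warnings in NDEBUG builds
    }

    int main() {
      ToyChunk mangled = {kMangleWord, kMangleWord, kMangleWord};
      accept_chunk(&mangled, true);
      ToyChunk cleared = {0, 0, 0};
      accept_chunk(&cleared, false);
      return 0;
    }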
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp Fri Jul 25 11:29:03 2008 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp Mon Jul 28 15:30:23 2008 -0700
@@ -54,7 +54,7 @@
_collector(NULL)
{
_bt.set_space(this);
- initialize(mr, true);
+ initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
// We have all of "mr", all of which we place in the dictionary
// as one big chunk. We'll need to decide here which of several
// possible alternative dictionary implementations to use. For
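Throughout this changeset, bare boolean arguments to initialize() are replaced by named SpaceDecorator constants so call sites state their intent. A sketch of the idiom (the real constants live in spaceDecorator.hpp; the values here are the obvious encoding):

    #include <cstdio>

    struct SpaceDecorator {
      static const bool Clear      = true;
      static const bool DontClear  = false;
      static const bool Mangle     = true;
      static const bool DontMangle = false;
    };

    static void initialize(bool clear_space, bool mangle_space) {
      std::printf("clear=%d mangle=%d\n", clear_space, mangle_space);
    }

    int main() {
      // Self-documenting compared to initialize(true, false).
      initialize(SpaceDecorator::Clear, SpaceDecorator::DontMangle);
      return 0;
    }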
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeBlockDictionary.hpp Fri Jul 25 11:29:03 2008 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeBlockDictionary.hpp Mon Jul 28 15:30:23 2008 -0700
@@ -22,7 +22,6 @@
*
*/
-
// A FreeBlockDictionary is an abstract superclass that will allow
// a number of alternative implementations in the future.
class FreeBlockDictionary: public CHeapObj {
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.hpp Fri Jul 25 11:29:03 2008 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.hpp Mon Jul 28 15:30:23 2008 -0700
@@ -85,6 +85,8 @@
}
debug_only(void* prev_addr() const { return (void*)&_prev; })
+ debug_only(void* next_addr() const { return (void*)&_next; })
+ debug_only(void* size_addr() const { return (void*)&_size; })
size_t size() const volatile {
LP64_ONLY(if (UseCompressedOops) return mark()->get_size(); else )
--- a/hotspot/src/share/vm/gc_implementation/includeDB_gc_concurrentMarkSweep Fri Jul 25 11:29:03 2008 -0700
+++ b/hotspot/src/share/vm/gc_implementation/includeDB_gc_concurrentMarkSweep Mon Jul 28 15:30:23 2008 -0700
@@ -28,6 +28,7 @@
binaryTreeDictionary.cpp binaryTreeDictionary.hpp
binaryTreeDictionary.cpp globals.hpp
binaryTreeDictionary.cpp ostream.hpp
+binaryTreeDictionary.cpp spaceDecorator.hpp
binaryTreeDictionary.hpp freeBlockDictionary.hpp
binaryTreeDictionary.hpp freeList.hpp
@@ -114,6 +115,7 @@
compactibleFreeListSpace.cpp liveRange.hpp
compactibleFreeListSpace.cpp oop.inline.hpp
compactibleFreeListSpace.cpp resourceArea.hpp
+compactibleFreeListSpace.cpp spaceDecorator.hpp
compactibleFreeListSpace.cpp universe.inline.hpp
compactibleFreeListSpace.cpp vmThread.hpp
--- a/hotspot/src/share/vm/gc_implementation/includeDB_gc_parNew Fri Jul 25 11:29:03 2008 -0700
+++ b/hotspot/src/share/vm/gc_implementation/includeDB_gc_parNew Mon Jul 28 15:30:23 2008 -0700
@@ -22,16 +22,17 @@
//
//
-asParNewGeneration.hpp adaptiveSizePolicy.hpp
-asParNewGeneration.hpp parNewGeneration.hpp
+asParNewGeneration.hpp adaptiveSizePolicy.hpp
+asParNewGeneration.hpp parNewGeneration.hpp
-asParNewGeneration.cpp asParNewGeneration.hpp
-asParNewGeneration.cpp cmsAdaptiveSizePolicy.hpp
+asParNewGeneration.cpp asParNewGeneration.hpp
+asParNewGeneration.cpp cmsAdaptiveSizePolicy.hpp
asParNewGeneration.cpp cmsGCAdaptivePolicyCounters.hpp
-asParNewGeneration.cpp defNewGeneration.inline.hpp
-asParNewGeneration.cpp oop.pcgc.inline.hpp
-asParNewGeneration.cpp parNewGeneration.hpp
+asParNewGeneration.cpp defNewGeneration.inline.hpp
+asParNewGeneration.cpp oop.pcgc.inline.hpp
+asParNewGeneration.cpp parNewGeneration.hpp
asParNewGeneration.cpp referencePolicy.hpp
+asParNewGeneration.cpp spaceDecorator.hpp
parCardTableModRefBS.cpp allocation.inline.hpp
parCardTableModRefBS.cpp cardTableModRefBS.hpp
@@ -75,6 +76,7 @@
parNewGeneration.cpp resourceArea.hpp
parNewGeneration.cpp sharedHeap.hpp
parNewGeneration.cpp space.hpp
+parNewGeneration.cpp spaceDecorator.hpp
parNewGeneration.cpp workgroup.hpp
parNewGeneration.hpp defNewGeneration.hpp
--- a/hotspot/src/share/vm/gc_implementation/includeDB_gc_parallelScavenge Fri Jul 25 11:29:03 2008 -0700
+++ b/hotspot/src/share/vm/gc_implementation/includeDB_gc_parallelScavenge Mon Jul 28 15:30:23 2008 -0700
@@ -53,14 +53,15 @@
asPSOldGen.cpp oop.inline.hpp
asPSOldGen.cpp parallelScavengeHeap.hpp
asPSOldGen.cpp psMarkSweepDecorator.hpp
-asPSOldGen.cpp asPSOldGen.hpp
+asPSOldGen.cpp asPSOldGen.hpp
asPSYoungGen.hpp generationCounters.hpp
asPSYoungGen.hpp mutableSpace.hpp
asPSYoungGen.hpp objectStartArray.hpp
asPSYoungGen.hpp spaceCounters.hpp
asPSYoungGen.hpp psVirtualspace.hpp
-asPSYoungGen.hpp psYoungGen.hpp
+asPSYoungGen.hpp psYoungGen.hpp
+asPSYoungGen.hpp spaceDecorator.hpp
asPSYoungGen.cpp gcUtil.hpp
asPSYoungGen.cpp java.hpp
@@ -68,8 +69,9 @@
asPSYoungGen.cpp parallelScavengeHeap.hpp
asPSYoungGen.cpp psMarkSweepDecorator.hpp
asPSYoungGen.cpp psScavenge.hpp
-asPSYoungGen.cpp asPSYoungGen.hpp
-asPSYoungGen.cpp psYoungGen.hpp
+asPSYoungGen.cpp asPSYoungGen.hpp
+asPSYoungGen.cpp psYoungGen.hpp
+asPSYoungGen.cpp spaceDecorator.hpp
cardTableExtension.cpp cardTableExtension.hpp
cardTableExtension.cpp gcTaskManager.hpp
@@ -225,6 +227,7 @@
psMarkSweep.cpp referencePolicy.hpp
psMarkSweep.cpp referenceProcessor.hpp
psMarkSweep.cpp safepoint.hpp
+psMarkSweep.cpp spaceDecorator.hpp
psMarkSweep.cpp symbolTable.hpp
psMarkSweep.cpp systemDictionary.hpp
psMarkSweep.cpp vmThread.hpp
@@ -239,6 +242,7 @@
psMarkSweepDecorator.cpp parallelScavengeHeap.hpp
psMarkSweepDecorator.cpp psMarkSweep.hpp
psMarkSweepDecorator.cpp psMarkSweepDecorator.hpp
+psMarkSweepDecorator.cpp spaceDecorator.hpp
psMarkSweepDecorator.cpp systemDictionary.hpp
psMarkSweepDecorator.hpp mutableSpace.hpp
@@ -290,6 +294,7 @@
psOldGen.cpp parallelScavengeHeap.hpp
psOldGen.cpp psMarkSweepDecorator.hpp
psOldGen.cpp psOldGen.hpp
+psOldGen.cpp spaceDecorator.hpp
psOldGen.hpp psGenerationCounters.hpp
psOldGen.hpp mutableSpace.hpp
@@ -351,6 +356,7 @@
psScavenge.cpp referencePolicy.hpp
psScavenge.cpp referenceProcessor.hpp
psScavenge.cpp resourceArea.hpp
+psScavenge.cpp spaceDecorator.hpp
psScavenge.cpp threadCritical.hpp
psScavenge.cpp vmThread.hpp
psScavenge.cpp vm_operations.hpp
@@ -409,8 +415,8 @@
psVirtualspace.cpp os.hpp
psVirtualspace.cpp os_<os_family>.inline.hpp
-psVirtualspace.cpp psVirtualspace.hpp
-psVirtualspace.cpp virtualspace.hpp
+psVirtualspace.cpp psVirtualspace.hpp
+psVirtualspace.cpp virtualspace.hpp
psYoungGen.cpp gcUtil.hpp
psYoungGen.cpp java.hpp
@@ -419,7 +425,8 @@
psYoungGen.cpp psMarkSweepDecorator.hpp
psYoungGen.cpp psScavenge.hpp
psYoungGen.cpp psYoungGen.hpp
-psYoungGen.cpp mutableNUMASpace.hpp
+psYoungGen.cpp mutableNUMASpace.hpp
+psYoungGen.cpp spaceDecorator.hpp
psYoungGen.hpp psGenerationCounters.hpp
psYoungGen.hpp mutableSpace.hpp
--- a/hotspot/src/share/vm/gc_implementation/includeDB_gc_shared Fri Jul 25 11:29:03 2008 -0700
+++ b/hotspot/src/share/vm/gc_implementation/includeDB_gc_shared Mon Jul 28 15:30:23 2008 -0700
@@ -56,6 +56,7 @@
mutableNUMASpace.cpp mutableNUMASpace.hpp
mutableNUMASpace.cpp oop.inline.hpp
mutableNUMASpace.cpp sharedHeap.hpp
+mutableNUMASpace.cpp spaceDecorator.hpp
mutableNUMASpace.cpp thread_<os_family>.inline.hpp
mutableNUMASpace.hpp mutableSpace.hpp
@@ -64,6 +65,7 @@
mutableSpace.cpp mutableSpace.hpp
mutableSpace.cpp oop.inline.hpp
mutableSpace.cpp safepoint.hpp
+mutableSpace.cpp spaceDecorator.hpp
mutableSpace.cpp thread.hpp
spaceCounters.cpp resourceArea.hpp
--- a/hotspot/src/share/vm/gc_implementation/parNew/asParNewGeneration.cpp Fri Jul 25 11:29:03 2008 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parNew/asParNewGeneration.cpp Mon Jul 28 15:30:23 2008 -0700
@@ -162,10 +162,9 @@
// Grow the generation
size_t change = desired_size - orig_size;
assert(change % alignment == 0, "just checking");
- if (!virtual_space()->expand_by(change)) {
+ if (!expand(change)) {
return false; // Error if we fail to resize!
}
-
size_changed = true;
} else if (desired_size < orig_size) {
size_t desired_change = orig_size - desired_size;
@@ -222,7 +221,9 @@
// Was there a shrink of the survivor space?
if (new_end < to()->end()) {
MemRegion mr(to()->bottom(), new_end);
- to()->initialize(mr, false /* clear */);
+ to()->initialize(mr,
+ SpaceDecorator::DontClear,
+ SpaceDecorator::DontMangle);
}
}
}
@@ -322,9 +323,7 @@
pointer_delta(from_start, eden_start, sizeof(char)));
}
-// tty->print_cr("eden_size before: " SIZE_FORMAT, eden_size);
eden_size = align_size_down(eden_size, alignment);
-// tty->print_cr("eden_size after: " SIZE_FORMAT, eden_size);
eden_end = eden_start + eden_size;
assert(eden_end >= eden_start, "addition overflowed")
@@ -501,11 +500,31 @@
size_t old_from = from()->capacity();
size_t old_to = to()->capacity();
+ // If not clearing the spaces, do some checking to verify that
+ // the spaces are already mangled.
+
+ // Must check mangling before the spaces are reshaped. Otherwise,
+ // the bottom or end of one space may have moved into an area
+ // covered by another space and a failure of the check may not
+ // correctly indicate which space is not properly mangled.
+ if (ZapUnusedHeapArea) {
+ HeapWord* limit = (HeapWord*) virtual_space()->high();
+ eden()->check_mangled_unused_area(limit);
+ from()->check_mangled_unused_area(limit);
+ to()->check_mangled_unused_area(limit);
+ }
+
// The call to initialize NULL's the next compaction space
- eden()->initialize(edenMR, true);
+ eden()->initialize(edenMR,
+ SpaceDecorator::Clear,
+ SpaceDecorator::DontMangle);
eden()->set_next_compaction_space(from());
- to()->initialize(toMR , true);
- from()->initialize(fromMR, false); // Note, not cleared!
+ to()->initialize(toMR ,
+ SpaceDecorator::Clear,
+ SpaceDecorator::DontMangle);
+ from()->initialize(fromMR,
+ SpaceDecorator::DontClear,
+ SpaceDecorator::DontMangle);
assert(from()->top() == old_from_top, "from top changed!");
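The check above runs before the spaces are reshaped because afterwards a mangling failure could be misattributed to the wrong space. Conceptually, a check_mangled_unused_area-style verification confirms that the unused tail of a space still holds the fill word; a hedged, self-contained sketch of that kind of check (names and fill value illustrative, not HotSpot's implementation):

    #include <cassert>
    #include <cstddef>

    typedef size_t Word;
    static const Word kFill = (Word)0xBAADBABE;

    // Verify [top, limit) still carries the debug fill pattern.
    static void check_mangled(const Word* top, const Word* limit) {
      for (const Word* p = top; p < limit; ++p) {
        assert(*p == kFill && "unused area lost its mangle pattern");
      }
    }

    int main() {
      Word space[16];
      for (int i = 0; i < 16; ++i) space[i] = kFill;  // freshly mangled
      check_mangled(space + 4, space + 16);  // tail above the top pointer
      return 0;
    }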
--- a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp Fri Jul 25 11:29:03 2008 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp Mon Jul 28 15:30:23 2008 -0700
@@ -727,7 +727,7 @@
SpecializationStats::clear();
age_table()->clear();
- to()->clear();
+ to()->clear(SpaceDecorator::Mangle);
gch->save_marks();
assert(workers != NULL, "Need parallel worker threads.");
@@ -793,8 +793,18 @@
}
if (!promotion_failed()) {
// Swap the survivor spaces.
- eden()->clear();
- from()->clear();
+ eden()->clear(SpaceDecorator::Mangle);
+ from()->clear(SpaceDecorator::Mangle);
+ if (ZapUnusedHeapArea) {
+ // This is now done here because of the piece-meal mangling which
+ // can check for valid mangling at intermediate points in the
+ // collection(s). When a minor collection fails to collect
+ // sufficient space, resizing of the young generation can occur
+ // and redistribute the spaces in the young generation. Mangle
+ // here so that unzapped regions don't get distributed to
+ // other spaces.
+ to()->mangle_unused_area();
+ }
swap_spaces();
assert(to()->is_empty(), "to space should be empty now");
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/asPSYoungGen.cpp Fri Jul 25 11:29:03 2008 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/asPSYoungGen.cpp Mon Jul 28 15:30:23 2008 -0700
@@ -170,9 +170,20 @@
if (desired_size > orig_size) {
// Grow the generation
size_t change = desired_size - orig_size;
+ HeapWord* prev_low = (HeapWord*) virtual_space()->low();
if (!virtual_space()->expand_by(change)) {
return false;
}
+ if (ZapUnusedHeapArea) {
+ // Mangle newly committed space immediately because it
+ // can be done here more simply than after the new
+ // spaces have been computed.
+ HeapWord* new_low = (HeapWord*) virtual_space()->low();
+ assert(new_low < prev_low, "Did not grow");
+
+ MemRegion mangle_region(new_low, prev_low);
+ SpaceMangler::mangle_region(mangle_region);
+ }
size_changed = true;
} else if (desired_size < orig_size) {
size_t desired_change = orig_size - desired_size;
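Mangling the newly committed span right at the expand_by() call keeps the bookkeeping simple: the region between the old and new committed boundaries is known exactly here, before the space boundaries are recomputed. A minimal sketch of what a SpaceMangler::mangle_region-style fill does (the fill value is illustrative of HotSpot's bad-heap-word pattern):

    #include <cstdio>

    typedef unsigned int Word;
    static const Word kBadHeapWord = 0xBAADBABE;

    // Fill every word of [start, end) with a recognizable bad value so
    // stale reads of uninitialized heap are easy to spot in a debugger.
    static void mangle_region(Word* start, Word* end) {
      for (Word* p = start; p < end; ++p) {
        *p = kBadHeapWord;
      }
    }

    int main() {
      Word committed[8] = {0};
      mangle_region(committed, committed + 8);  // newly committed span
      std::printf("first word: 0x%X\n", committed[0]);
      return 0;
    }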
@@ -215,8 +226,10 @@
// current implementation does not allow holes between the spaces
// _young_generation_boundary has to be reset because it changes.
// so additional verification
+
void ASPSYoungGen::resize_spaces(size_t requested_eden_size,
size_t requested_survivor_size) {
+ assert(UseAdaptiveSizePolicy, "sanity check");
assert(requested_eden_size > 0 && requested_survivor_size > 0,
"just checking");
@@ -276,22 +289,42 @@
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
const size_t alignment = heap->intra_heap_alignment();
+ const bool maintain_minimum =
+ (requested_eden_size + 2 * requested_survivor_size) <= min_gen_size();
+ bool eden_from_to_order = from_start < to_start;
// Check whether from space is below to space
- if (from_start < to_start) {
+ if (eden_from_to_order) {
// Eden, from, to
+
if (PrintAdaptiveSizePolicy && Verbose) {
gclog_or_tty->print_cr(" Eden, from, to:");
}
// Set eden
- // Compute how big eden can be, then adjust end.
- // See comment in PSYoungGen::resize_spaces() on
- // calculating eden_end.
- const size_t eden_size = MIN2(requested_eden_size,
- pointer_delta(from_start,
- eden_start,
- sizeof(char)));
+ // "requested_eden_size" is a goal for the size of eden
+ // and may not be attainable. "eden_size" below is
+ // calculated based on the location of from-space and
+ // the goal for the size of eden. from-space is
+ // fixed in place because it contains live data.
+ // The calculation is done this way to avoid 32-bit
+ // overflow (i.e., eden_start + requested_eden_size
+ // may be too large to represent in 32 bits).
+ size_t eden_size;
+ if (maintain_minimum) {
+ // Only make eden larger than the requested size if
+ // the minimum size of the generation has to be maintained.
+ // This could be done in general but policy at a higher
+ // level is determining a requested size for eden and that
+ // should be honored unless there is a fundamental reason.
+ eden_size = pointer_delta(from_start,
+ eden_start,
+ sizeof(char));
+ } else {
+ eden_size = MIN2(requested_eden_size,
+ pointer_delta(from_start, eden_start, sizeof(char)));
+ }
+
eden_end = eden_start + eden_size;
assert(eden_end >= eden_start, "addition overflowed")
@@ -371,12 +404,14 @@
to_start = MAX2(to_start, eden_start + alignment);
// Compute how big eden can be, then adjust end.
- // See comment in PSYoungGen::resize_spaces() on
- // calculating eden_end.
- const size_t eden_size = MIN2(requested_eden_size,
- pointer_delta(to_start,
- eden_start,
- sizeof(char)));
+ // See comments above on calculating eden_end.
+ size_t eden_size;
+ if (maintain_minimum) {
+ eden_size = pointer_delta(to_start, eden_start, sizeof(char));
+ } else {
+ eden_size = MIN2(requested_eden_size,
+ pointer_delta(to_start, eden_start, sizeof(char)));
+ }
eden_end = eden_start + eden_size;
assert(eden_end >= eden_start, "addition overflowed")
@@ -423,9 +458,47 @@
size_t old_from = from_space()->capacity_in_bytes();
size_t old_to = to_space()->capacity_in_bytes();
- eden_space()->initialize(edenMR, true);
- to_space()->initialize(toMR , true);
- from_space()->initialize(fromMR, false); // Note, not cleared!
+ if (ZapUnusedHeapArea) {
+ // NUMA is a special case because a numa space is not mangled
+ // in order to not prematurely bind its address to memory to
+ // the wrong memory (i.e., don't want the GC thread to first
+ // touch the memory). The survivor spaces are not numa
+ // spaces and are mangled.
+ if (UseNUMA) {
+ if (eden_from_to_order) {
+ mangle_survivors(from_space(), fromMR, to_space(), toMR);
+ } else {
+ mangle_survivors(to_space(), toMR, from_space(), fromMR);
+ }
+ }
+
+ // If not mangling the spaces, do some checking to verify that
+ // the spaces are already mangled.
+ // The spaces should be correctly mangled at this point so
+ // do some checking here. Note that they are not being mangled
+ // in the calls to initialize().
+ // Must check mangling before the spaces are reshaped. Otherwise,
+ // the bottom or end of one space may have moved into an area
+ // covered by another space and a failure of the check may
+ // not correctly indicate which space is not properly mangled.
+
+ HeapWord* limit = (HeapWord*) virtual_space()->high();
+ eden_space()->check_mangled_unused_area(limit);
+ from_space()->check_mangled_unused_area(limit);
+ to_space()->check_mangled_unused_area(limit);
+ }
+ // When an existing space is being initialized, it is not
+ // mangled because the space has been previously mangled.
+ eden_space()->initialize(edenMR,
+ SpaceDecorator::Clear,
+ SpaceDecorator::DontMangle);
+ to_space()->initialize(toMR,
+ SpaceDecorator::Clear,
+ SpaceDecorator::DontMangle);
+ from_space()->initialize(fromMR,
+ SpaceDecorator::DontClear,
+ SpaceDecorator::DontMangle);
+
PSScavenge::set_young_generation_boundary(eden_space()->bottom());
assert(from_space()->top() == old_from_top, "from top changed!");
@@ -446,7 +519,6 @@
}
space_invariants();
}
-
void ASPSYoungGen::reset_after_change() {
assert_locked_or_safepoint(Heap_lock);
@@ -458,7 +530,9 @@
HeapWord* eden_bottom = eden_space()->bottom();
if (new_eden_bottom != eden_bottom) {
MemRegion eden_mr(new_eden_bottom, eden_space()->end());
- eden_space()->initialize(eden_mr, true);
+ eden_space()->initialize(eden_mr,
+ SpaceDecorator::Clear,
+ SpaceDecorator::Mangle);
PSScavenge::set_young_generation_boundary(eden_space()->bottom());
}
MemRegion cmr((HeapWord*)virtual_space()->low(),
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp Fri Jul 25 11:29:03 2008 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp Mon Jul 28 15:30:23 2008 -0700
@@ -666,9 +666,9 @@
HeapWord* new_end_for_commit =
MIN2(cur_committed.end(), _guard_region.start());
- MemRegion new_committed =
- MemRegion(new_start_aligned, new_end_for_commit);
- if(!new_committed.is_empty()) {
+ if(new_start_aligned < new_end_for_commit) {
+ MemRegion new_committed =
+ MemRegion(new_start_aligned, new_end_for_commit);
if (!os::commit_memory((char*)new_committed.start(),
new_committed.byte_size())) {
vm_exit_out_of_memory(new_committed.byte_size(),
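The reordering above matters because a MemRegion built with end below start is invalid: its size is computed as an unsigned difference, and debug builds assert on it. Testing the bounds first means the region object is only constructed when it is known to be non-empty. A small sketch of the guarded-construction pattern with a toy region type:

    #include <cassert>
    #include <cstddef>
    #include <cstdio>

    struct ToyRegion {
      char* start;
      char* end;
      ToyRegion(char* s, char* e) : start(s), end(e) {
        assert(s <= e && "invalid region");
      }
      size_t byte_size() const { return (size_t)(end - start); }
    };

    int main() {
      char buf[64];
      char* new_start = buf + 16;
      char* end_for_commit = buf + 48;
      if (new_start < end_for_commit) {  // guard before constructing
        ToyRegion committed(new_start, end_for_commit);
        std::printf("commit %zu bytes\n", committed.byte_size());
      }
      return 0;
    }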
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp Fri Jul 25 11:29:03 2008 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp Mon Jul 28 15:30:23 2008 -0700
@@ -938,3 +938,23 @@
// Delegate the resize to the generation.
_old_gen->resize(desired_free_space);
}
+
+#ifndef PRODUCT
+void ParallelScavengeHeap::record_gen_tops_before_GC() {
+ if (ZapUnusedHeapArea) {
+ young_gen()->record_spaces_top();
+ old_gen()->record_spaces_top();
+ perm_gen()->record_spaces_top();
+ }
+}
+
+void ParallelScavengeHeap::gen_mangle_unused_area() {
+ if (ZapUnusedHeapArea) {
+ young_gen()->eden_space()->mangle_unused_area();
+ young_gen()->to_space()->mangle_unused_area();
+ young_gen()->from_space()->mangle_unused_area();
+ old_gen()->object_space()->mangle_unused_area();
+ perm_gen()->object_space()->mangle_unused_area();
+ }
+}
+#endif
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp Fri Jul 25 11:29:03 2008 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp Mon Jul 28 15:30:23 2008 -0700
@@ -213,6 +213,12 @@
// Resize the old generation. The reserved space for the
// generation may be expanded in preparation for the resize.
void resize_old_gen(size_t desired_free_space);
+
+ // Save the tops of the spaces in all generations
+ void record_gen_tops_before_GC() PRODUCT_RETURN;
+
+ // Mangle the unused parts of all spaces in the heap
+ void gen_mangle_unused_area() PRODUCT_RETURN;
};
inline size_t ParallelScavengeHeap::set_alignment(size_t& var, size_t val)
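PRODUCT_RETURN lets a debug-only hook be declared once and vanish from product builds. A simplified, compilable sketch of the idiom (HotSpot defines the real macro in its shared globalDefinitions header; this mirrors the shape):

    #include <cstdio>

    #ifdef PRODUCT
    #define PRODUCT_RETURN {}
    #else
    #define PRODUCT_RETURN /* next token must be ; */
    #endif

    struct Heap {
      // Empty inline body in product builds; plain declaration otherwise.
      void record_gen_tops_before_GC() PRODUCT_RETURN;
    };

    #ifndef PRODUCT
    void Heap::record_gen_tops_before_GC() {
      std::printf("recording space tops (debug builds only)\n");
    }
    #endif

    int main() {
      Heap h;
      h.record_gen_tops_before_GC();
      return 0;
    }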
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp Fri Jul 25 11:29:03 2008 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp Mon Jul 28 15:30:23 2008 -0700
@@ -98,6 +98,9 @@
// Increment the invocation count
heap->increment_total_collections(true /* full */);
+ // Save information needed to minimize mangling
+ heap->record_gen_tops_before_GC();
+
// We need to track unique mark sweep invocations as well.
_total_invocations++;
@@ -188,6 +191,12 @@
deallocate_stacks();
+ if (ZapUnusedHeapArea) {
+ // Do a complete mangle (top to end) because the usage for
+ // scratch does not maintain a top pointer.
+ young_gen->to_space()->mangle_unused_area_complete();
+ }
+
eden_empty = young_gen->eden_space()->is_empty();
if (!eden_empty) {
eden_empty = absorb_live_data_from_eden(size_policy, young_gen, old_gen);
@@ -198,7 +207,7 @@
Universe::update_heap_info_at_gc();
survivors_empty = young_gen->from_space()->is_empty() &&
- young_gen->to_space()->is_empty();
+ young_gen->to_space()->is_empty();
young_gen_empty = eden_empty && survivors_empty;
BarrierSet* bs = heap->barrier_set();
@@ -344,6 +353,11 @@
perm_gen->verify_object_start_array();
}
+ if (ZapUnusedHeapArea) {
+ old_gen->object_space()->check_mangled_unused_area_complete();
+ perm_gen->object_space()->check_mangled_unused_area_complete();
+ }
+
NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
if (PrintHeapAtGC) {
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.cpp Fri Jul 25 11:29:03 2008 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.cpp Mon Jul 28 15:30:23 2008 -0700
@@ -438,5 +438,7 @@
"should point inside space");
space()->set_top(compaction_top());
- if (mangle_free_space) space()->mangle_unused_area();
+ if (mangle_free_space) {
+ space()->mangle_unused_area();
+ }
}
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp Fri Jul 25 11:29:03 2008 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp Mon Jul 28 15:30:23 2008 -0700
@@ -87,6 +87,15 @@
MemRegion cmr((HeapWord*)virtual_space()->low(),
(HeapWord*)virtual_space()->high());
+ if (ZapUnusedHeapArea) {
+ // Mangle newly committed space immediately rather than
+ // waiting for the initialization of the space even though
+ // mangling is related to spaces. Doing it here eliminates
+ // the need to carry along information that a complete mangling
+ // (bottom to end) needs to be done.
+ SpaceMangler::mangle_region(cmr);
+ }
+
Universe::heap()->barrier_set()->resize_covered_region(cmr);
CardTableModRefBS* _ct = (CardTableModRefBS*)Universe::heap()->barrier_set();
@@ -112,7 +121,9 @@
if (_object_space == NULL)
vm_exit_during_initialization("Could not allocate an old gen space");
- object_space()->initialize(cmr, true);
+ object_space()->initialize(cmr,
+ SpaceDecorator::Clear,
+ SpaceDecorator::Mangle);
_object_mark_sweep = new PSMarkSweepDecorator(_object_space, start_array(), MarkSweepDeadRatio);
@@ -232,6 +243,19 @@
assert_locked_or_safepoint(Heap_lock);
bool result = virtual_space()->expand_by(bytes);
if (result) {
+ if (ZapUnusedHeapArea) {
+ // We need to mangle the newly expanded area. The memregion spans
+ // end -> new_end; we assume that top -> end is already mangled.
+ // Do the mangling before post_resize() is called because
+ // the space is available for allocation after post_resize().
+ HeapWord* const virtual_space_high = (HeapWord*) virtual_space()->high();
+ assert(object_space()->end() < virtual_space_high,
+ "Should be true before post_resize()");
+ MemRegion mangle_region(object_space()->end(), virtual_space_high);
+ // Note that the object space has not yet been updated to
+ // coincide with the new underlying virtual space.
+ SpaceMangler::mangle_region(mangle_region);
+ }
post_resize();
if (UsePerfData) {
_space_counters->update_capacity();
@@ -348,16 +372,7 @@
start_array()->set_covered_region(new_memregion);
Universe::heap()->barrier_set()->resize_covered_region(new_memregion);
- // Did we expand?
HeapWord* const virtual_space_high = (HeapWord*) virtual_space()->high();
- if (object_space()->end() < virtual_space_high) {
- // We need to mangle the newly expanded area. The memregion spans
- // end -> new_end, we assume that top -> end is already mangled.
- // This cannot be safely tested for, as allocation may be taking
- // place.
- MemRegion mangle_region(object_space()->end(), virtual_space_high);
- object_space()->mangle_region(mangle_region);
- }
// ALWAYS do this last!!
object_space()->set_end(virtual_space_high);
@@ -462,3 +477,10 @@
VerifyObjectStartArrayClosure check( this, &_start_array );
object_iterate(&check);
}
+
+#ifndef PRODUCT
+void PSOldGen::record_spaces_top() {
+ assert(ZapUnusedHeapArea, "Not mangling unused space");
+ object_space()->set_top_for_allocations();
+}
+#endif
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp Fri Jul 25 11:29:03 2008 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp Mon Jul 28 15:30:23 2008 -0700
@@ -185,4 +185,8 @@
// Printing support
virtual const char* name() const { return _name; }
+
+ // Debugging support
+ // Save the tops of all spaces for later use during mangling.
+ void record_spaces_top() PRODUCT_RETURN;
};
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Fri Jul 25 11:29:03 2008 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Mon Jul 28 15:30:23 2008 -0700
@@ -200,8 +200,8 @@
for (unsigned int id = 0; id < last_space_id; ++id) {
const MutableSpace* space = _space_info[id].space();
tty->print_cr("%u %s "
- SIZE_FORMAT_W("10") " " SIZE_FORMAT_W("10") " "
- SIZE_FORMAT_W("10") " " SIZE_FORMAT_W("10") " ",
+ SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " "
+ SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " ",
id, space_names[id],
summary_data().addr_to_chunk_idx(space->bottom()),
summary_data().addr_to_chunk_idx(space->top()),
@@ -213,8 +213,8 @@
void
print_generic_summary_chunk(size_t i, const ParallelCompactData::ChunkData* c)
{
-#define CHUNK_IDX_FORMAT SIZE_FORMAT_W("7")
-#define CHUNK_DATA_FORMAT SIZE_FORMAT_W("5")
+#define CHUNK_IDX_FORMAT SIZE_FORMAT_W(7)
+#define CHUNK_DATA_FORMAT SIZE_FORMAT_W(5)
ParallelCompactData& sd = PSParallelCompact::summary_data();
size_t dci = c->destination() ? sd.addr_to_chunk_idx(c->destination()) : 0;
@@ -269,9 +269,9 @@
const ParallelCompactData::ChunkData* c,
bool newline = true)
{
- tty->print(SIZE_FORMAT_W("5") " " PTR_FORMAT " "
- SIZE_FORMAT_W("5") " " SIZE_FORMAT_W("5") " "
- SIZE_FORMAT_W("5") " " SIZE_FORMAT_W("5") " %d",
+ tty->print(SIZE_FORMAT_W(5) " " PTR_FORMAT " "
+ SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " "
+ SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " %d",
i, c->destination(),
c->partial_obj_size(), c->live_obj_size(),
c->data_size(), c->source_chunk(), c->destination_count());
@@ -326,7 +326,7 @@
}
print_initial_summary_chunk(i, c, false);
- tty->print_cr(" %12.10f " SIZE_FORMAT_W("10") " " SIZE_FORMAT_W("10"),
+ tty->print_cr(" %12.10f " SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10),
reclaimed_ratio, dead_to_right, live_to_right);
live_to_right -= c->data_size();
@@ -338,8 +338,8 @@
print_initial_summary_chunk(i, summary_data.chunk(i));
}
- tty->print_cr("max: " SIZE_FORMAT_W("4") " d2r=" SIZE_FORMAT_W("10") " "
- "l2r=" SIZE_FORMAT_W("10") " max_ratio=%14.12f",
+ tty->print_cr("max: " SIZE_FORMAT_W(4) " d2r=" SIZE_FORMAT_W(10) " "
+ "l2r=" SIZE_FORMAT_W(10) " max_ratio=%14.12f",
max_reclaimed_ratio_chunk, max_dead_to_right,
max_live_to_right, max_reclaimed_ratio);
}
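These hunks change SIZE_FORMAT_W to take a bare width token instead of a quoted string, so the macro can stringize the token into the format string itself. A hedged sketch of how such a width-parameterized format macro is typically built (HotSpot's actual definition varies by platform):

    #include <cstddef>
    #include <cstdio>

    #define WIDTH_STR(x) #x
    // SIZE_FORMAT_W(10) expands to "%" "10" "zu", i.e. "%10zu".
    #define SIZE_FORMAT_W(width) "%" WIDTH_STR(width) "zu"

    int main() {
      size_t n = 42;
      // Prints n right-aligned in a 10-column field.
      std::printf(SIZE_FORMAT_W(10) "\n", n);
      return 0;
    }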
@@ -1060,6 +1060,10 @@
ref_processor()->enqueue_discovered_references(NULL);
+ if (ZapUnusedHeapArea) {
+ heap->gen_mangle_unused_area();
+ }
+
// Update time of last GC
reset_millis_since_last_gc();
}
@@ -1119,8 +1123,8 @@
HeapWord* chunk_destination = cp->destination();
const size_t cur_deadwood = pointer_delta(dense_prefix, chunk_destination);
if (TraceParallelOldGCDensePrefix && Verbose) {
- tty->print_cr("c#=" SIZE_FORMAT_W("04") " dst=" PTR_FORMAT " "
- "dp=" SIZE_FORMAT_W("08") " " "cdw=" SIZE_FORMAT_W("08"),
+ tty->print_cr("c#=" SIZE_FORMAT_W(4) " dst=" PTR_FORMAT " "
+ "dp=" SIZE_FORMAT_W(8) " " "cdw=" SIZE_FORMAT_W(8),
sd.chunk(cp), chunk_destination,
dense_prefix, cur_deadwood);
}
@@ -1145,7 +1149,7 @@
return dense_prefix;
}
if (TraceParallelOldGCDensePrefix && Verbose) {
- tty->print_cr("backing up from c=" SIZE_FORMAT_W("4") " d2r=%10.8f "
+ tty->print_cr("backing up from c=" SIZE_FORMAT_W(4) " d2r=%10.8f "
"pc_d2r=%10.8f", sd.chunk(cp), density_to_right,
prev_chunk_density_to_right);
}
@@ -1182,7 +1186,7 @@
const size_t live_to_right = new_top - cp->destination();
const size_t dead_to_right = space->top() - addr - live_to_right;
- tty->print_cr("%s=" PTR_FORMAT " dpc=" SIZE_FORMAT_W("05") " "
+ tty->print_cr("%s=" PTR_FORMAT " dpc=" SIZE_FORMAT_W(5) " "
"spl=" SIZE_FORMAT " "
"d2l=" SIZE_FORMAT " d2l%%=%6.4f "
"d2r=" SIZE_FORMAT " l2r=" SIZE_FORMAT
@@ -1522,48 +1526,53 @@
PSParallelCompact::summarize_space(SpaceId id, bool maximum_compaction)
{
assert(id < last_space_id, "id out of range");
+ assert(_space_info[id].dense_prefix() == _space_info[id].space()->bottom(),
+ "should have been set in summarize_spaces_quick()");
const MutableSpace* space = _space_info[id].space();
- HeapWord** new_top_addr = _space_info[id].new_top_addr();
-
- HeapWord* dense_prefix_end = compute_dense_prefix(id, maximum_compaction);
- _space_info[id].set_dense_prefix(dense_prefix_end);
+ if (_space_info[id].new_top() != space->bottom()) {
+ HeapWord* dense_prefix_end = compute_dense_prefix(id, maximum_compaction);
+ _space_info[id].set_dense_prefix(dense_prefix_end);
#ifndef PRODUCT
- if (TraceParallelOldGCDensePrefix) {
- print_dense_prefix_stats("ratio", id, maximum_compaction, dense_prefix_end);
- HeapWord* addr = compute_dense_prefix_via_density(id, maximum_compaction);
- print_dense_prefix_stats("density", id, maximum_compaction, addr);
- }
+ if (TraceParallelOldGCDensePrefix) {
+ print_dense_prefix_stats("ratio", id, maximum_compaction,
+ dense_prefix_end);
+ HeapWord* addr = compute_dense_prefix_via_density(id, maximum_compaction);
+ print_dense_prefix_stats("density", id, maximum_compaction, addr);
+ }
#endif // #ifndef PRODUCT
- // If dead space crosses the dense prefix boundary, it is (at least partially)
- // filled with a dummy object, marked live and added to the summary data.
- // This simplifies the copy/update phase and must be done before the final
- // locations of objects are determined, to prevent leaving a fragment of dead
- // space that is too small to fill with an object.
- if (!maximum_compaction && dense_prefix_end != space->bottom()) {
- fill_dense_prefix_end(id);
+ // If dead space crosses the dense prefix boundary, it is (at least
+ // partially) filled with a dummy object, marked live and added to the
+ // summary data. This simplifies the copy/update phase and must be done
+ // before the final locations of objects are determined, to prevent leaving
+ // a fragment of dead space that is too small to fill with an object.
+ if (!maximum_compaction && dense_prefix_end != space->bottom()) {
+ fill_dense_prefix_end(id);
+ }
+
+ // Compute the destination of each Chunk, and thus each object.
+ _summary_data.summarize_dense_prefix(space->bottom(), dense_prefix_end);
+ _summary_data.summarize(dense_prefix_end, space->end(),
+ dense_prefix_end, space->top(),
+ _space_info[id].new_top_addr());
}
- // Compute the destination of each Chunk, and thus each object.
- _summary_data.summarize_dense_prefix(space->bottom(), dense_prefix_end);
- _summary_data.summarize(dense_prefix_end, space->end(),
- dense_prefix_end, space->top(),
- new_top_addr);
-
if (TraceParallelOldGCSummaryPhase) {
const size_t chunk_size = ParallelCompactData::ChunkSize;
+ HeapWord* const dense_prefix_end = _space_info[id].dense_prefix();
const size_t dp_chunk = _summary_data.addr_to_chunk_idx(dense_prefix_end);
const size_t dp_words = pointer_delta(dense_prefix_end, space->bottom());
- const HeapWord* nt_aligned_up = _summary_data.chunk_align_up(*new_top_addr);
+ HeapWord* const new_top = _space_info[id].new_top();
+ const HeapWord* nt_aligned_up = _summary_data.chunk_align_up(new_top);
const size_t cr_words = pointer_delta(nt_aligned_up, dense_prefix_end);
tty->print_cr("id=%d cap=" SIZE_FORMAT " dp=" PTR_FORMAT " "
"dp_chunk=" SIZE_FORMAT " " "dp_count=" SIZE_FORMAT " "
"cr_count=" SIZE_FORMAT " " "nt=" PTR_FORMAT,
id, space->capacity_in_words(), dense_prefix_end,
dp_chunk, dp_words / chunk_size,
- cr_words / chunk_size, *new_top_addr);
+ cr_words / chunk_size, new_top);
}
}
@@ -1632,7 +1641,7 @@
const size_t live = pointer_delta(_space_info[id].new_top(),
space->bottom());
const size_t available = pointer_delta(target_space_end, *new_top_addr);
- if (live <= available) {
+ if (live > 0 && live <= available) {
// All the live data will fit.
if (TraceParallelOldGCSummaryPhase) {
tty->print_cr("summarizing %d into old_space @ " PTR_FORMAT,
@@ -1642,16 +1651,18 @@
space->bottom(), space->top(),
new_top_addr);
- // Reset the new_top value for the space.
- _space_info[id].set_new_top(space->bottom());
-
// Clear the source_chunk field for each chunk in the space.
+ HeapWord* const new_top = _space_info[id].new_top();
+ HeapWord* const clear_end = _summary_data.chunk_align_up(new_top);
ChunkData* beg_chunk = _summary_data.addr_to_chunk_ptr(space->bottom());
- ChunkData* end_chunk = _summary_data.addr_to_chunk_ptr(space->top() - 1);
- while (beg_chunk <= end_chunk) {
+ ChunkData* end_chunk = _summary_data.addr_to_chunk_ptr(clear_end);
+ while (beg_chunk < end_chunk) {
beg_chunk->set_source_chunk(0);
++beg_chunk;
}
+
+ // Reset the new_top value for the space.
+ _space_info[id].set_new_top(space->bottom());
}
}
@@ -1961,6 +1972,11 @@
PSPermGen* perm_gen = heap->perm_gen();
PSAdaptiveSizePolicy* size_policy = heap->size_policy();
+ if (ZapUnusedHeapArea) {
+ // Save information needed to minimize mangling
+ heap->record_gen_tops_before_GC();
+ }
+
_print_phases = PrintGCDetails && PrintParallelOldGCPhaseTimes;
// Make sure data structures are sane, make the heap parsable, and do other
@@ -2129,17 +2145,19 @@
size_t max_eden_size = young_gen->max_size() -
young_gen->from_space()->capacity_in_bytes() -
young_gen->to_space()->capacity_in_bytes();
- size_policy->compute_generation_free_space(young_gen->used_in_bytes(),
- young_gen->eden_space()->used_in_bytes(),
- old_gen->used_in_bytes(),
- perm_gen->used_in_bytes(),
- young_gen->eden_space()->capacity_in_bytes(),
- old_gen->max_gen_size(),
- max_eden_size,
- true /* full gc*/,
- gc_cause);
-
- heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());
+ size_policy->compute_generation_free_space(
+ young_gen->used_in_bytes(),
+ young_gen->eden_space()->used_in_bytes(),
+ old_gen->used_in_bytes(),
+ perm_gen->used_in_bytes(),
+ young_gen->eden_space()->capacity_in_bytes(),
+ old_gen->max_gen_size(),
+ max_eden_size,
+ true /* full gc*/,
+ gc_cause);
+
+ heap->resize_old_gen(
+ size_policy->calculated_old_free_size_in_bytes());
// Don't resize the young generation at an major collection. A
// desired young generation size may have been calculated but
@@ -2212,6 +2230,11 @@
perm_gen->verify_object_start_array();
}
+ if (ZapUnusedHeapArea) {
+ old_gen->object_space()->check_mangled_unused_area_complete();
+ perm_gen->object_space()->check_mangled_unused_area_complete();
+ }
+
NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
collection_exit.update();
@@ -2499,7 +2522,7 @@
if (TraceParallelOldGCCompactionPhase && Verbose) {
const size_t count_mod_8 = fillable_chunks & 7;
if (count_mod_8 == 0) gclog_or_tty->print("fillable: ");
- gclog_or_tty->print(" " SIZE_FORMAT_W("7"), cur);
+ gclog_or_tty->print(" " SIZE_FORMAT_W(7), cur);
if (count_mod_8 == 7) gclog_or_tty->cr();
}
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp Fri Jul 25 11:29:03 2008 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp Mon Jul 28 15:30:23 2008 -0700
@@ -716,6 +716,99 @@
virtual IterationStatus do_addr(HeapWord* addr, size_t words);
};
+// The UseParallelOldGC collector is a stop-the-world garbage
+// collector that does parts of the collection using parallel threads.
+// The collection includes the tenured generation and the young
+// generation. The permanent generation is collected at the same
+// time as the other two generations but the permanent generation
+// is collected by a single GC thread. The permanent generation is
+// collected serially because of the requirement that during the
+// processing of a klass AAA, any objects referenced by AAA must
+// already have been processed. This requirement is enforced by
+// a left (lower address) to right (higher address) sliding compaction.
+//
+// There are four phases of the collection.
+//
+// - marking phase
+// - summary phase
+// - compacting phase
+// - clean up phase
+//
+// Roughly speaking these phases correspond, respectively, to
+// - mark all the live objects
+// - calculate the destination of each object at the end of the collection
+// - move the objects to their destination
+// - update some references and reinitialize some variables
+//
+// These phases are invoked in PSParallelCompact::invoke_no_policy().
+// The marking phase is implemented in PSParallelCompact::marking_phase()
+// and does a complete marking of the heap.
+// The summary phase is implemented in PSParallelCompact::summary_phase().
+// The move and update phase is implemented in PSParallelCompact::compact().
+//
+// A space that is being collected is divided into chunks, and each
+// chunk has associated data of type ParallelCompactData::ChunkData.
+// Each chunk is of a fixed size and typically will contain more than
+// 1 object and may have parts of objects at the front and back of the
+// chunk.
+//
+// chunk -----+---------------------+----------
+// objects covered [ AAA )[ BBB )[ CCC )[ DDD )
+//
+// The marking phase does a complete marking of all live objects in the
+// heap. The marking also compiles the size of the data for
+// all live objects covered by the chunk. This size includes the
+// part of any live object spanning onto the chunk (part of AAA
+// if it is live) from the front, all live objects contained in the chunk
+// (BBB and/or CCC if they are live), and the part of any live objects
+// covered by the chunk that extends off the chunk (part of DDD if it is
+// live). The marking phase uses multiple GC threads and marking is
+// done in a bit array of type ParMarkBitMap. The marking of the
+// bit map is done atomically as is the accumulation of the size of the
+// live objects covered by a chunk.
+//
+// The summary phase calculates the total live data to the left of
+// each chunk XXX. Based on that total and the bottom of the space,
+// it can calculate the starting location of the live data in XXX.
+// The summary phase calculates for each chunk XXX quantities such as
+//
+// - the amount of live data at the beginning of a chunk from an object
+// entering the chunk.
+// - the location of the first live data on the chunk
+// - a count of the number of chunks receiving live data from XXX.
+//
+// See ParallelCompactData for precise details. The summary phase also
+// calculates the dense prefix for the compaction. The dense prefix
+// is a portion at the beginning of the space that is not moved. The
+// objects in the dense prefix do need to have their object references
+// updated. See method summarize_dense_prefix().
+//
+// The summary phase is done using 1 GC thread.
+//
+// The compaction phase moves objects to their new location and updates
+// all references in the object.
+//
+// A current exception is that objects that cross a chunk boundary
+// are moved but do not have their references updated. References are
+// not updated because it cannot easily be determined if the klass
+// pointer KKK for the object AAA has been updated. KKK likely resides
+// in a chunk to the left of the chunk containing AAA. These AAA's
+// have their references updated at the end in a clean up phase.
+// See the method PSParallelCompact::update_deferred_objects(). An
+// alternate strategy is being investigated for this deferral of updating.
+//
+// Compaction is done on a chunk basis. A chunk that is ready to be
+// filled is put on a ready list and GC threads take chunks off the list
+// and fill them. A chunk is ready to be filled if it is
+// empty of live objects. Such a chunk may have been initially empty
+// (containing only dead objects) or may have had all its live objects
+// copied out already.
+// A chunk that compacts into itself is also ready for filling. The
+// ready list is initially filled with empty chunks and chunks compacting
+// into themselves. There is always at least 1 chunk that can be put on
+// the ready list. The chunks are atomically added and removed from
+// the ready list.
+//
class PSParallelCompact : AllStatic {
public:
// Convenient access to type names.
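The destination calculation the comment block describes amounts to a prefix sum: once the live size covered by each fixed-size chunk is known, the data in chunk i is copied to the space bottom plus the total live data in chunks 0..i-1. An illustrative sketch with toy values (not ParallelCompactData's real layout):

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    int main() {
      const size_t bottom = 0x1000;         // toy space bottom (word index)
      size_t live_words[] = {10, 0, 7, 3};  // live data per chunk
      const int n = sizeof(live_words) / sizeof(live_words[0]);

      std::vector<size_t> destination(n);
      size_t live_to_left = 0;  // sum of live data in chunks to the left
      for (int i = 0; i < n; ++i) {
        destination[i] = bottom + live_to_left;  // where chunk i's data goes
        live_to_left += live_words[i];
      }
      for (int i = 0; i < n; ++i) {
        std::printf("chunk %d -> 0x%zx\n", i, destination[i]);
      }
      return 0;
    }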
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp Fri Jul 25 11:29:03 2008 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp Mon Jul 28 15:30:23 2008 -0700
@@ -265,6 +265,11 @@
young_gen->eden_space()->accumulate_statistics();
}
+ if (ZapUnusedHeapArea) {
+ // Save information needed to minimize mangling
+ heap->record_gen_tops_before_GC();
+ }
+
if (PrintHeapAtGC) {
Universe::print_heap_before_gc();
}
@@ -315,7 +320,7 @@
if (!ScavengeWithObjectsInToSpace) {
assert(young_gen->to_space()->is_empty(),
"Attempt to scavenge with live objects in to_space");
- young_gen->to_space()->clear();
+ young_gen->to_space()->clear(SpaceDecorator::Mangle);
} else if (ZapUnusedHeapArea) {
young_gen->to_space()->mangle_unused_area();
}
@@ -437,8 +442,10 @@
if (!promotion_failure_occurred) {
// Swap the survivor spaces.
- young_gen->eden_space()->clear();
- young_gen->from_space()->clear();
+ young_gen->eden_space()->clear(SpaceDecorator::Mangle);
+ young_gen->from_space()->clear(SpaceDecorator::Mangle);
young_gen->swap_spaces();
size_t survived = young_gen->from_space()->used_in_bytes();
@@ -600,6 +607,12 @@
Universe::print_heap_after_gc();
}
+ if (ZapUnusedHeapArea) {
+ young_gen->eden_space()->check_mangled_unused_area_complete();
+ young_gen->from_space()->check_mangled_unused_area_complete();
+ young_gen->to_space()->check_mangled_unused_area_complete();
+ }
+
scavenge_exit.update();
if (PrintGCTaskTimeStamps) {
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp Fri Jul 25 11:29:03 2008 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp Mon Jul 28 15:30:23 2008 -0700
@@ -36,7 +36,7 @@
void PSYoungGen::initialize_virtual_space(ReservedSpace rs, size_t alignment) {
assert(_init_gen_size != 0, "Should have a finite size");
_virtual_space = new PSVirtualSpace(rs, alignment);
- if (!_virtual_space->expand_by(_init_gen_size)) {
+ if (!virtual_space()->expand_by(_init_gen_size)) {
vm_exit_during_initialization("Could not reserve enough space for "
"object heap");
}
@@ -49,12 +49,19 @@
void PSYoungGen::initialize_work() {
- _reserved = MemRegion((HeapWord*)_virtual_space->low_boundary(),
- (HeapWord*)_virtual_space->high_boundary());
+ _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
+ (HeapWord*)virtual_space()->high_boundary());
+
+ MemRegion cmr((HeapWord*)virtual_space()->low(),
+ (HeapWord*)virtual_space()->high());
+ Universe::heap()->barrier_set()->resize_covered_region(cmr);
- MemRegion cmr((HeapWord*)_virtual_space->low(),
- (HeapWord*)_virtual_space->high());
- Universe::heap()->barrier_set()->resize_covered_region(cmr);
+ if (ZapUnusedHeapArea) {
+ // Mangle newly committed space immediately because it
+ // can be done here more simply than after the new
+ // spaces have been computed.
+ SpaceMangler::mangle_region(cmr);
+ }
if (UseNUMA) {
_eden_space = new MutableNUMASpace();
@@ -89,7 +96,7 @@
// Compute maximum space sizes for performance counters
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
size_t alignment = heap->intra_heap_alignment();
- size_t size = _virtual_space->reserved_size();
+ size_t size = virtual_space()->reserved_size();
size_t max_survivor_size;
size_t max_eden_size;
@@ -142,7 +149,7 @@
// Compute sizes
size_t alignment = heap->intra_heap_alignment();
- size_t size = _virtual_space->committed_size();
+ size_t size = virtual_space()->committed_size();
size_t survivor_size = size / InitialSurvivorRatio;
survivor_size = align_size_down(survivor_size, alignment);
@@ -164,18 +171,18 @@
}
void PSYoungGen::set_space_boundaries(size_t eden_size, size_t survivor_size) {
- assert(eden_size < _virtual_space->committed_size(), "just checking");
+ assert(eden_size < virtual_space()->committed_size(), "just checking");
assert(eden_size > 0 && survivor_size > 0, "just checking");
// Initial layout is Eden, to, from. After swapping survivor spaces,
// that leaves us with Eden, from, to, which is step one in our two
// step resize-with-live-data procedure.
- char *eden_start = _virtual_space->low();
+ char *eden_start = virtual_space()->low();
char *to_start = eden_start + eden_size;
char *from_start = to_start + survivor_size;
char *from_end = from_start + survivor_size;
- assert(from_end == _virtual_space->high(), "just checking");
+ assert(from_end == virtual_space()->high(), "just checking");
assert(is_object_aligned((intptr_t)eden_start), "checking alignment");
assert(is_object_aligned((intptr_t)to_start), "checking alignment");
assert(is_object_aligned((intptr_t)from_start), "checking alignment");
@@ -184,9 +191,9 @@
MemRegion to_mr ((HeapWord*)to_start, (HeapWord*)from_start);
MemRegion from_mr((HeapWord*)from_start, (HeapWord*)from_end);
- eden_space()->initialize(eden_mr, true);
- to_space()->initialize(to_mr , true);
- from_space()->initialize(from_mr, true);
+ eden_space()->initialize(eden_mr, true, ZapUnusedHeapArea);
+ to_space()->initialize(to_mr , true, ZapUnusedHeapArea);
+ from_space()->initialize(from_mr, true, ZapUnusedHeapArea);
}
#ifndef PRODUCT
@@ -207,7 +214,7 @@
char* to_start = (char*)to_space()->bottom();
char* to_end = (char*)to_space()->end();
- guarantee(eden_start >= _virtual_space->low(), "eden bottom");
+ guarantee(eden_start >= virtual_space()->low(), "eden bottom");
guarantee(eden_start < eden_end, "eden space consistency");
guarantee(from_start < from_end, "from space consistency");
guarantee(to_start < to_end, "to space consistency");
@@ -217,29 +224,29 @@
// Eden, from, to
guarantee(eden_end <= from_start, "eden/from boundary");
guarantee(from_end <= to_start, "from/to boundary");
- guarantee(to_end <= _virtual_space->high(), "to end");
+ guarantee(to_end <= virtual_space()->high(), "to end");
} else {
// Eden, to, from
guarantee(eden_end <= to_start, "eden/to boundary");
guarantee(to_end <= from_start, "to/from boundary");
- guarantee(from_end <= _virtual_space->high(), "from end");
+ guarantee(from_end <= virtual_space()->high(), "from end");
}
// More checks that the virtual space is consistent with the spaces
- assert(_virtual_space->committed_size() >=
+ assert(virtual_space()->committed_size() >=
(eden_space()->capacity_in_bytes() +
to_space()->capacity_in_bytes() +
from_space()->capacity_in_bytes()), "Committed size is inconsistent");
- assert(_virtual_space->committed_size() <= _virtual_space->reserved_size(),
+ assert(virtual_space()->committed_size() <= virtual_space()->reserved_size(),
"Space invariant");
char* eden_top = (char*)eden_space()->top();
char* from_top = (char*)from_space()->top();
char* to_top = (char*)to_space()->top();
- assert(eden_top <= _virtual_space->high(), "eden top");
- assert(from_top <= _virtual_space->high(), "from top");
- assert(to_top <= _virtual_space->high(), "to top");
+ assert(eden_top <= virtual_space()->high(), "eden top");
+ assert(from_top <= virtual_space()->high(), "from top");
+ assert(to_top <= virtual_space()->high(), "to top");
- _virtual_space->verify();
+ virtual_space()->verify();
}
#endif
@@ -265,8 +272,8 @@
bool PSYoungGen::resize_generation(size_t eden_size, size_t survivor_size) {
- const size_t alignment = _virtual_space->alignment();
- size_t orig_size = _virtual_space->committed_size();
+ const size_t alignment = virtual_space()->alignment();
+ size_t orig_size = virtual_space()->committed_size();
bool size_changed = false;
// There used to be this guarantee there.
@@ -288,10 +295,18 @@
// Grow the generation
size_t change = desired_size - orig_size;
assert(change % alignment == 0, "just checking");
- if (!_virtual_space->expand_by(change)) {
+ HeapWord* prev_high = (HeapWord*) virtual_space()->high();
+ if (!virtual_space()->expand_by(change)) {
return false; // Error if we fail to resize!
}
-
+ if (ZapUnusedHeapArea) {
+ // Mangle newly committed space immediately because it
+ // can be done here more simply than after the new
+ // spaces have been computed.
+ HeapWord* new_high = (HeapWord*) virtual_space()->high();
+ MemRegion mangle_region(prev_high, new_high);
+ SpaceMangler::mangle_region(mangle_region);
+ }
size_changed = true;
} else if (desired_size < orig_size) {
size_t desired_change = orig_size - desired_size;
@@ -321,19 +336,95 @@
post_resize();
if (Verbose && PrintGC) {
- size_t current_size = _virtual_space->committed_size();
+ size_t current_size = virtual_space()->committed_size();
gclog_or_tty->print_cr("PSYoung generation size changed: "
SIZE_FORMAT "K->" SIZE_FORMAT "K",
orig_size/K, current_size/K);
}
}
- guarantee(eden_plus_survivors <= _virtual_space->committed_size() ||
- _virtual_space->committed_size() == max_size(), "Sanity");
+ guarantee(eden_plus_survivors <= virtual_space()->committed_size() ||
+ virtual_space()->committed_size() == max_size(), "Sanity");
return true;
}
+#ifndef PRODUCT
+// In the numa case eden is not mangled so a survivor space
+// moving into a region previously occupied by a survivor
+// may find an unmangled region. Also in the PS case eden,
+// to-space, and from-space may not touch (i.e., there may be
+// gaps between them due to movement while resizing the
+// spaces). Those gaps must be mangled.
+void PSYoungGen::mangle_survivors(MutableSpace* s1,
+ MemRegion s1MR,
+ MutableSpace* s2,
+ MemRegion s2MR) {
+ // Check eden and gap between eden and from-space, in deciding
+ // what to mangle in from-space. Check the gap between from-space
+ // and to-space when deciding what to mangle.
+ //
+ // +--------+ +----+ +---+
+ // | eden | |s1 | |s2 |
+ // +--------+ +----+ +---+
+ // +-------+ +-----+
+ // |s1MR | |s2MR |
+ // +-------+ +-----+
+ // All of survivor-space is properly mangled so find the
+ // upper bound on the mangling for any portion above current s1.
+ HeapWord* delta_end = MIN2(s1->bottom(), s1MR.end());
+ MemRegion delta1_left;
+ if (s1MR.start() < delta_end) {
+ delta1_left = MemRegion(s1MR.start(), delta_end);
+ s1->mangle_region(delta1_left);
+ }
+ // Find any portion to the right of the current s1.
+ HeapWord* delta_start = MAX2(s1->end(), s1MR.start());
+ MemRegion delta1_right;
+ if (delta_start < s1MR.end()) {
+ delta1_right = MemRegion(delta_start, s1MR.end());
+ s1->mangle_region(delta1_right);
+ }
+
+ // Similarly for the second survivor space except that
+ // any of the new region that overlaps with the current
+ // region of the first survivor space has already been
+ // mangled.
+ delta_end = MIN2(s2->bottom(), s2MR.end());
+ delta_start = MAX2(s2MR.start(), s1->end());
+ MemRegion delta2_left;
+ if (s2MR.start() < delta_end) {
+ delta2_left = MemRegion(s2MR.start(), delta_end);
+ s2->mangle_region(delta2_left);
+ }
+ delta_start = MAX2(s2->end(), s2MR.start());
+ MemRegion delta2_right;
+ if (delta_start < s2MR.end()) {
+ delta2_right = MemRegion(delta_start, s2MR.end());
+ s2->mangle_region(delta2_right);
+ }
+
+ if (TraceZapUnusedHeapArea) {
+ // s1
+ gclog_or_tty->print_cr("Current region: [" PTR_FORMAT ", " PTR_FORMAT ") "
+ "New region: [" PTR_FORMAT ", " PTR_FORMAT ")",
+ s1->bottom(), s1->end(), s1MR.start(), s1MR.end());
+ gclog_or_tty->print_cr(" Mangle before: [" PTR_FORMAT ", "
+ PTR_FORMAT ") Mangle after: [" PTR_FORMAT ", " PTR_FORMAT ")",
+ delta1_left.start(), delta1_left.end(), delta1_right.start(),
+ delta1_right.end());
+
+ // s2
+ gclog_or_tty->print_cr("Current region: [" PTR_FORMAT ", " PTR_FORMAT ") "
+ "New region: [" PTR_FORMAT ", " PTR_FORMAT ")",
+ s2->bottom(), s2->end(), s2MR.start(), s2MR.end());
+ gclog_or_tty->print_cr(" Mangle before: [" PTR_FORMAT ", "
+ PTR_FORMAT ") Mangle after: [" PTR_FORMAT ", " PTR_FORMAT ")",
+ delta2_left.start(), delta2_left.end(), delta2_right.start(),
+ delta2_right.end());
+ }
+
+}
+#endif // NOT PRODUCT
void PSYoungGen::resize_spaces(size_t requested_eden_size,
size_t requested_survivor_size) {
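mangle_survivors() only touches the parts of a new survivor region that fall outside the currently mangled space, split into a piece below the space and a piece above it, exactly as in the ASCII diagram above. A self-contained sketch of that delta computation, with plain pointers standing in for MutableSpace and MemRegion:

    #include <algorithm>
    #include <cstdio>

    // Mangle only the parts of new region [mr_start, mr_end) that lie
    // outside the current space [bottom, end); print instead of filling.
    static void mangle_deltas(const char* bottom, const char* end,
                              const char* mr_start, const char* mr_end) {
      const char* left_end = std::min(bottom, mr_end);
      if (mr_start < left_end) {   // piece below the current space
        std::printf("mangle left  [%p, %p)\n",
                    (const void*)mr_start, (const void*)left_end);
      }
      const char* right_start = std::max(end, mr_start);
      if (right_start < mr_end) {  // piece above the current space
        std::printf("mangle right [%p, %p)\n",
                    (const void*)right_start, (const void*)mr_end);
      }
    }

    int main() {
      char heap[64];
      // Current survivor occupies [16, 32); its new region is [8, 48).
      mangle_deltas(heap + 16, heap + 32, heap + 8, heap + 48);
      return 0;
    }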
@@ -396,9 +487,11 @@
const bool maintain_minimum =
(requested_eden_size + 2 * requested_survivor_size) <= min_gen_size();
+ bool eden_from_to_order = from_start < to_start;
// Check whether from space is below to space
- if (from_start < to_start) {
+ if (eden_from_to_order) {
// Eden, from, to
if (PrintAdaptiveSizePolicy && Verbose) {
gclog_or_tty->print_cr(" Eden, from, to:");
}
@@ -435,7 +528,7 @@
// extra calculations.
// First calculate an optimal to-space
- to_end = (char*)_virtual_space->high();
+ to_end = (char*)virtual_space()->high();
to_start = (char*)pointer_delta(to_end, (char*)requested_survivor_size,
sizeof(char));
@@ -491,7 +584,7 @@
// to space as if we were able to resize from space, even though from
// space is not modified.
// Giving eden priority was tried and gave poorer performance.
- to_end = (char*)pointer_delta(_virtual_space->high(),
+ to_end = (char*)pointer_delta(virtual_space()->high(),
(char*)requested_survivor_size,
sizeof(char));
to_end = MIN2(to_end, from_start);
@@ -560,9 +653,45 @@
size_t old_from = from_space()->capacity_in_bytes();
size_t old_to = to_space()->capacity_in_bytes();
- eden_space()->initialize(edenMR, true);
- to_space()->initialize(toMR , true);
- from_space()->initialize(fromMR, false); // Note, not cleared!
+ if (ZapUnusedHeapArea) {
+ // NUMA is a special case because a numa space is not mangled
+ // in order to not prematurely bind its address to memory to
+ // the wrong memory (i.e., don't want the GC thread to first
+ // touch the memory). The survivor spaces are not numa
+ // spaces and are mangled.
+ if (UseNUMA) {
+ if (eden_from_to_order) {
+ mangle_survivors(from_space(), fromMR, to_space(), toMR);
+ } else {
+ mangle_survivors(to_space(), toMR, from_space(), fromMR);
+ }
+ }
+
+ // If not mangling the spaces, do some checking to verify that
+ // the spaces are already mangled.
+ // The spaces should be correctly mangled at this point so
+ // do some checking here. Note that they are not being mangled
+ // in the calls to initialize().
+ // Must check mangling before the spaces are reshaped. Otherwise,
+ // the bottom or end of one space may have moved into an area
+ // covered by another space and a failure of the check may
+ // not correctly indicate which space is not properly mangled.
+ HeapWord* limit = (HeapWord*) virtual_space()->high();
+ eden_space()->check_mangled_unused_area(limit);
+ from_space()->check_mangled_unused_area(limit);
+ to_space()->check_mangled_unused_area(limit);
+ }
+ // When an existing space is being initialized, it is not
+ // mangled because the space has been previously mangled.
+ eden_space()->initialize(edenMR,
+ SpaceDecorator::Clear,
+ SpaceDecorator::DontMangle);
+ to_space()->initialize(toMR,
+ SpaceDecorator::Clear,
+ SpaceDecorator::DontMangle);
+ from_space()->initialize(fromMR,
+ SpaceDecorator::DontClear,
+ SpaceDecorator::DontMangle);
assert(from_space()->top() == old_from_top, "from top changed!");
@@ -671,7 +800,7 @@
st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
capacity_in_bytes()/K, used_in_bytes()/K);
}
- _virtual_space->print_space_boundaries_on(st);
+ virtual_space()->print_space_boundaries_on(st);
st->print(" eden"); eden_space()->print_on(st);
st->print(" from"); from_space()->print_on(st);
st->print(" to "); to_space()->print_on(st);
@@ -774,7 +903,9 @@
// Was there a shrink of the survivor space?
if (new_end < space_shrinking->end()) {
MemRegion mr(space_shrinking->bottom(), new_end);
- space_shrinking->initialize(mr, false /* clear */);
+ space_shrinking->initialize(mr,
+ SpaceDecorator::DontClear,
+ SpaceDecorator::Mangle);
}
}
@@ -809,3 +940,12 @@
from_space()->verify(allow_dirty);
to_space()->verify(allow_dirty);
}
+
+#ifndef PRODUCT
+void PSYoungGen::record_spaces_top() {
+ assert(ZapUnusedHeapArea, "Not mangling unused space");
+ eden_space()->set_top_for_allocations();
+ from_space()->set_top_for_allocations();
+ to_space()->set_top_for_allocations();
+}
+#endif
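
record_spaces_top() saves each space's top as the mangling high-water mark; a later mangle_unused_area() then rezaps only [top, top_for_allocations). A toy model of that bookkeeping (only the shape of the protocol, none of this is HotSpot API):

    #include <algorithm>
    #include <cstring>

    struct ToySpace {
      char* bottom;
      char* top;
      char* end;
      char* top_for_allocations;   // high water mark saved before a GC

      // record_spaces_top() equivalent: call before each collection.
      void record_top_before_gc() { top_for_allocations = top; }

      // After a GC lowers top, remangle only the newly freed part,
      // guarding against the space having shrunk in the meantime.
      void mangle_unused_area(unsigned char zap = 0xBA) {
        char* mangled_end = std::min(top_for_allocations, end);
        if (top < mangled_end) {
          std::memset(top, zap, mangled_end - top);
        }
      }
    };
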
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.hpp Fri Jul 25 11:29:03 2008 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.hpp Mon Jul 28 15:30:23 2008 -0700
@@ -179,4 +179,12 @@
// Space boundary invariant checker
void space_invariants() PRODUCT_RETURN;
+
+ // Helper for mangling survivor spaces.
+ void mangle_survivors(MutableSpace* s1,
+ MemRegion s1MR,
+ MutableSpace* s2,
+ MemRegion s2MR) PRODUCT_RETURN;
+
+ void record_spaces_top() PRODUCT_RETURN;
};
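
The PRODUCT_RETURN suffix is what makes these debug-only hooks disappear from product builds: it expands to an empty inline body there, and to nothing (so an out-of-line definition is expected) in debug builds. Roughly, following HotSpot's macro definitions:

    #ifdef PRODUCT
    #define PRODUCT_RETURN   {}                 // inline no-op body
    #define PRODUCT_RETURN0  { return 0; }
    #else
    #define PRODUCT_RETURN   /* next token must be ; */
    #define PRODUCT_RETURN0  /* next token must be ; */
    #endif

    // So "void record_spaces_top() PRODUCT_RETURN;" is a header-defined
    // no-op in product builds and a normal declaration otherwise.
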
--- a/hotspot/src/share/vm/gc_implementation/shared/gcUtil.hpp Fri Jul 25 11:29:03 2008 -0700
+++ b/hotspot/src/share/vm/gc_implementation/shared/gcUtil.hpp Mon Jul 28 15:30:23 2008 -0700
@@ -58,6 +58,12 @@
_average(0.0), _sample_count(0), _weight(weight), _last_sample(0.0) {
}
+ void clear() {
+ _average = 0;
+ _sample_count = 0;
+ _last_sample = 0;
+ }
+
// Accessors
float average() const { return _average; }
unsigned weight() const { return _weight; }
@@ -115,6 +121,12 @@
float deviation() const { return _deviation; }
unsigned padding() const { return _padding; }
+ void clear() {
+ AdaptiveWeightedAverage::clear();
+ _padded_avg = 0;
+ _deviation = 0;
+ }
+
// Override
void sample(float new_sample);
};
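
clear() exists so the NUMA code can discard stale allocation-rate history after a topology change. For reference, an exponentially weighted average of the same shape, with the weight as a percentage as in AdaptiveWeightedAverage (a sketch, not the HotSpot implementation):

    class ToyWeightedAverage {
      float          _average;
      unsigned       _sample_count;
      const unsigned _weight;   // 0..100: weight given to a new sample
     public:
      explicit ToyWeightedAverage(unsigned weight)
        : _average(0.0f), _sample_count(0), _weight(weight) {}

      // avg' = (1 - w/100) * avg + (w/100) * sample
      void sample(float s) {
        _sample_count++;
        _average += (_weight / 100.0f) * (s - _average);
      }
      float average() const { return _average; }

      // Forget history, e.g. after the lgroup topology changes.
      void clear() { _average = 0.0f; _sample_count = 0; }
    };
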
--- a/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp Fri Jul 25 11:29:03 2008 -0700
+++ b/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp Mon Jul 28 15:30:23 2008 -0700
@@ -42,19 +42,31 @@
delete lgrp_spaces();
}
+#ifndef PRODUCT
void MutableNUMASpace::mangle_unused_area() {
- for (int i = 0; i < lgrp_spaces()->length(); i++) {
- LGRPSpace *ls = lgrp_spaces()->at(i);
- MutableSpace *s = ls->space();
- if (!os::numa_has_static_binding()) {
- HeapWord *top = MAX2((HeapWord*)round_down((intptr_t)s->top(), page_size()), s->bottom());
- if (top < s->end()) {
- ls->add_invalid_region(MemRegion(top, s->end()));
- }
- }
- s->mangle_unused_area();
- }
+ // This method should do nothing.
+ // It can be called on a numa space during a full compaction.
+}
+void MutableNUMASpace::mangle_unused_area_complete() {
+ // This method should do nothing.
+ // It can be called on a numa space during a full compaction.
+}
+void MutableNUMASpace::mangle_region(MemRegion mr) {
+ // This method should do nothing because numa spaces are not mangled.
}
+void MutableNUMASpace::set_top_for_allocations(HeapWord* v) {
+ assert(false, "Do not mangle MutableNUMASpace's");
+}
+void MutableNUMASpace::set_top_for_allocations() {
+ // This method should do nothing.
+}
+void MutableNUMASpace::check_mangled_unused_area(HeapWord* limit) {
+ // This method should do nothing.
+}
+void MutableNUMASpace::check_mangled_unused_area_complete() {
+ // This method should do nothing.
+}
+#endif // NOT_PRODUCT
// There may be unallocated holes in the middle chunks
// that should be filled with dead objects to ensure parseability.
@@ -129,7 +141,20 @@
size_t MutableNUMASpace::tlab_capacity(Thread *thr) const {
guarantee(thr != NULL, "No thread");
int lgrp_id = thr->lgrp_id();
- assert(lgrp_id != -1, "No lgrp_id set");
+ if (lgrp_id == -1) {
+    // This case can occur after the topology of the system has
+    // changed. Threads can change their location; the new home
+    // group will be determined during the first allocation
+    // attempt. For now we can safely assume that all spaces
+    // have equal size because the whole space will be reinitialized.
+ if (lgrp_spaces()->length() > 0) {
+ return capacity_in_bytes() / lgrp_spaces()->length();
+ } else {
+ assert(false, "There should be at least one locality group");
+ return 0;
+ }
+ }
+ // That's the normal case, where we know the locality group of the thread.
int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
if (i == -1) {
return 0;
@@ -138,9 +163,17 @@
}
size_t MutableNUMASpace::unsafe_max_tlab_alloc(Thread *thr) const {
+ // Please see the comments for tlab_capacity().
guarantee(thr != NULL, "No thread");
int lgrp_id = thr->lgrp_id();
- assert(lgrp_id != -1, "No lgrp_id set");
+ if (lgrp_id == -1) {
+ if (lgrp_spaces()->length() > 0) {
+ return free_in_bytes() / lgrp_spaces()->length();
+ } else {
+ assert(false, "There should be at least one locality group");
+ return 0;
+ }
+ }
int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
if (i == -1) {
return 0;
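
The fallback above simply assumes the committed space is split evenly across locality groups until the thread's home group is known. A hypothetical helper showing the same arithmetic:

    size_t even_split_capacity(size_t total_capacity, int lgrp_count) {
      if (lgrp_count <= 0) return 0;   // guarded by the assert above
      return total_capacity / (size_t)lgrp_count;
    }
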
@@ -238,12 +271,20 @@
void MutableNUMASpace::update() {
if (update_layout(false)) {
// If the topology has changed, make all chunks zero-sized.
+ // And clear the alloc-rate statistics.
+ // In future we may want to handle this more gracefully in order
+ // to avoid the reallocation of the pages as much as possible.
for (int i = 0; i < lgrp_spaces()->length(); i++) {
- MutableSpace *s = lgrp_spaces()->at(i)->space();
+ LGRPSpace *ls = lgrp_spaces()->at(i);
+ MutableSpace *s = ls->space();
s->set_end(s->bottom());
s->set_top(s->bottom());
+ ls->clear_alloc_rate();
}
- initialize(region(), true);
+ // A NUMA space is never mangled
+ initialize(region(),
+ SpaceDecorator::Clear,
+ SpaceDecorator::DontMangle);
} else {
bool should_initialize = false;
if (!os::numa_has_static_binding()) {
@@ -257,7 +298,10 @@
if (should_initialize ||
(UseAdaptiveNUMAChunkSizing && adaptation_cycles() < samples_count())) {
- initialize(region(), true);
+ // A NUMA space is never mangled
+ initialize(region(),
+ SpaceDecorator::Clear,
+ SpaceDecorator::DontMangle);
}
}
@@ -448,14 +492,17 @@
}
}
-void MutableNUMASpace::initialize(MemRegion mr, bool clear_space) {
+void MutableNUMASpace::initialize(MemRegion mr,
+ bool clear_space,
+ bool mangle_space) {
  assert(clear_space, "Reallocation will destroy data!");
assert(lgrp_spaces()->length() > 0, "There should be at least one space");
MemRegion old_region = region(), new_region;
set_bottom(mr.start());
set_end(mr.end());
- MutableSpace::set_top(bottom());
+ // Must always clear the space
+ clear(SpaceDecorator::DontMangle);
// Compute chunk sizes
size_t prev_page_size = page_size();
@@ -586,10 +633,8 @@
bias_region(top_region, ls->lgrp_id());
}
- // If we clear the region, we would mangle it in debug. That would cause page
- // allocation in a different place. Hence setting the top directly.
- s->initialize(new_region, false);
- s->set_top(s->bottom());
+ // Clear space (set top = bottom) but never mangle.
+ s->initialize(new_region, SpaceDecorator::Clear, SpaceDecorator::DontMangle);
set_adaptation_cycles(samples_count());
}
@@ -641,10 +686,12 @@
MutableSpace::set_top(value);
}
-void MutableNUMASpace::clear() {
+void MutableNUMASpace::clear(bool mangle_space) {
MutableSpace::set_top(bottom());
for (int i = 0; i < lgrp_spaces()->length(); i++) {
- lgrp_spaces()->at(i)->space()->clear();
+ // Never mangle NUMA spaces because the mangling will
+ // bind the memory to a possibly unwanted lgroup.
+ lgrp_spaces()->at(i)->space()->clear(SpaceDecorator::DontMangle);
}
}
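
The reason NUMA spaces are exempt from mangling throughout this file is first-touch page placement: the first thread to write a page determines which node backs it. A minimal, Linux-flavored illustration of the effect using only mmap/memset (illustrative only, not part of the patch):

    #include <sys/mman.h>
    #include <cstddef>
    #include <cstring>

    void first_touch_demo(size_t bytes) {
      void* p = mmap(NULL, bytes, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (p == MAP_FAILED) return;
      // This write is the "first touch": the pages get placed on the
      // node of the writing thread. A GC thread mangling a fresh NUMA
      // chunk would bind its pages the same way, before the intended
      // mutator thread ever allocated there.
      std::memset(p, 0xBA, bytes);
      munmap(p, bytes);
    }
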
--- a/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.hpp Fri Jul 25 11:29:03 2008 -0700
+++ b/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.hpp Mon Jul 28 15:30:23 2008 -0700
@@ -112,6 +112,7 @@
int lgrp_id() const { return _lgrp_id; }
MutableSpace* space() const { return _space; }
AdaptiveWeightedAverage* alloc_rate() const { return _alloc_rate; }
+ void clear_alloc_rate() { _alloc_rate->clear(); }
SpaceStats* space_stats() { return &_space_stats; }
void clear_space_stats() { _space_stats = SpaceStats(); }
@@ -171,14 +172,21 @@
MutableNUMASpace();
virtual ~MutableNUMASpace();
// Space initialization.
- virtual void initialize(MemRegion mr, bool clear_space);
+ virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
// Update space layout if necessary. Do all adaptive resizing job.
virtual void update();
// Update allocation rate averages.
virtual void accumulate_statistics();
- virtual void clear();
- virtual void mangle_unused_area();
+ virtual void clear(bool mangle_space);
+ virtual void mangle_unused_area() PRODUCT_RETURN;
+ virtual void mangle_unused_area_complete() PRODUCT_RETURN;
+ virtual void mangle_region(MemRegion mr) PRODUCT_RETURN;
+ virtual void check_mangled_unused_area(HeapWord* limit) PRODUCT_RETURN;
+ virtual void check_mangled_unused_area_complete() PRODUCT_RETURN;
+ virtual void set_top_for_allocations(HeapWord* v) PRODUCT_RETURN;
+ virtual void set_top_for_allocations() PRODUCT_RETURN;
+
virtual void ensure_parsability();
virtual size_t used_in_words() const;
virtual size_t free_in_words() const;
--- a/hotspot/src/share/vm/gc_implementation/shared/mutableSpace.cpp Fri Jul 25 11:29:03 2008 -0700
+++ b/hotspot/src/share/vm/gc_implementation/shared/mutableSpace.cpp Mon Jul 28 15:30:23 2008 -0700
@@ -25,7 +25,17 @@
# include "incls/_precompiled.incl"
# include "incls/_mutableSpace.cpp.incl"
-void MutableSpace::initialize(MemRegion mr, bool clear_space) {
+MutableSpace::MutableSpace(): ImmutableSpace(), _top(NULL) {
+ _mangler = new MutableSpaceMangler(this);
+}
+
+MutableSpace::~MutableSpace() {
+ delete _mangler;
+}
+
+void MutableSpace::initialize(MemRegion mr,
+ bool clear_space,
+ bool mangle_space) {
HeapWord* bottom = mr.start();
HeapWord* end = mr.end();
@@ -34,14 +44,51 @@
set_bottom(bottom);
set_end(end);
- if (clear_space) clear();
+ if (clear_space) {
+ clear(mangle_space);
+ }
+}
+
+void MutableSpace::clear(bool mangle_space) {
+ set_top(bottom());
+ if (ZapUnusedHeapArea && mangle_space) {
+ mangle_unused_area();
+ }
+}
+
+#ifndef PRODUCT
+void MutableSpace::check_mangled_unused_area(HeapWord* limit) {
+ mangler()->check_mangled_unused_area(limit);
+}
+
+void MutableSpace::check_mangled_unused_area_complete() {
+ mangler()->check_mangled_unused_area_complete();
}
-void MutableSpace::clear() {
- set_top(bottom());
- if (ZapUnusedHeapArea) mangle_unused_area();
+// Mangle only the unused space that has not previously
+// been mangled and that has not been allocated since being
+// mangled.
+void MutableSpace::mangle_unused_area() {
+ mangler()->mangle_unused_area();
+}
+
+void MutableSpace::mangle_unused_area_complete() {
+ mangler()->mangle_unused_area_complete();
}
+void MutableSpace::mangle_region(MemRegion mr) {
+ SpaceMangler::mangle_region(mr);
+}
+
+void MutableSpace::set_top_for_allocations(HeapWord* v) {
+ mangler()->set_top_for_allocations(v);
+}
+
+void MutableSpace::set_top_for_allocations() {
+ mangler()->set_top_for_allocations(top());
+}
+#endif
+
// This version requires locking.
HeapWord* MutableSpace::allocate(size_t size) {
assert(Heap_lock->owned_by_self() ||
--- a/hotspot/src/share/vm/gc_implementation/shared/mutableSpace.hpp Fri Jul 25 11:29:03 2008 -0700
+++ b/hotspot/src/share/vm/gc_implementation/shared/mutableSpace.hpp Mon Jul 28 15:30:23 2008 -0700
@@ -30,14 +30,23 @@
// Invariant: (ImmutableSpace +) bottom() <= top() <= end()
// top() is inclusive and end() is exclusive.
+class MutableSpaceMangler;
+
class MutableSpace: public ImmutableSpace {
friend class VMStructs;
+
+ // Helper for mangling unused space in debug builds
+ MutableSpaceMangler* _mangler;
+
protected:
HeapWord* _top;
+ MutableSpaceMangler* mangler() { return _mangler; }
+
public:
- virtual ~MutableSpace() {}
- MutableSpace() { _top = NULL; }
+ virtual ~MutableSpace();
+ MutableSpace();
+
// Accessors
HeapWord* top() const { return _top; }
virtual void set_top(HeapWord* value) { _top = value; }
@@ -52,21 +61,30 @@
MemRegion used_region() { return MemRegion(bottom(), top()); }
// Initialization
- virtual void initialize(MemRegion mr, bool clear_space);
- virtual void clear();
+ virtual void initialize(MemRegion mr,
+ bool clear_space,
+ bool mangle_space);
+ virtual void clear(bool mangle_space);
+ // Does the usual initialization but optionally resets top to bottom.
+#if 0 // MANGLE_SPACE
+ void initialize(MemRegion mr, bool clear_space, bool reset_top);
+#endif
virtual void update() { }
virtual void accumulate_statistics() { }
- // Overwrites the unused portion of this space. Note that some collectors
- // may use this "scratch" space during collections.
- virtual void mangle_unused_area() {
- mangle_region(MemRegion(_top, _end));
- }
+ // Methods used in mangling. See descriptions under SpaceMangler.
+ virtual void mangle_unused_area() PRODUCT_RETURN;
+ virtual void mangle_unused_area_complete() PRODUCT_RETURN;
+ virtual void check_mangled_unused_area(HeapWord* limit) PRODUCT_RETURN;
+ virtual void check_mangled_unused_area_complete() PRODUCT_RETURN;
+ virtual void set_top_for_allocations(HeapWord* v) PRODUCT_RETURN;
+
+ // Used to save the space's current top for later use during mangling.
+ virtual void set_top_for_allocations() PRODUCT_RETURN;
+
virtual void ensure_parsability() { }
- void mangle_region(MemRegion mr) {
- debug_only(Copy::fill_to_words(mr.start(), mr.word_size(), badHeapWord));
- }
+ virtual void mangle_region(MemRegion mr) PRODUCT_RETURN;
// Boolean queries.
bool is_empty() const { return used_in_words() == 0; }
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/shared/spaceDecorator.cpp Mon Jul 28 15:30:23 2008 -0700
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2002-2005 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_spaceDecorator.cpp.incl"
+
+// Catch-all file for utility classes
+
+#ifndef PRODUCT
+
+// Returns true if the location q matches the mangling
+// pattern.
+bool SpaceMangler::is_mangled(HeapWord* q) {
+ // This test loses precision but is good enough
+ return badHeapWord == (max_juint & (uintptr_t) q->value());
+}
+
+
+void SpaceMangler::set_top_for_allocations(HeapWord* v) {
+ if (v < end()) {
+ assert(is_mangled(v), "The high water mark is not mangled");
+ }
+ _top_for_allocations = v;
+}
+
+// Mangle only the unused space that has not previously
+// been mangled and that has not been allocated since being
+// mangled.
+void SpaceMangler::mangle_unused_area() {
+ assert(ZapUnusedHeapArea, "Mangling should not be in use");
+ // Mangle between top and the high water mark. Safeguard
+ // against the space changing since top_for_allocations was
+ // set.
+ HeapWord* mangled_end = MIN2(top_for_allocations(), end());
+ if (top() < mangled_end) {
+ MemRegion mangle_mr(top(), mangled_end);
+ SpaceMangler::mangle_region(mangle_mr);
+ // Light weight check of mangling.
+ check_mangled_unused_area(end());
+ }
+ // Complete check of unused area which is functional when
+ // DEBUG_MANGLING is defined.
+ check_mangled_unused_area_complete();
+}
+
+// A complete mangle is expected in the
+// exceptional case where top_for_allocations is not
+// properly tracking the high water mark for mangling.
+// This can be the case when to-space is being used for
+// scratch space during a mark-sweep-compact. See
+// contribute_scratch() and PSMarkSweep::allocate_stacks().
+void SpaceMangler::mangle_unused_area_complete() {
+ assert(ZapUnusedHeapArea, "Mangling should not be in use");
+ MemRegion mangle_mr(top(), end());
+ SpaceMangler::mangle_region(mangle_mr);
+}
+
+// Simply mangle the MemRegion mr.
+void SpaceMangler::mangle_region(MemRegion mr) {
+ assert(ZapUnusedHeapArea, "Mangling should not be in use");
+#ifdef ASSERT
+  if (TraceZapUnusedHeapArea) {
+    gclog_or_tty->print("Mangling [" PTR_FORMAT " to " PTR_FORMAT ")",
+                        mr.start(), mr.end());
+  }
+  Copy::fill_to_words(mr.start(), mr.word_size(), badHeapWord);
+  if (TraceZapUnusedHeapArea) {
+    gclog_or_tty->print_cr(" done");
+  }
+#endif
+}
+
+// Check that top, top_for_allocations and the last
+// word of the space are mangled. In a tight memory
+// situation even this lightweight check could
+// cause paging by touching the end of the space.
+void SpaceMangler::check_mangled_unused_area(HeapWord* limit) {
+ if (CheckZapUnusedHeapArea) {
+ // This method can be called while the spaces are
+ // being reshaped so skip the test if the end of the
+    // space is beyond the specified limit.
+ if (end() > limit) return;
+
+ assert(top() == end() ||
+ (is_mangled(top())), "Top not mangled");
+ assert((top_for_allocations() < top()) ||
+ (top_for_allocations() >= end()) ||
+ (is_mangled(top_for_allocations())),
+ "Older unused not mangled");
+ assert(top() == end() ||
+ (is_mangled(end() - 1)), "End not properly mangled");
+ // Only does checking when DEBUG_MANGLING is defined.
+ check_mangled_unused_area_complete();
+ }
+}
+
+#undef DEBUG_MANGLING
+// This should only be used while debugging the mangling
+// because of the high cost of checking the completeness.
+void SpaceMangler::check_mangled_unused_area_complete() {
+ if (CheckZapUnusedHeapArea) {
+ assert(ZapUnusedHeapArea, "Not mangling unused area");
+#ifdef DEBUG_MANGLING
+ HeapWord* q = top();
+ HeapWord* limit = end();
+
+ bool passed = true;
+ while (q < limit) {
+ if (!is_mangled(q)) {
+ passed = false;
+ break;
+ }
+ q++;
+ }
+ assert(passed, "Mangling is not complete");
+#endif
+ }
+}
+#undef DEBUG_MANGLING
+#endif // not PRODUCT
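
The checks above hinge on is_mangled()'s single-word compare against badHeapWord. A standalone sketch of the fill-then-verify round trip, with 0xBAADBABE standing in for badHeapWord and the mask mirroring the "loses precision" compare in is_mangled():

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    int main() {
      const uintptr_t zap = 0xBAADBABE;
      std::vector<uintptr_t> space(64, zap);   // a freshly "mangled" area

      // check_mangled_unused_area_complete()-style walk: every word
      // must still match the zap pattern in its low 32 bits.
      for (size_t i = 0; i < space.size(); i++) {
        assert((uint32_t)space[i] == (uint32_t)zap);
      }
      return 0;
    }
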
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/shared/spaceDecorator.hpp Mon Jul 28 15:30:23 2008 -0700
@@ -0,0 +1,141 @@
+/*
+ * Copyright 2002-2005 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+class SpaceDecorator: public AllStatic {
+ public:
+ // Initialization flags.
+ static const bool Clear = true;
+ static const bool DontClear = false;
+ static const bool Mangle = true;
+ static const bool DontMangle = false;
+};
+
+// Functionality for use with class Space and class MutableSpace.
+// The approach taken with the mangling is to mangle all
+// the space initially and then to mangle areas that have
+// been allocated since the last collection. Mangling is
+// done in the context of a generation and in the context
+// of a space.
+// The space in a generation is mangled when it is first
+// initialized and when the generation grows. The spaces
+// are not necessarily up-to-date when this mangling occurs
+// and the method mangle_region() is used.
+// After allocations have been done in a space, the space generally
+// needs to be remangled. Remangling is only done on the
+// recently allocated regions in the space. Typically, that is
+// the region between the new top and the top just before a
+// garbage collection.
+// An exception to the usual mangling in a space is done when the
+// space is used for an extraordinary purpose. Specifically, when
+// to-space is used as scratch space for a mark-sweep-compact
+// collection.
+// Spaces are mangled after a collection. If the generation
+// grows after a collection, the added space is mangled as part of
+// the growth of the generation. No additional mangling is needed when the
+// spaces are resized after an expansion.
+// The class SpaceMangler keeps a pointer to the top of the allocated
+// area and provides the methods for doing the piecemeal mangling.
+// Methods for doing sparse and full checking of the mangling are
+// included. The full checking is done if DEBUG_MANGLING is defined.
+// GenSpaceMangler is used with the GenCollectedHeap collectors and
+// MutableSpaceMangler is used with the ParallelScavengeHeap collectors.
+// These subclasses abstract the differences in the types of spaces used
+// by each heap.
+
+class SpaceMangler: public CHeapObj {
+ friend class VMStructs;
+
+ // High water mark for allocations. Typically, the space above
+  // this point has been mangled previously and need not be
+  // touched again. Space below this point has been allocated
+ // and remangling is needed between the current top and this
+ // high water mark.
+ HeapWord* _top_for_allocations;
+ HeapWord* top_for_allocations() { return _top_for_allocations; }
+
+ public:
+
+ // Setting _top_for_allocations to NULL at initialization
+ // makes it always below top so that mangling done as part
+ // of the initialize() call of a space does nothing (as it
+  // should, since the mangling is done as part of the constructor
+  // for the space).
+ SpaceMangler() : _top_for_allocations(NULL) {}
+
+ // Methods for top and end that delegate to the specific
+ // space type.
+ virtual HeapWord* top() const = 0;
+ virtual HeapWord* end() const = 0;
+
+ // Return true if q matches the mangled pattern.
+ static bool is_mangled(HeapWord* q) PRODUCT_RETURN0;
+
+  // Used to save an address in a space for later use during mangling.
+ void set_top_for_allocations(HeapWord* v);
+
+ // Overwrites the unused portion of this space.
+  // Mangle only the region not previously mangled: [top(), top_for_allocations()).
+  void mangle_unused_area();
+  // Mangle all of the unused region: [top(), end()).
+ void mangle_unused_area_complete();
+ // Do some sparse checking on the area that should have been mangled.
+ void check_mangled_unused_area(HeapWord* limit) PRODUCT_RETURN;
+ // Do a complete check of the area that should be mangled.
+ void check_mangled_unused_area_complete() PRODUCT_RETURN;
+
+ // Mangle the MemRegion. This is a non-space specific mangler. It
+ // is used during the initial mangling of a space before the space
+  // is fully constructed. It is also used when a generation is expanded,
+  // possibly before the spaces have been reshaped to the new
+ // size of the generation.
+ static void mangle_region(MemRegion mr);
+};
+
+class ContiguousSpace;
+
+// For use with GenCollectedHeap's ContiguousSpaces.
+class GenSpaceMangler: public SpaceMangler {
+ ContiguousSpace* _sp;
+
+ ContiguousSpace* sp() { return _sp; }
+
+ HeapWord* top() const { return _sp->top(); }
+ HeapWord* end() const { return _sp->end(); }
+
+ public:
+ GenSpaceMangler(ContiguousSpace* sp) : SpaceMangler(), _sp(sp) {}
+};
+
+// For use with ParallelScavengeHeap's MutableSpaces.
+class MutableSpaceMangler: public SpaceMangler {
+ MutableSpace* _sp;
+
+ MutableSpace* sp() { return _sp; }
+
+ HeapWord* top() const { return _sp->top(); }
+ HeapWord* end() const { return _sp->end(); }
+
+ public:
+ MutableSpaceMangler(MutableSpace* sp) : SpaceMangler(), _sp(sp) {}
+};
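
Supporting another space type needs only one more thin adapter in the same style as the two above, e.g. for a hypothetical MySpace exposing top() and end():

    class MySpace {            // hypothetical space type
     public:
      HeapWord* top() const { return _top; }
      HeapWord* end() const { return _end; }
     private:
      HeapWord* _top;
      HeapWord* _end;
    };

    class MySpaceMangler : public SpaceMangler {
      MySpace* _sp;
      HeapWord* top() const { return _sp->top(); }
      HeapWord* end() const { return _sp->end(); }
     public:
      MySpaceMangler(MySpace* sp) : SpaceMangler(), _sp(sp) {}
    };
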
--- a/hotspot/src/share/vm/includeDB_core Fri Jul 25 11:29:03 2008 -0700
+++ b/hotspot/src/share/vm/includeDB_core Mon Jul 28 15:30:23 2008 -0700
@@ -1405,6 +1405,7 @@
defNewGeneration.cpp oop.inline.hpp
defNewGeneration.cpp referencePolicy.hpp
defNewGeneration.cpp space.inline.hpp
+defNewGeneration.cpp spaceDecorator.hpp
defNewGeneration.cpp thread_<os_family>.inline.hpp
defNewGeneration.hpp ageTable.hpp
@@ -1789,6 +1790,7 @@
generation.cpp java.hpp
generation.cpp oop.hpp
generation.cpp oop.inline.hpp
+generation.cpp spaceDecorator.hpp
generation.cpp space.inline.hpp
generation.hpp allocation.hpp
@@ -3722,6 +3724,7 @@
space.cpp safepoint.hpp
space.cpp space.hpp
space.cpp space.inline.hpp
+space.cpp spaceDecorator.hpp
space.cpp systemDictionary.hpp
space.cpp universe.inline.hpp
space.cpp vmSymbols.hpp
@@ -3744,6 +3747,13 @@
space.inline.hpp space.hpp
space.inline.hpp universe.hpp
+spaceDecorator.hpp globalDefinitions.hpp
+spaceDecorator.hpp mutableSpace.hpp
+spaceDecorator.hpp space.hpp
+
+spaceDecorator.cpp copy.hpp
+spaceDecorator.cpp spaceDecorator.hpp
+
specialized_oop_closures.cpp ostream.hpp
specialized_oop_closures.cpp specialized_oop_closures.hpp
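
Each includeDB line declares one dependency, "this file needs that header", from which the build generates the per-file include lists, so a new header such as spaceDecorator.hpp has to be wired in for every file that uses it. The format is simply:

    <dependent file>                        <header it needs>

A hypothetical new client of the mangler would therefore add its own line, e.g.:

    myNewFile.cpp                           spaceDecorator.hpp
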
--- a/hotspot/src/share/vm/includeDB_features Fri Jul 25 11:29:03 2008 -0700
+++ b/hotspot/src/share/vm/includeDB_features Mon Jul 28 15:30:23 2008 -0700
@@ -51,6 +51,7 @@
dump.cpp oopFactory.hpp
dump.cpp resourceArea.hpp
dump.cpp signature.hpp
+dump.cpp spaceDecorator.hpp
dump.cpp symbolTable.hpp
dump.cpp systemDictionary.hpp
dump.cpp vmThread.hpp
--- a/hotspot/src/share/vm/memory/defNewGeneration.cpp Fri Jul 25 11:29:03 2008 -0700
+++ b/hotspot/src/share/vm/memory/defNewGeneration.cpp Mon Jul 28 15:30:23 2008 -0700
@@ -172,15 +172,25 @@
_to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
_gen_counters);
- compute_space_boundaries(0);
+ compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
update_counters();
_next_gen = NULL;
_tenuring_threshold = MaxTenuringThreshold;
_pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;
}
-void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size) {
- uintx alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment();
+void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
+ bool clear_space,
+ bool mangle_space) {
+ uintx alignment =
+ GenCollectedHeap::heap()->collector_policy()->min_alignment();
+
+ // If the spaces are being cleared (only done at heap initialization
+ // currently), the survivor spaces need not be empty.
+  // Otherwise, no care is taken for used areas in the survivor spaces,
+  // so check that they are empty.
+ assert(clear_space || (to()->is_empty() && from()->is_empty()),
+ "Initialization of the survivor spaces assumes these are empty");
// Compute sizes
uintx size = _virtual_space.committed_size();
@@ -214,16 +224,41 @@
MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
MemRegion toMR ((HeapWord*)to_start, (HeapWord*)to_end);
- eden()->initialize(edenMR, (minimum_eden_size == 0));
- // If minumum_eden_size != 0, we will not have cleared any
+ // A minimum eden size implies that there is a part of eden that
+ // is being used and that affects the initialization of any
+ // newly formed eden.
+ bool live_in_eden = minimum_eden_size > 0;
+
+  // If not clearing the spaces, do some checking to verify that
+  // the spaces are already mangled.
+  if (!clear_space) {
+    // Must check mangling before the spaces are reshaped. Otherwise,
+    // the bottom or end of one space may have moved into an area
+    // covered by another space and a failure of the check may not
+    // correctly indicate which space is not properly mangled.
+ if (ZapUnusedHeapArea) {
+ HeapWord* limit = (HeapWord*) _virtual_space.high();
+ eden()->check_mangled_unused_area(limit);
+ from()->check_mangled_unused_area(limit);
+ to()->check_mangled_unused_area(limit);
+ }
+ }
+
+ // Reset the spaces for their new regions.
+ eden()->initialize(edenMR,
+ clear_space && !live_in_eden,
+ SpaceDecorator::Mangle);
+ // If clear_space and live_in_eden, we will not have cleared any
// portion of eden above its top. This can cause newly
// expanded space not to be mangled if using ZapUnusedHeapArea.
// We explicitly do such mangling here.
- if (ZapUnusedHeapArea && (minimum_eden_size != 0)) {
+ if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
eden()->mangle_unused_area();
}
- from()->initialize(fromMR, true);
- to()->initialize(toMR , true);
+ from()->initialize(fromMR, clear_space, mangle_space);
+ to()->initialize(toMR, clear_space, mangle_space);
+
+ // Set next compaction spaces.
eden()->set_next_compaction_space(from());
// The to-space is normally empty before a compaction so need
// not be considered. The exception is during promotion
@@ -250,7 +285,16 @@
bool DefNewGeneration::expand(size_t bytes) {
MutexLocker x(ExpandHeap_lock);
+ HeapWord* prev_high = (HeapWord*) _virtual_space.high();
bool success = _virtual_space.expand_by(bytes);
+ if (success && ZapUnusedHeapArea) {
+ // Mangle newly committed space immediately because it
+    // can be done here more simply than after the new
+ // spaces have been computed.
+ HeapWord* new_high = (HeapWord*) _virtual_space.high();
+ MemRegion mangle_region(prev_high, new_high);
+ SpaceMangler::mangle_region(mangle_region);
+ }
// Do not attempt an expand-to-the reserve size. The
// request should properly observe the maximum size of
@@ -262,7 +306,8 @@
// value.
if (GC_locker::is_active()) {
if (PrintGC && Verbose) {
- gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");
+ gclog_or_tty->print_cr("Garbage collection disabled, "
+ "expanded heap instead");
}
}
@@ -326,16 +371,24 @@
changed = true;
}
if (changed) {
- compute_space_boundaries(eden()->used());
- MemRegion cmr((HeapWord*)_virtual_space.low(), (HeapWord*)_virtual_space.high());
+    // The spaces have already been mangled at this point but
+    // may not have been cleared (top set to bottom), which they
+    // should be. Mangling was done when the heap was expanded.
+ compute_space_boundaries(eden()->used(),
+ SpaceDecorator::Clear,
+ SpaceDecorator::DontMangle);
+ MemRegion cmr((HeapWord*)_virtual_space.low(),
+ (HeapWord*)_virtual_space.high());
Universe::heap()->barrier_set()->resize_covered_region(cmr);
if (Verbose && PrintGC) {
size_t new_size_after = _virtual_space.committed_size();
size_t eden_size_after = eden()->capacity();
size_t survivor_size_after = from()->capacity();
- gclog_or_tty->print("New generation size " SIZE_FORMAT "K->" SIZE_FORMAT "K [eden="
+ gclog_or_tty->print("New generation size " SIZE_FORMAT "K->"
+ SIZE_FORMAT "K [eden="
SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
- new_size_before/K, new_size_after/K, eden_size_after/K, survivor_size_after/K);
+ new_size_before/K, new_size_after/K,
+ eden_size_after/K, survivor_size_after/K);
if (WizardMode) {
gclog_or_tty->print("[allowed " SIZE_FORMAT "K extra for %d threads]",
thread_increase_size/K, threads_count);
@@ -480,7 +533,7 @@
ScanWeakRefClosure scan_weak_ref(this);
age_table()->clear();
- to()->clear();
+ to()->clear(SpaceDecorator::Mangle);
gch->rem_set()->prepare_for_younger_refs_iterate(false);
@@ -525,8 +578,18 @@
soft_ref_policy, &is_alive, &keep_alive, &evacuate_followers, NULL);
if (!promotion_failed()) {
// Swap the survivor spaces.
- eden()->clear();
- from()->clear();
+ eden()->clear(SpaceDecorator::Mangle);
+ from()->clear(SpaceDecorator::Mangle);
+ if (ZapUnusedHeapArea) {
+      // This is now done here because of the piecemeal mangling which
+      // can check for valid mangling at intermediate points in the
+      // collection(s). When a minor collection fails to collect
+      // sufficient space, resizing of the young generation can occur
+      // and redistribute the spaces in the young generation. Mangle
+ // here so that unzapped regions don't get distributed to
+ // other spaces.
+ to()->mangle_unused_area();
+ }
swap_spaces();
assert(to()->is_empty(), "to space should be empty now");
@@ -753,6 +816,15 @@
}
}
+void DefNewGeneration::reset_scratch() {
+ // If contributing scratch in to_space, mangle all of
+ // to_space if ZapUnusedHeapArea. This is needed because
+ // top is not maintained while using to-space as scratch.
+ if (ZapUnusedHeapArea) {
+ to()->mangle_unused_area_complete();
+ }
+}
+
bool DefNewGeneration::collection_attempt_is_safe() {
if (!to()->is_empty()) {
return false;
@@ -806,11 +878,25 @@
}
}
+ if (ZapUnusedHeapArea) {
+ eden()->check_mangled_unused_area_complete();
+ from()->check_mangled_unused_area_complete();
+ to()->check_mangled_unused_area_complete();
+ }
+
// update the generation and space performance counters
update_counters();
gch->collector_policy()->counters()->update_counters();
}
+void DefNewGeneration::record_spaces_top() {
+ assert(ZapUnusedHeapArea, "Not mangling unused space");
+ eden()->set_top_for_allocations();
+ to()->set_top_for_allocations();
+ from()->set_top_for_allocations();
+}
+
+
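
Taken together, the debug-build lifecycle this file now implements is: save the tops before a collection, clear-and-remangle the spaces that empty out, and verify the zap pattern in the epilogue. In outline (method names as above, ordering illustrative):

    // 1. Before the collection (record_spaces_top()):
    //      eden()/from()/to() -> set_top_for_allocations();
    // 2. During the collection:
    //      to()->clear(SpaceDecorator::Mangle);
    //      ... scavenge ...
    //      eden()->clear(SpaceDecorator::Mangle);
    //      from()->clear(SpaceDecorator::Mangle);
    //      to()->mangle_unused_area();      // before swap_spaces()
    // 3. In gc_epilogue():
    //      eden()/from()/to() -> check_mangled_unused_area_complete();
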
void DefNewGeneration::update_counters() {
if (UsePerfData) {
_eden_counters->update_all();
--- a/hotspot/src/share/vm/memory/defNewGeneration.hpp Fri Jul 25 11:29:03 2008 -0700
+++ b/hotspot/src/share/vm/memory/defNewGeneration.hpp Mon Jul 28 15:30:23 2008 -0700
@@ -279,6 +279,9 @@
virtual void gc_prologue(bool full);
virtual void gc_epilogue(bool full);
+ // Save the tops for eden, from, and to
+ virtual void record_spaces_top();
+
// Doesn't require additional work during GC prologue and epilogue
virtual bool performs_in_place_marking() const { return false; }
@@ -299,9 +302,12 @@
// For non-youngest collection, the DefNewGeneration can contribute
// "to-space".
- void contribute_scratch(ScratchBlock*& list, Generation* requestor,
+ virtual void contribute_scratch(ScratchBlock*& list, Generation* requestor,
size_t max_alloc_words);
+ // Reset for contribution of "to-space".
+ virtual void reset_scratch();
+
// GC support
virtual void compute_new_size();
virtual void collect(bool full,
@@ -331,7 +337,12 @@
void verify(bool allow_dirty);
protected:
- void compute_space_boundaries(uintx minimum_eden_size);
+ // If clear_space is true, clear the survivor spaces. Eden is
+ // cleared if the minimum size of eden is 0. If mangle_space
+ // is true, also mangle the space in debug mode.
+ void compute_space_boundaries(uintx minimum_eden_size,
+ bool clear_space,
+ bool mangle_space);
// Scavenge support
void swap_spaces();
};
--- a/hotspot/src/share/vm/memory/dump.cpp Fri Jul 25 11:29:03 2008 -0700
+++ b/hotspot/src/share/vm/memory/dump.cpp Mon Jul 28 15:30:23 2008 -0700
@@ -645,7 +645,7 @@
class ClearSpaceClosure : public SpaceClosure {
public:
void do_space(Space* s) {
- s->clear();
+ s->clear(SpaceDecorator::Mangle);
}
};
--- a/hotspot/src/share/vm/memory/genCollectedHeap.cpp Fri Jul 25 11:29:03 2008 -0700
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.cpp Mon Jul 28 15:30:23 2008 -0700
@@ -465,6 +465,11 @@
_gens[i]->stat_record()->invocations++;
_gens[i]->stat_record()->accumulated_time.start();
+ // Must be done anew before each collection because
+ // a previous collection will do mangling and will
+    // change the top of some spaces.
+ record_gen_tops_before_GC();
+
if (PrintGC && Verbose) {
gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT,
i,
@@ -1058,6 +1063,12 @@
return res;
}
+void GenCollectedHeap::release_scratch() {
+ for (int i = 0; i < _n_gens; i++) {
+ _gens[i]->reset_scratch();
+ }
+}
+
size_t GenCollectedHeap::large_typearray_limit() {
return gen_policy()->large_typearray_limit();
}
@@ -1285,6 +1296,24 @@
always_do_update_barrier = UseConcMarkSweepGC;
};
+#ifndef PRODUCT
+class GenGCSaveTopsBeforeGCClosure: public GenCollectedHeap::GenClosure {
+ public:
+ void do_generation(Generation* gen) {
+ gen->record_spaces_top();
+ }
+};
+
+void GenCollectedHeap::record_gen_tops_before_GC() {
+ if (ZapUnusedHeapArea) {
+ GenGCSaveTopsBeforeGCClosure blk;
+ generation_iterate(&blk, false); // not old-to-young.
+ perm_gen()->record_spaces_top();
+ }
+}
+#endif // not PRODUCT
+
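
GenGCSaveTopsBeforeGCClosure is an instance of the general GenClosure pattern: any per-generation hook can be applied across the heap with generation_iterate(). Another closure in the same style, as a sketch (assumes only the Generation::name() accessor):

    class PrintGenNamesClosure: public GenCollectedHeap::GenClosure {
     public:
      void do_generation(Generation* gen) {
        gclog_or_tty->print_cr("generation: %s", gen->name());
      }
    };

    // Usage, iterating young-to-old:
    //   PrintGenNamesClosure blk;
    //   GenCollectedHeap::heap()->generation_iterate(&blk, false);
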
class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure {
public:
void do_generation(Generation* gen) {
--- a/hotspot/src/share/vm/memory/genCollectedHeap.hpp Fri Jul 25 11:29:03 2008 -0700
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.hpp Mon Jul 28 15:30:23 2008 -0700
@@ -259,6 +259,9 @@
// be provided are returned as a list of ScratchBlocks, sorted by
// decreasing size.
ScratchBlock* gather_scratch(Generation* requestor, size_t max_alloc_words);
+ // Allow each generation to reset any scratch space that it has
+ // contributed as it needs.
+ void release_scratch();
size_t large_typearray_limit();
@@ -482,6 +485,9 @@
bool should_do_concurrent_full_gc(GCCause::Cause cause);
void collect_mostly_concurrent(GCCause::Cause cause);
+ // Save the tops of the spaces in all generations
+ void record_gen_tops_before_GC() PRODUCT_RETURN;
+
protected:
virtual void gc_prologue(bool full);
virtual void gc_epilogue(bool full);
--- a/hotspot/src/share/vm/memory/genMarkSweep.cpp Fri Jul 25 11:29:03 2008 -0700
+++ b/hotspot/src/share/vm/memory/genMarkSweep.cpp Mon Jul 28 15:30:23 2008 -0700
@@ -190,6 +190,10 @@
void GenMarkSweep::deallocate_stacks() {
+
+ GenCollectedHeap* gch = GenCollectedHeap::heap();
+ gch->release_scratch();
+
if (_preserved_oop_stack) {
delete _preserved_mark_stack;
_preserved_mark_stack = NULL;
--- a/hotspot/src/share/vm/memory/generation.cpp Fri Jul 25 11:29:03 2008 -0700
+++ b/hotspot/src/share/vm/memory/generation.cpp Mon Jul 28 15:30:23 2008 -0700
@@ -32,6 +32,12 @@
vm_exit_during_initialization("Could not reserve enough space for "
"object heap");
}
+  // Mangle all of the initial generation.
+ if (ZapUnusedHeapArea) {
+ MemRegion mangle_region((HeapWord*)_virtual_space.low(),
+ (HeapWord*)_virtual_space.high());
+ SpaceMangler::mangle_region(mangle_region);
+ }
_reserved = MemRegion((HeapWord*)_virtual_space.low_boundary(),
(HeapWord*)_virtual_space.high_boundary());
}
@@ -505,8 +511,11 @@
_bts->resize(new_word_size);
// Fix for bug #4668531
- MemRegion mangle_region(_the_space->end(), (HeapWord*)_virtual_space.high());
- _the_space->mangle_region(mangle_region);
+ if (ZapUnusedHeapArea) {
+ MemRegion mangle_region(_the_space->end(),
+ (HeapWord*)_virtual_space.high());
+ SpaceMangler::mangle_region(mangle_region);
+ }
// Expand space -- also expands space's BOT
// (which uses (part of) shared array above)
@@ -622,6 +631,14 @@
// update the generation and space performance counters
update_counters();
+ if (ZapUnusedHeapArea) {
+ the_space()->check_mangled_unused_area_complete();
+ }
+}
+
+void OneContigSpaceCardGeneration::record_spaces_top() {
+ assert(ZapUnusedHeapArea, "Not mangling unused space");
+ the_space()->set_top_for_allocations();
}
void OneContigSpaceCardGeneration::verify(bool allow_dirty) {
--- a/hotspot/src/share/vm/memory/generation.hpp Fri Jul 25 11:29:03 2008 -0700
+++ b/hotspot/src/share/vm/memory/generation.hpp Mon Jul 28 15:30:23 2008 -0700
@@ -376,6 +376,9 @@
// The default is to do nothing.
virtual void gc_epilogue(bool full) {};
+ // Save the high water marks for the used space in a generation.
+ virtual void record_spaces_top() {};
+
// Some generations may need to be "fixed-up" after some allocation
// activity to make them parsable again. The default is to do nothing.
virtual void ensure_parsability() {};
@@ -476,6 +479,10 @@
virtual void contribute_scratch(ScratchBlock*& list, Generation* requestor,
size_t max_alloc_words) {}
+ // Give each generation an opportunity to do clean up for any
+ // contributed scratch.
+ virtual void reset_scratch() {};
+
// When an older generation has been collected, and perhaps resized,
// this method will be invoked on all younger generations (from older to
// younger), allowing them to resize themselves as appropriate.
@@ -699,6 +706,8 @@
virtual void gc_epilogue(bool full);
+ virtual void record_spaces_top();
+
virtual void verify(bool allow_dirty);
virtual void print_on(outputStream* st) const;
};
--- a/hotspot/src/share/vm/memory/space.cpp Fri Jul 25 11:29:03 2008 -0700
+++ b/hotspot/src/share/vm/memory/space.cpp Mon Jul 28 15:30:23 2008 -0700
@@ -232,30 +232,44 @@
return new ContiguousSpaceDCTOC(this, cl, precision, boundary);
}
-void Space::initialize(MemRegion mr, bool clear_space) {
+void Space::initialize(MemRegion mr,
+ bool clear_space,
+ bool mangle_space) {
HeapWord* bottom = mr.start();
HeapWord* end = mr.end();
assert(Universe::on_page_boundary(bottom) && Universe::on_page_boundary(end),
"invalid space boundaries");
set_bottom(bottom);
set_end(end);
- if (clear_space) clear();
+ if (clear_space) clear(mangle_space);
+}
+
+void Space::clear(bool mangle_space) {
+ if (ZapUnusedHeapArea && mangle_space) {
+ mangle_unused_area();
+ }
}
-void Space::clear() {
- if (ZapUnusedHeapArea) mangle_unused_area();
+ContiguousSpace::ContiguousSpace(): CompactibleSpace(), _top(NULL) {
+ _mangler = new GenSpaceMangler(this);
}
-void ContiguousSpace::initialize(MemRegion mr, bool clear_space)
+ContiguousSpace::~ContiguousSpace() {
+ delete _mangler;
+}
+
+void ContiguousSpace::initialize(MemRegion mr,
+ bool clear_space,
+ bool mangle_space)
{
- CompactibleSpace::initialize(mr, clear_space);
+ CompactibleSpace::initialize(mr, clear_space, mangle_space);
_concurrent_iteration_safe_limit = top();
}
-void ContiguousSpace::clear() {
+void ContiguousSpace::clear(bool mangle_space) {
set_top(bottom());
set_saved_mark();
- Space::clear();
+ Space::clear(mangle_space);
}
bool Space::is_in(const void* p) const {
@@ -271,8 +285,8 @@
return p >= _top;
}
-void OffsetTableContigSpace::clear() {
- ContiguousSpace::clear();
+void OffsetTableContigSpace::clear(bool mangle_space) {
+ ContiguousSpace::clear(mangle_space);
_offsets.initialize_threshold();
}
@@ -288,17 +302,46 @@
Space::set_end(new_end);
}
-void ContiguousSpace::mangle_unused_area() {
- // to-space is used for storing marks during mark-sweep
- mangle_region(MemRegion(top(), end()));
+#ifndef PRODUCT
+
+void ContiguousSpace::set_top_for_allocations(HeapWord* v) {
+ mangler()->set_top_for_allocations(v);
+}
+void ContiguousSpace::set_top_for_allocations() {
+ mangler()->set_top_for_allocations(top());
+}
+void ContiguousSpace::check_mangled_unused_area(HeapWord* limit) {
+ mangler()->check_mangled_unused_area(limit);
+}
+
+void ContiguousSpace::check_mangled_unused_area_complete() {
+ mangler()->check_mangled_unused_area_complete();
}
-void ContiguousSpace::mangle_region(MemRegion mr) {
- debug_only(Copy::fill_to_words(mr.start(), mr.word_size(), badHeapWord));
+// Mangle only the unused space that has not previously
+// been mangled and that has not been allocated since being
+// mangled.
+void ContiguousSpace::mangle_unused_area() {
+ mangler()->mangle_unused_area();
+}
+void ContiguousSpace::mangle_unused_area_complete() {
+ mangler()->mangle_unused_area_complete();
}
+void ContiguousSpace::mangle_region(MemRegion mr) {
+  // Although this method uses SpaceMangler::mangle_region(), which
+  // is not specific to a space, when the ContiguousSpace version
+  // is called it is always with regard to a space, so this
+  // bounds checking is appropriate.
+ MemRegion space_mr(bottom(), end());
+ assert(space_mr.contains(mr), "Mangling outside space");
+ SpaceMangler::mangle_region(mr);
+}
+#endif // NOT_PRODUCT
-void CompactibleSpace::initialize(MemRegion mr, bool clear_space) {
- Space::initialize(mr, clear_space);
+void CompactibleSpace::initialize(MemRegion mr,
+ bool clear_space,
+ bool mangle_space) {
+ Space::initialize(mr, clear_space, mangle_space);
_compaction_top = bottom();
_next_compaction_space = NULL;
}
@@ -820,8 +863,8 @@
}
}
-void EdenSpace::clear() {
- ContiguousSpace::clear();
+void EdenSpace::clear(bool mangle_space) {
+ ContiguousSpace::clear(mangle_space);
set_soft_end(end());
}
@@ -878,7 +921,7 @@
_par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true)
{
_offsets.set_contig_space(this);
- initialize(mr, true);
+ initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
}
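
The bounds check added in ContiguousSpace::mangle_region() relies on MemRegion containment; the guard amounts to the following, shown here with char pointers as a stand-in sketch:

    #include <cstring>

    struct ToyRegion {
      char* start;
      char* end;
      bool contains(const ToyRegion& mr) const {
        return mr.start >= start && mr.end <= end;
      }
    };

    // Mirrors the "Mangling outside space" assert above.
    void mangle_checked(const ToyRegion& space, const ToyRegion& mr) {
      if (!space.contains(mr)) return;   // would assert in debug builds
      std::memset(mr.start, 0xBA, mr.end - mr.start);
    }
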
--- a/hotspot/src/share/vm/memory/space.hpp Fri Jul 25 11:29:03 2008 -0700
+++ b/hotspot/src/share/vm/memory/space.hpp Mon Jul 28 15:30:23 2008 -0700
@@ -131,15 +131,17 @@
return MemRegion(bottom(), saved_mark_word());
}
- // Initialization
- virtual void initialize(MemRegion mr, bool clear_space);
- virtual void clear();
+ // Initialization. These may be run to reset an existing
+ // Space.
+ virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
+ virtual void clear(bool mangle_space);
// For detecting GC bugs. Should only be called at GC boundaries, since
// some unused space may be used as scratch space during GC's.
// Default implementation does nothing. We also call this when expanding
// a space to satisfy an allocation request. See bug #4668531
virtual void mangle_unused_area() {}
+ virtual void mangle_unused_area_complete() {}
virtual void mangle_region(MemRegion mr) {}
// Testers
@@ -354,7 +356,7 @@
CompactibleSpace* _next_compaction_space;
public:
- virtual void initialize(MemRegion mr, bool clear_space);
+ virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
// Used temporarily during a compaction phase to hold the value
// top should have when compaction is complete.
@@ -724,12 +726,14 @@
/* continuously, but those that weren't need to have their thresholds */ \
/* re-initialized. Also mangles unused area for debugging. */ \
if (is_empty()) { \
- clear(); \
+ clear(SpaceDecorator::Mangle); \
} else { \
if (ZapUnusedHeapArea) mangle_unused_area(); \
} \
}
+class GenSpaceMangler;
+
// A space in which the free area is contiguous. It therefore supports
// faster allocation, and compaction.
class ContiguousSpace: public CompactibleSpace {
@@ -738,13 +742,21 @@
protected:
HeapWord* _top;
HeapWord* _concurrent_iteration_safe_limit;
+ // A helper for mangling the unused area of the space in debug builds.
+ GenSpaceMangler* _mangler;
+
+ GenSpaceMangler* mangler() { return _mangler; }
// Allocation helpers (return NULL if full).
inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value);
inline HeapWord* par_allocate_impl(size_t word_size, HeapWord* end_value);
public:
- virtual void initialize(MemRegion mr, bool clear_space);
+
+ ContiguousSpace();
+ ~ContiguousSpace();
+
+ virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
// Accessors
HeapWord* top() const { return _top; }
@@ -753,15 +765,34 @@
void set_saved_mark() { _saved_mark_word = top(); }
void reset_saved_mark() { _saved_mark_word = bottom(); }
- virtual void clear();
+ virtual void clear(bool mangle_space);
WaterMark bottom_mark() { return WaterMark(this, bottom()); }
WaterMark top_mark() { return WaterMark(this, top()); }
WaterMark saved_mark() { return WaterMark(this, saved_mark_word()); }
bool saved_mark_at_top() const { return saved_mark_word() == top(); }
- void mangle_unused_area();
- void mangle_region(MemRegion mr);
+  // In debug mode, mangle (overwrite with a recognizable bit
+  // pattern) the unused part of a space.
+
+  // Used to save an address in a space for later use during mangling.
+ void set_top_for_allocations(HeapWord* v) PRODUCT_RETURN;
+ // Used to save the space's current top for later use during mangling.
+ void set_top_for_allocations() PRODUCT_RETURN;
+
+ // Mangle regions in the space from the current top up to the
+ // previously mangled part of the space.
+ void mangle_unused_area() PRODUCT_RETURN;
+ // Mangle [top, end)
+ void mangle_unused_area_complete() PRODUCT_RETURN;
+ // Mangle the given MemRegion.
+ void mangle_region(MemRegion mr) PRODUCT_RETURN;
+
+ // Do some sparse checking on the area that should have been mangled.
+ void check_mangled_unused_area(HeapWord* limit) PRODUCT_RETURN;
+ // Check the complete area that should have been mangled.
+  // This check is a no-op unless the macro DEBUG_MANGLING is defined.
+ void check_mangled_unused_area_complete() PRODUCT_RETURN;
// Size computations: sizes in bytes.
size_t capacity() const { return byte_size(bottom(), end()); }
@@ -956,7 +987,7 @@
void set_soft_end(HeapWord* value) { _soft_end = value; }
// Override.
- void clear();
+ void clear(bool mangle_space);
// Set both the 'hard' and 'soft' limits (_end and _soft_end).
void set_end(HeapWord* value) {
@@ -1000,7 +1031,7 @@
void set_bottom(HeapWord* value);
void set_end(HeapWord* value);
- void clear();
+ void clear(bool mangle_space);
inline HeapWord* block_start(const void* p) const;
--- a/hotspot/src/share/vm/runtime/globals.hpp Fri Jul 25 11:29:03 2008 -0700
+++ b/hotspot/src/share/vm/runtime/globals.hpp Mon Jul 28 15:30:23 2008 -0700
@@ -589,9 +589,15 @@
develop(bool, ZapJNIHandleArea, trueInDebug, \
"Zap freed JNI handle space with 0xFEFEFEFE") \
\
- develop(bool, ZapUnusedHeapArea, false, \
+ develop(bool, ZapUnusedHeapArea, trueInDebug, \
"Zap unused heap space with 0xBAADBABE") \
\
+ develop(bool, TraceZapUnusedHeapArea, false, \
+ "Trace zapping of unused heap space") \
+ \
+ develop(bool, CheckZapUnusedHeapArea, false, \
+ "Check zapping of unused heap space") \
+ \
develop(bool, PrintVMMessages, true, \
"Print vm messages on console") \
\
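
Since these are develop() flags, they are compile-time constants in product builds and can only be toggled on the command line of a debug build. A typical debugging invocation might look like (path and application name illustrative):

    $ ./debug/bin/java -XX:+ZapUnusedHeapArea \
                       -XX:+TraceZapUnusedHeapArea \
                       -XX:+CheckZapUnusedHeapArea MyApp
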
--- a/hotspot/src/share/vm/utilities/globalDefinitions.hpp Fri Jul 25 11:29:03 2008 -0700
+++ b/hotspot/src/share/vm/utilities/globalDefinitions.hpp Mon Jul 28 15:30:23 2008 -0700
@@ -97,8 +97,12 @@
// object size.
class HeapWord {
friend class VMStructs;
-private:
+ private:
char* i;
+#ifdef ASSERT
+ public:
+ char* value() { return i; }
+#endif
};
// HeapWordSize must be 2^LogHeapWordSize.