6539517: CR 6186200 should be extended to perm gen allocation to prevent spurious OOMs from perm gen
Reviewed-by: ysr, jmasa
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsPermGen.cpp Thu Mar 27 17:22:06 2008 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsPermGen.cpp Tue Apr 01 15:13:47 2008 +0400
@@ -44,52 +44,12 @@
bool lock_owned = lock->owned_by_self();
if (lock_owned) {
MutexUnlocker mul(lock);
- return mem_allocate_work(size);
+ return mem_allocate_in_gen(size, _gen);
} else {
- return mem_allocate_work(size);
+ return mem_allocate_in_gen(size, _gen);
}
}
-HeapWord* CMSPermGen::mem_allocate_work(size_t size) {
- assert(!_gen->freelistLock()->owned_by_self(), "Potetntial deadlock");
-
- MutexLocker ml(Heap_lock);
- HeapWord* obj = NULL;
-
- obj = _gen->allocate(size, false);
- // Since we want to minimize pause times, we will prefer
- // expanding the perm gen rather than doing a stop-world
- // collection to satisfy the allocation request.
- if (obj == NULL) {
- // Try to expand the perm gen and allocate space.
- obj = _gen->expand_and_allocate(size, false, false);
- if (obj == NULL) {
- // Let's see if a normal stop-world full collection will
- // free up enough space.
- SharedHeap::heap()->collect_locked(GCCause::_permanent_generation_full);
- obj = _gen->allocate(size, false);
- if (obj == NULL) {
- // The collection above may have shrunk the space, so try
- // to expand again and allocate space.
- obj = _gen->expand_and_allocate(size, false, false);
- }
- if (obj == NULL) {
- // We have not been able to allocate space despite a
- // full stop-world collection. We now make a last-ditch collection
- // attempt (in which soft refs are all aggressively freed)
- // that will try to reclaim as much space as possible.
- SharedHeap::heap()->collect_locked(GCCause::_last_ditch_collection);
- obj = _gen->allocate(size, false);
- if (obj == NULL) {
- // Expand generation in case it was shrunk following the collection.
- obj = _gen->expand_and_allocate(size, false, false);
- }
- }
- }
- }
- return obj;
-}
-
void CMSPermGen::compute_new_size() {
_gen->compute_new_size();
}
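
Both branches of CMSPermGen::mem_allocate now delegate to the shared PermGen::mem_allocate_in_gen (added to permGen.cpp later in this patch). The only difference between them is that a thread already holding the CMS free-list lock must release it first, because mem_allocate_in_gen acquires Heap_lock and may block in a safepoint operation: exactly the deadlock the deleted assert used to warn about. The release/reacquire is handled by the RAII helper MutexUnlocker; a minimal sketch of that idiom, assuming a simplified Mutex type (the real class lives in mutexLocker.hpp):

    // Sketch: release a held lock for the duration of a scope,
    // reacquire it on scope exit (even on early return).
    class MutexUnlocker {
      Mutex* const _mutex;
     public:
      explicit MutexUnlocker(Mutex* mutex) : _mutex(mutex) {
        _mutex->unlock();   // caller must own the lock on entry
      }
      ~MutexUnlocker() {
        _mutex->lock();
      }
    };
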
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsPermGen.hpp Thu Mar 27 17:22:06 2008 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsPermGen.hpp Tue Apr 01 15:13:47 2008 +0400
@@ -29,7 +29,6 @@
class CMSPermGen: public PermGen {
friend class VMStructs;
- HeapWord* mem_allocate_work(size_t size);
protected:
// The "generation" view.
ConcurrentMarkSweepGeneration* _gen;
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp Thu Mar 27 17:22:06 2008 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp Tue Apr 01 15:13:47 2008 +0400
@@ -590,6 +590,31 @@
full_gc_count = Universe::heap()->total_full_collections();
result = perm_gen()->allocate_permanent(size);
+
+ if (result != NULL) {
+ return result;
+ }
+
+ if (GC_locker::is_active_and_needs_gc()) {
+ // If this thread is not in a jni critical section, we stall
+ // the requestor until the critical section has cleared and
+ // GC allowed. When the critical section clears, a GC is
+ // initiated by the last thread exiting the critical section; so
+ // we retry the allocation sequence from the beginning of the loop,
+ // rather than causing more, now probably unnecessary, GC attempts.
+ JavaThread* jthr = JavaThread::current();
+ if (!jthr->in_critical()) {
+ MutexUnlocker mul(Heap_lock);
+ GC_locker::stall_until_clear();
+ continue;
+ } else {
+ if (CheckJNICalls) {
+ fatal("Possible deadlock due to allocating while"
+ " in jni critical section");
+ }
+ return NULL;
+ }
+ }
}
if (result == NULL) {
@@ -622,6 +647,12 @@
if (op.prologue_succeeded()) {
assert(Universe::heap()->is_in_permanent_or_null(op.result()),
"result not in heap");
+ // If GC was locked out during VM operation then retry allocation
+ // and/or stall as necessary.
+ if (op.gc_locked()) {
+ assert(op.result() == NULL, "must be NULL if gc_locked() is true");
+ continue; // retry and/or stall as necessary
+ }
// If a NULL results is being returned, an out-of-memory
// will be thrown now. Clear the gc_time_limit_exceeded
// flag to avoid the following situation.
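
The in_critical() test above is what keeps this path deadlock-free: a thread that is itself inside a JNI critical section must not call GC_locker::stall_until_clear(), since it would be waiting for a section that only it can exit. Such a thread gets NULL back instead (the JNI spec forbids allocating inside a critical section, and -XX:+CheckJNICalls upgrades the misuse to a fatal error). A hypothetical native method showing the critical section involved (class and method names invented for illustration):

    #include <jni.h>

    JNIEXPORT void JNICALL
    Java_Example_fill(JNIEnv* env, jclass, jbyteArray arr) {
      // Enters a JNI critical section; GC is locked out until the release.
      jbyte* p = (jbyte*) env->GetPrimitiveArrayCritical(arr, NULL);
      // Any VM-internal allocation attempted by this thread in here must
      // fail with NULL rather than stall waiting for the section to clear.
      env->ReleasePrimitiveArrayCritical(arr, p, 0);
    }
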
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Thu Mar 27 17:22:06 2008 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Tue Apr 01 15:13:47 2008 +0400
@@ -999,7 +999,7 @@
DEBUG_ONLY(mark_bitmap_count = mark_bitmap_size = 0;)
// Increment the invocation count
- heap->increment_total_collections();
+ heap->increment_total_collections(true);
// We need to track unique mark sweep invocations as well.
_total_invocations++;
@@ -1964,7 +1964,7 @@
assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
assert(ref_processor() != NULL, "Sanity");
- if (GC_locker::is_active()) {
+ if (GC_locker::check_active_before_gc()) {
return;
}
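
Two small but load-bearing changes for the parallel compactor: full compactions now go through increment_total_collections(true), so the full-collection counter that waiting allocators sample before and after their VM operation is actually bumped; and the early exit now uses check_active_before_gc() rather than a bare is_active() query. The distinction matters because the checking variant also records that a GC is wanted, so the last thread to leave its JNI critical section knows it must initiate one. A simplified sketch of the assumed shape (the real declarations are in gcLocker.hpp):

    class GC_locker /* : AllStatic */ {
     public:
      static bool is_active();        // is any JNI critical section open?
      static void set_needs_gc();     // remember that a GC was refused
      // Query that also registers the need, so the GC is not simply lost:
      static bool check_active_before_gc() {
        if (is_active()) {
          set_needs_gc();             // last thread to leave will GC
        }
        return is_active();
      }
    };
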
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp Thu Mar 27 17:22:06 2008 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp Tue Apr 01 15:13:47 2008 +0400
@@ -69,6 +69,9 @@
GCCauseSetter gccs(heap, _gc_cause);
_result = heap->failed_permanent_mem_allocate(_size);
+ if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
+ set_gc_locked();
+ }
notify_gc_end();
}
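
The set_gc_locked() call is how the VM operation tells the requesting thread why it came back empty-handed: a NULL result with gc_locked() set means no collection actually ran, so the caller should stall or retry rather than report an out-of-memory condition (see the gc_locked() check added to parallelScavengeHeap.cpp above). A sketch of the assumed flag plumbing in the VM_GC_Operation base class (the real declaration is in vmGCOperations.hpp):

    // Simplified sketch of the flag protocol:
    class VM_GC_Operation : public VM_Operation {
      bool _gc_locked;                  // GC_locker blocked this operation
     public:
      VM_GC_Operation(/* counts, full */) : _gc_locked(false) { /* ... */ }
      void set_gc_locked()   { _gc_locked = true; }
      bool gc_locked() const { return _gc_locked; }
    };
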
--- a/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.cpp Thu Mar 27 17:22:06 2008 -0700
+++ b/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.cpp Tue Apr 01 15:13:47 2008 +0400
@@ -144,3 +144,18 @@
gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_level);
notify_gc_end();
}
+
+void VM_GenCollectForPermanentAllocation::doit() {
+ JvmtiGCForAllocationMarker jgcm;
+ notify_gc_begin(true);
+ GenCollectedHeap* gch = GenCollectedHeap::heap();
+ GCCauseSetter gccs(gch, _gc_cause);
+ gch->do_full_collection(gch->must_clear_all_soft_refs(),
+ gch->n_gens() - 1);
+ _res = gch->perm_gen()->allocate(_size, false);
+ assert(gch->is_in_reserved_or_null(_res), "result not in heap");
+ if (_res == NULL && GC_locker::is_active_and_needs_gc()) {
+ set_gc_locked();
+ }
+ notify_gc_end();
+}
--- a/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.hpp Thu Mar 27 17:22:06 2008 -0700
+++ b/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.hpp Tue Apr 01 15:13:47 2008 +0400
@@ -43,6 +43,7 @@
// is specified; and also the attach "inspectheap" operation
//
// VM_GenCollectForAllocation
+// VM_GenCollectForPermanentAllocation
// VM_ParallelGCFailedAllocation
// VM_ParallelGCFailedPermanentAllocation
// - this operation is invoked when allocation is failed;
@@ -166,3 +167,23 @@
virtual VMOp_Type type() const { return VMOp_GenCollectFull; }
virtual void doit();
};
+
+class VM_GenCollectForPermanentAllocation: public VM_GC_Operation {
+ private:
+ HeapWord* _res;
+ size_t _size; // size of object to be allocated
+ public:
+ VM_GenCollectForPermanentAllocation(size_t size,
+ unsigned int gc_count_before,
+ unsigned int full_gc_count_before,
+ GCCause::Cause gc_cause)
+ : VM_GC_Operation(gc_count_before, full_gc_count_before, true),
+ _size(size) {
+ _res = NULL;
+ _gc_cause = gc_cause;
+ }
+ ~VM_GenCollectForPermanentAllocation() {}
+ virtual VMOp_Type type() const { return VMOp_GenCollectForPermanentAllocation; }
+ virtual void doit();
+ HeapWord* result() const { return _res; }
+};
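
For reference, here is how the new operation is driven; this is condensed from the call site that permGen.cpp gains later in this patch. The caller samples both collection counters under Heap_lock, releases the lock, and lets the VM thread run the collection and allocation at a safepoint:

    unsigned int gc_count      = SharedHeap::heap()->total_collections();
    unsigned int full_gc_count = SharedHeap::heap()->total_full_collections();
    VM_GenCollectForPermanentAllocation op(size, gc_count, full_gc_count,
                                           GCCause::_permanent_generation_full);
    VMThread::execute(&op);            // runs doit() at a safepoint
    if (op.prologue_succeeded() && !op.gc_locked()) {
      HeapWord* obj = op.result();     // NULL means the collection did not help
    }
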
--- a/hotspot/src/share/vm/includeDB_core Thu Mar 27 17:22:06 2008 -0700
+++ b/hotspot/src/share/vm/includeDB_core Tue Apr 01 15:13:47 2008 +0400
@@ -719,6 +719,8 @@
+ciObjArray.cpp ciNullObject.hpp
+ciObjArray.cpp ciObjArray.hpp
ciObjArray.cpp ciUtilities.hpp
ciObjArray.cpp objArrayOop.hpp
ciObjArrayKlass.cpp ciInstanceKlass.hpp
ciObjArrayKlass.cpp ciObjArrayKlass.hpp
ciObjArrayKlass.cpp ciObjArrayKlassKlass.hpp
@@ -1636,6 +1638,7 @@
gcLocker.cpp gcLocker.inline.hpp
gcLocker.cpp sharedHeap.hpp
+gcLocker.cpp resourceArea.hpp
gcLocker.hpp collectedHeap.hpp
gcLocker.hpp genCollectedHeap.hpp
@@ -3061,13 +3064,13 @@
oopMap.cpp signature.hpp
oopMap.hpp allocation.hpp
oopMap.hpp compressedStream.hpp
oopMap.hpp growableArray.hpp
oopMap.hpp vmreg.hpp
oopMapCache.cpp allocation.inline.hpp
+oopMapCache.cpp jvmtiRedefineClassesTrace.hpp
oopMapCache.cpp handles.inline.hpp
-oopMapCache.cpp jvmtiRedefineClassesTrace.hpp
oopMapCache.cpp oop.inline.hpp
oopMapCache.cpp oopMapCache.hpp
oopMapCache.cpp resourceArea.hpp
@@ -3315,6 +3318,10 @@
permGen.cpp oop.inline.hpp
permGen.cpp permGen.hpp
permGen.cpp universe.hpp
+permGen.cpp gcLocker.hpp
+permGen.cpp gcLocker.inline.hpp
+permGen.cpp vmGCOperations.hpp
+permGen.cpp vmThread.hpp
permGen.hpp gcCause.hpp
permGen.hpp generation.hpp
--- a/hotspot/src/share/vm/memory/gcLocker.cpp Thu Mar 27 17:22:06 2008 -0700
+++ b/hotspot/src/share/vm/memory/gcLocker.cpp Tue Apr 01 15:13:47 2008 +0400
@@ -32,6 +32,12 @@
void GC_locker::stall_until_clear() {
assert(!JavaThread::current()->in_critical(), "Would deadlock");
+ if (PrintJNIGCStalls && PrintGCDetails) {
+ ResourceMark rm; // JavaThread::name() allocates to convert to UTF8
+ gclog_or_tty->print_cr(
+ "Allocation failed. Thread \"%s\" is stalled by JNI critical section.",
+ JavaThread::current()->name());
+ }
MutexLocker ml(JNICritical_lock);
// Wait for _needs_gc to be cleared
while (GC_locker::needs_gc()) {
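
The message is gated on both flags, so it only appears under -XX:+PrintJNIGCStalls -XX:+PrintGCDetails. An illustrative output line (the thread name here is invented):

    Allocation failed. Thread "pool-1-thread-3" is stalled by JNI critical section.

The ResourceMark is required because JavaThread::name() allocates in the resource area to build the UTF-8 name, which is also why includeDB_core gains the gcLocker.cpp -> resourceArea.hpp dependency above.
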
--- a/hotspot/src/share/vm/memory/genCollectedHeap.hpp Thu Mar 27 17:22:06 2008 -0700
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.hpp Tue Apr 01 15:13:47 2008 +0400
@@ -35,6 +35,7 @@
friend class CMSCollector;
friend class GenMarkSweep;
friend class VM_GenCollectForAllocation;
+ friend class VM_GenCollectForPermanentAllocation;
friend class VM_GenCollectFull;
friend class VM_GenCollectFullConcurrent;
friend class VM_GC_HeapInspection;
--- a/hotspot/src/share/vm/memory/permGen.cpp Thu Mar 27 17:22:06 2008 -0700
+++ b/hotspot/src/share/vm/memory/permGen.cpp Tue Apr 01 15:13:47 2008 +0400
@@ -25,6 +25,70 @@
#include "incls/_precompiled.incl"
#include "incls/_permGen.cpp.incl"
+HeapWord* PermGen::mem_allocate_in_gen(size_t size, Generation* gen) {
+ MutexLocker ml(Heap_lock);
+ GCCause::Cause next_cause = GCCause::_permanent_generation_full;
+ GCCause::Cause prev_cause = GCCause::_no_gc;
+
+ for (;;) {
+ HeapWord* obj = gen->allocate(size, false);
+ if (obj != NULL) {
+ return obj;
+ }
+ if (gen->capacity() < _capacity_expansion_limit ||
+ prev_cause != GCCause::_no_gc) {
+ obj = gen->expand_and_allocate(size, false);
+ }
+ if (obj == NULL && prev_cause != GCCause::_last_ditch_collection) {
+ if (GC_locker::is_active_and_needs_gc()) {
+ // If this thread is not in a jni critical section, we stall
+ // the requestor until the critical section has cleared and
+ // GC allowed. When the critical section clears, a GC is
+ // initiated by the last thread exiting the critical section; so
+ // we retry the allocation sequence from the beginning of the loop,
+ // rather than causing more, now probably unnecessary, GC attempts.
+ JavaThread* jthr = JavaThread::current();
+ if (!jthr->in_critical()) {
+ MutexUnlocker mul(Heap_lock);
+ // Wait for JNI critical section to be exited
+ GC_locker::stall_until_clear();
+ continue;
+ } else {
+ if (CheckJNICalls) {
+ fatal("Possible deadlock due to allocating while"
+ " in jni critical section");
+ }
+ return NULL;
+ }
+ }
+
+ // Read the GC count while holding the Heap_lock
+ unsigned int gc_count_before = SharedHeap::heap()->total_collections();
+ unsigned int full_gc_count_before = SharedHeap::heap()->total_full_collections();
+ {
+ MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back
+ VM_GenCollectForPermanentAllocation op(size, gc_count_before, full_gc_count_before,
+ next_cause);
+ VMThread::execute(&op);
+ if (!op.prologue_succeeded() || op.gc_locked()) {
+ assert(op.result() == NULL, "must be NULL if gc_locked() is true");
+ continue; // retry and/or stall as necessary
+ }
+ obj = op.result();
+ assert(obj == NULL || SharedHeap::heap()->is_in_reserved(obj),
+ "result not in heap");
+ if (obj != NULL) {
+ return obj;
+ }
+ }
+ prev_cause = next_cause;
+ next_cause = GCCause::_last_ditch_collection;
+ } else {
+ return obj;
+ }
+ }
+}
+
CompactingPermGen::CompactingPermGen(ReservedSpace rs,
ReservedSpace shared_rs,
size_t initial_byte_size,
@@ -44,40 +108,7 @@
}
HeapWord* CompactingPermGen::mem_allocate(size_t size) {
- MutexLocker ml(Heap_lock);
- HeapWord* obj = _gen->allocate(size, false);
- bool tried_collection = false;
- bool tried_expansion = false;
- while (obj == NULL) {
- if (_gen->capacity() >= _capacity_expansion_limit || tried_expansion) {
- // Expansion limit reached, try collection before expanding further
- // For now we force a full collection, this could be changed
- SharedHeap::heap()->collect_locked(GCCause::_permanent_generation_full);
- obj = _gen->allocate(size, false);
- tried_collection = true;
- tried_expansion = false; // ... following the collection:
- // the collection may have shrunk the space.
- }
- if (obj == NULL && !tried_expansion) {
- obj = _gen->expand_and_allocate(size, false);
- tried_expansion = true;
- }
- if (obj == NULL && tried_collection && tried_expansion) {
- // We have not been able to allocate despite a collection and
- // an attempted space expansion. We now make a last-ditch collection
- // attempt that will try to reclaim as much space as possible (for
- // example by aggressively clearing all soft refs).
- SharedHeap::heap()->collect_locked(GCCause::_last_ditch_collection);
- obj = _gen->allocate(size, false);
- if (obj == NULL) {
- // An expansion attempt is necessary since the previous
- // collection may have shrunk the space.
- obj = _gen->expand_and_allocate(size, false);
- }
- break;
- }
- }
- return obj;
+ return mem_allocate_in_gen(size, _gen);
}
void CompactingPermGen::compute_new_size() {
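
PermGen::mem_allocate_in_gen replaces both hand-rolled loops (the CMS copy deleted from cmsPermGen.cpp and the compacting copy deleted above) with one ladder: allocate, expand if still under the expansion limit, then escalate through at most two collections, first a normal full GC and then a last-ditch GC that aggressively clears soft references, with prev_cause/next_cause tracking the current rung. Stripped of locking and the GC_locker stall, the control flow reduces to the following standalone model (helper names are invented; this is a sketch, not HotSpot code):

    #include <cstddef>

    enum Cause { no_gc, perm_gen_full, last_ditch };

    // Hypothetical stand-ins for the Generation / SharedHeap calls:
    void* try_allocate(std::size_t size);
    bool  below_expansion_limit();
    void* expand_and_allocate(std::size_t size);
    void  run_full_gc(Cause cause);

    void* allocate_with_retries(std::size_t size) {
      Cause next = perm_gen_full;
      Cause prev = no_gc;
      for (;;) {
        void* p = try_allocate(size);                  // fast path
        if (p != NULL) return p;
        if (below_expansion_limit() || prev != no_gc) {
          p = expand_and_allocate(size);               // try growing instead
        }
        if (p != NULL)          return p;
        if (prev == last_ditch) return NULL;           // both GCs failed: OOM
        run_full_gc(next);                             // collect, then retry
        prev = next;
        next = last_ditch;                             // escalate exactly once
      }
    }
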
--- a/hotspot/src/share/vm/memory/permGen.hpp Thu Mar 27 17:22:06 2008 -0700
+++ b/hotspot/src/share/vm/memory/permGen.hpp Tue Apr 01 15:13:47 2008 +0400
@@ -38,6 +38,8 @@
size_t _capacity_expansion_limit; // maximum expansion allowed without a
// full gc occuring
+ HeapWord* mem_allocate_in_gen(size_t size, Generation* gen);
+
public:
enum Name {
MarkSweepCompact, MarkSweep, ConcurrentMarkSweep
--- a/hotspot/src/share/vm/runtime/globals.hpp Thu Mar 27 17:22:06 2008 -0700
+++ b/hotspot/src/share/vm/runtime/globals.hpp Tue Apr 01 15:13:47 2008 +0400
@@ -1919,6 +1919,10 @@
develop(bool, IgnoreLibthreadGPFault, false, \
"Suppress workaround for libthread GP fault") \
\
+ product(bool, PrintJNIGCStalls, false, \
+ "Print diagnostic message when GC is stalled" \
+ "by JNI critical section") \
+ \
/* JVMTI heap profiling */ \
\
diagnostic(bool, TraceJVMTIObjectTagging, false, \
--- a/hotspot/src/share/vm/runtime/vm_operations.hpp Thu Mar 27 17:22:06 2008 -0700
+++ b/hotspot/src/share/vm/runtime/vm_operations.hpp Tue Apr 01 15:13:47 2008 +0400
@@ -49,6 +49,7 @@
template(GenCollectFull) \
template(GenCollectFullConcurrent) \
template(GenCollectForAllocation) \
+ template(GenCollectForPermanentAllocation) \
template(ParallelGCFailedAllocation) \
template(ParallelGCFailedPermanentAllocation) \
template(ParallelGCSystemGC) \
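
These template(...) entries form an X-macro list: each registered name expands into a VMOp_<Name> enum constant plus a printable operation name, which is what lets the new class return VMOp_GenCollectForPermanentAllocation from type(). The assumed expansion pattern, simplified from the same header:

    #define VM_OP_ENUM(type) VMOp_##type,
    enum VMOp_Type {
      VM_OPS_DO(VM_OP_ENUM)   // one VMOp_ constant per template(...) line
      VMOp_Terminating
    };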