diff -r 590560cfc997 -r 371690c4f281 hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp
--- a/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Fri Mar 16 16:14:04 2012 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Mon Mar 12 14:59:00 2012 -0700
@@ -34,7 +34,8 @@
 VM_G1CollectForAllocation::VM_G1CollectForAllocation(
                                                   unsigned int gc_count_before,
                                                   size_t word_size)
-  : VM_G1OperationWithAllocRequest(gc_count_before, word_size) {
+  : VM_G1OperationWithAllocRequest(gc_count_before, word_size,
+                                   GCCause::_allocation_failure) {
   guarantee(word_size > 0, "an allocation should always be requested");
 }
 
@@ -57,9 +58,10 @@
                                       bool           should_initiate_conc_mark,
                                       double         target_pause_time_ms,
                                       GCCause::Cause gc_cause)
-  : VM_G1OperationWithAllocRequest(gc_count_before, word_size),
+  : VM_G1OperationWithAllocRequest(gc_count_before, word_size, gc_cause),
     _should_initiate_conc_mark(should_initiate_conc_mark),
     _target_pause_time_ms(target_pause_time_ms),
+    _should_retry_gc(false),
     _full_collections_completed_before(0) {
   guarantee(target_pause_time_ms > 0.0,
             err_msg("target_pause_time_ms = %1.6lf should be positive",
@@ -70,6 +72,22 @@
   _gc_cause = gc_cause;
 }
 
+bool VM_G1IncCollectionPause::doit_prologue() {
+  bool res = VM_GC_Operation::doit_prologue();
+  if (!res) {
+    if (_should_initiate_conc_mark) {
+      // The prologue can fail for a couple of reasons. The first is that another GC
+      // got scheduled and prevented the scheduling of the initial mark GC. The
+      // second is that the GC locker may be active and the heap can't be expanded.
+      // In both cases we want to retry the GC so that the initial mark pause is
+      // actually scheduled. In the second case, however, we should stall until
+      // the GC locker is no longer active and then retry the initial mark GC.
+      _should_retry_gc = true;
+    }
+  }
+  return res;
+}
+
 void VM_G1IncCollectionPause::doit() {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   assert(!_should_initiate_conc_mark ||
@@ -106,11 +124,25 @@
     // next GC pause to be an initial mark; it returns false if a
     // marking cycle is already in progress.
     //
-    // If a marking cycle is already in progress just return and skip
-    // the pause - the requesting thread should block in doit_epilogue
-    // until the marking cycle is complete.
+    // If a marking cycle is already in progress just return and skip the
+    // pause below - if the reason for requesting this initial mark pause
+    // was due to a System.gc() then the requesting thread should block in
+    // doit_epilogue() until the marking cycle is complete.
+    //
+    // If this initial mark pause was requested as part of a humongous
+    // allocation then we know that the marking cycle must just have
+    // been started by another thread (possibly also allocating a humongous
+    // object) as there was no active marking cycle when the requesting
+    // thread checked before calling collect() in
+    // attempt_allocation_humongous(). Retrying the GC, in this case,
+    // will cause the requesting thread to spin inside collect() until the
+    // just started marking cycle is complete - which may be a while. So
+    // we do NOT retry the GC.
     if (!res) {
-      assert(_word_size == 0, "ExplicitGCInvokesConcurrent shouldn't be allocating");
+      assert(_word_size == 0, "Concurrent Full GC/Humongous Object IM shouldn't be allocating");
+      if (_gc_cause != GCCause::_g1_humongous_allocation) {
+        _should_retry_gc = true;
+      }
       return;
     }
   }
@@ -123,6 +155,13 @@
                                      true /* expect_null_cur_alloc_region */);
     } else {
       assert(_result == NULL, "invariant");
+      if (!_pause_succeeded) {
+        // Another possible reason for the pause to not be successful
+        // is that, again, the GC locker is active (and has become active
+        // since the prologue was executed). In this case we should retry
+        // the pause after waiting for the GC locker to become inactive.
+        _should_retry_gc = true;
+      }
     }
   }
 
@@ -168,6 +207,7 @@
 }
 
 void VM_CGC_Operation::acquire_pending_list_lock() {
+  assert(_needs_pll, "don't call this otherwise");
   // The caller may block while communicating
   // with the SLT thread in order to acquire/release the PLL.
   ConcurrentMarkThread::slt()->
@@ -175,6 +215,7 @@
 }
 
 void VM_CGC_Operation::release_and_notify_pending_list_lock() {
+  assert(_needs_pll, "don't call this otherwise");
   // The caller may block while communicating
   // with the SLT thread in order to acquire/release the PLL.
   ConcurrentMarkThread::slt()->
@@ -198,7 +239,9 @@
 bool VM_CGC_Operation::doit_prologue() {
   // Note the relative order of the locks must match that in
   // VM_GC_Operation::doit_prologue() or deadlocks can occur
-  acquire_pending_list_lock();
+  if (_needs_pll) {
+    acquire_pending_list_lock();
+  }
 
   Heap_lock->lock();
   SharedHeap::heap()->_thread_holds_heap_lock_for_gc = true;
@@ -210,5 +253,7 @@
   // VM_GC_Operation::doit_epilogue()
   SharedHeap::heap()->_thread_holds_heap_lock_for_gc = false;
   Heap_lock->unlock();
-  release_and_notify_pending_list_lock();
+  if (_needs_pll) {
+    release_and_notify_pending_list_lock();
+  }
 }
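
The _should_retry_gc flag set in doit_prologue() and doit() above is polled by the
thread that submitted the VM operation. The following is a minimal sketch of that
caller-side retry loop, modeled on the shape of G1CollectedHeap::collect() and
assuming the should_retry_gc() accessor added in the companion header change; the
local variables gc_count_before and cause, and the g1_policy()->max_pause_time_ms()
call, are illustrative assumptions rather than lines from this diff.

  // Sketch only: keep retrying an initial mark pause until it is actually
  // scheduled. Real code re-reads gc_count_before under the Heap_lock before
  // each attempt; that step is elided here for brevity.
  bool retry_gc = false;
  do {
    VM_G1IncCollectionPause op(gc_count_before,
                               0,     /* word_size: no allocation request */
                               true,  /* should_initiate_conc_mark */
                               g1_policy()->max_pause_time_ms(),
                               cause);
    VMThread::execute(&op);
    retry_gc = op.should_retry_gc();
    if (retry_gc && GC_locker::is_active_and_needs_gc()) {
      // The prologue or pause failed because the GC locker was held; stall
      // until it clears, then retry so the initial mark pause is scheduled.
      GC_locker::stall_until_clear();
    }
  } while (retry_gc);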