# HG changeset patch # User kbarrett # Date 1429726009 14400 # Node ID c3b8486760aa1dab81711d688ddb332dfae43c71 # Parent 928bec4e217f6d84d51c80721e3857b13ce037bb 8078023: verify_no_cset_oops found reclaimed humongous object in SATB buffer Summary: Removed no longer valid checking of SATB buffers Reviewed-by: jmasa, pliden diff -r 928bec4e217f -r c3b8486760aa hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp --- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp Wed Apr 22 12:58:10 2015 +0200 +++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp Wed Apr 22 14:06:49 2015 -0400 @@ -2793,9 +2793,7 @@ #ifndef PRODUCT enum VerifyNoCSetOopsPhase { VerifyNoCSetOopsStack, - VerifyNoCSetOopsQueues, - VerifyNoCSetOopsSATBCompleted, - VerifyNoCSetOopsSATBThread + VerifyNoCSetOopsQueues }; class VerifyNoCSetOopsClosure : public OopClosure, public ObjectClosure { @@ -2808,8 +2806,6 @@ switch (_phase) { case VerifyNoCSetOopsStack: return "Stack"; case VerifyNoCSetOopsQueues: return "Queue"; - case VerifyNoCSetOopsSATBCompleted: return "Completed SATB Buffers"; - case VerifyNoCSetOopsSATBThread: return "Thread SATB Buffers"; default: ShouldNotReachHere(); } return NULL; @@ -2836,7 +2832,7 @@ virtual void do_oop(narrowOop* p) { // We should not come across narrow oops while scanning marking - // stacks and SATB buffers. + // stacks ShouldNotReachHere(); } @@ -2845,10 +2841,7 @@ } }; -void ConcurrentMark::verify_no_cset_oops(bool verify_stacks, - bool verify_enqueued_buffers, - bool verify_thread_buffers, - bool verify_fingers) { +void ConcurrentMark::verify_no_cset_oops() { assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint"); if (!G1CollectedHeap::heap()->mark_in_progress()) { return; @@ -2856,65 +2849,47 @@ VerifyNoCSetOopsClosure cl; - if (verify_stacks) { - // Verify entries on the global mark stack - cl.set_phase(VerifyNoCSetOopsStack); - _markStack.oops_do(&cl); - - // Verify entries on the task queues - for (uint i = 0; i < _max_worker_id; i += 1) { - cl.set_phase(VerifyNoCSetOopsQueues, i); - CMTaskQueue* queue = _task_queues->queue(i); - queue->oops_do(&cl); - } - } - - SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set(); - - // Verify entries on the enqueued SATB buffers - if (verify_enqueued_buffers) { - cl.set_phase(VerifyNoCSetOopsSATBCompleted); - satb_qs.iterate_completed_buffers_read_only(&cl); - } - - // Verify entries on the per-thread SATB buffers - if (verify_thread_buffers) { - cl.set_phase(VerifyNoCSetOopsSATBThread); - satb_qs.iterate_thread_buffers_read_only(&cl); + // Verify entries on the global mark stack + cl.set_phase(VerifyNoCSetOopsStack); + _markStack.oops_do(&cl); + + // Verify entries on the task queues + for (uint i = 0; i < _max_worker_id; i += 1) { + cl.set_phase(VerifyNoCSetOopsQueues, i); + CMTaskQueue* queue = _task_queues->queue(i); + queue->oops_do(&cl); } - if (verify_fingers) { - // Verify the global finger - HeapWord* global_finger = finger(); - if (global_finger != NULL && global_finger < _heap_end) { - // The global finger always points to a heap region boundary. We - // use heap_region_containing_raw() to get the containing region - // given that the global finger could be pointing to a free region - // which subsequently becomes continues humongous. If that - // happens, heap_region_containing() will return the bottom of the - // corresponding starts humongous region and the check below will - // not hold any more. 
- // Since we always iterate over all regions, we might get a NULL HeapRegion - // here. - HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger); - guarantee(global_hr == NULL || global_finger == global_hr->bottom(), - err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT, - p2i(global_finger), HR_FORMAT_PARAMS(global_hr))); - } - - // Verify the task fingers - assert(parallel_marking_threads() <= _max_worker_id, "sanity"); - for (int i = 0; i < (int) parallel_marking_threads(); i += 1) { - CMTask* task = _tasks[i]; - HeapWord* task_finger = task->finger(); - if (task_finger != NULL && task_finger < _heap_end) { - // See above note on the global finger verification. - HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger); - guarantee(task_hr == NULL || task_finger == task_hr->bottom() || - !task_hr->in_collection_set(), - err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT, - p2i(task_finger), HR_FORMAT_PARAMS(task_hr))); - } + // Verify the global finger + HeapWord* global_finger = finger(); + if (global_finger != NULL && global_finger < _heap_end) { + // The global finger always points to a heap region boundary. We + // use heap_region_containing_raw() to get the containing region + // given that the global finger could be pointing to a free region + // which subsequently becomes continues humongous. If that + // happens, heap_region_containing() will return the bottom of the + // corresponding starts humongous region and the check below will + // not hold any more. + // Since we always iterate over all regions, we might get a NULL HeapRegion + // here. + HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger); + guarantee(global_hr == NULL || global_finger == global_hr->bottom(), + err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT, + p2i(global_finger), HR_FORMAT_PARAMS(global_hr))); + } + + // Verify the task fingers + assert(parallel_marking_threads() <= _max_worker_id, "sanity"); + for (int i = 0; i < (int) parallel_marking_threads(); i += 1) { + CMTask* task = _tasks[i]; + HeapWord* task_finger = task->finger(); + if (task_finger != NULL && task_finger < _heap_end) { + // See above note on the global finger verification. + HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger); + guarantee(task_hr == NULL || task_finger == task_hr->bottom() || + !task_hr->in_collection_set(), + err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT, + p2i(task_finger), HR_FORMAT_PARAMS(task_hr))); } } } diff -r 928bec4e217f -r c3b8486760aa hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp --- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp Wed Apr 22 12:58:10 2015 +0200 +++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp Wed Apr 22 14:06:49 2015 -0400 @@ -785,14 +785,9 @@ } // Verify that there are no CSet oops on the stacks (taskqueues / - // global mark stack), enqueued SATB buffers, per-thread SATB - // buffers, and fingers (global / per-task). The boolean parameters - // decide which of the above data structures to verify. If marking - // is not in progress, it's a no-op. - void verify_no_cset_oops(bool verify_stacks, - bool verify_enqueued_buffers, - bool verify_thread_buffers, - bool verify_fingers) PRODUCT_RETURN; + // global mark stack) and fingers (global / per-task). + // If marking is not in progress, it's a no-op. 
+ void verify_no_cset_oops() PRODUCT_RETURN; bool isPrevMarked(oop p) const { assert(p != NULL && p->is_oop(), "expected an oop"); diff -r 928bec4e217f -r c3b8486760aa hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp --- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Wed Apr 22 12:58:10 2015 +0200 +++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Wed Apr 22 14:06:49 2015 -0400 @@ -3806,14 +3806,9 @@ assert(check_cset_fast_test(), "Inconsistency in the InCSetState table."); _cm->note_start_of_gc(); - // We should not verify the per-thread SATB buffers given that - // we have not filtered them yet (we'll do so during the - // GC). We also call this after finalize_cset() to + // We call this after finalize_cset() to // ensure that the CSet has been finalized. - _cm->verify_no_cset_oops(true /* verify_stacks */, - true /* verify_enqueued_buffers */, - false /* verify_thread_buffers */, - true /* verify_fingers */); + _cm->verify_no_cset_oops(); if (_hr_printer.is_active()) { HeapRegion* hr = g1_policy()->collection_set(); @@ -3836,16 +3831,6 @@ // Actually do the work... evacuate_collection_set(evacuation_info); - // We do this to mainly verify the per-thread SATB buffers - // (which have been filtered by now) since we didn't verify - // them earlier. No point in re-checking the stacks / enqueued - // buffers given that the CSet has not changed since last time - // we checked. - _cm->verify_no_cset_oops(false /* verify_stacks */, - false /* verify_enqueued_buffers */, - true /* verify_thread_buffers */, - true /* verify_fingers */); - free_collection_set(g1_policy()->collection_set(), evacuation_info); eagerly_reclaim_humongous_regions(); @@ -3928,10 +3913,7 @@ // We redo the verification but now wrt to the new CSet which // has just got initialized after the previous CSet was freed. - _cm->verify_no_cset_oops(true /* verify_stacks */, - true /* verify_enqueued_buffers */, - true /* verify_thread_buffers */, - true /* verify_fingers */); + _cm->verify_no_cset_oops(); _cm->note_end_of_gc(); // This timing is only used by the ergonomics to handle our pause target. 
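
For context, the two verification points that remain in the evacuation pause, condensed from the g1CollectedHeap.cpp hunks above. Only calls that appear in the patch are shown; the comments are a reading of why the SATB checks had to go: eagerly reclaimed humongous objects can still be referenced from unfiltered SATB buffers, which is exactly the failure reported in 8078023, so only the mark stacks, task queues and fingers are still checked.

  // After finalize_cset(): the CSet is final, SATB buffers are still unfiltered.
  _cm->verify_no_cset_oops();                // stacks, task queues and fingers only

  evacuate_collection_set(evacuation_info);
  free_collection_set(g1_policy()->collection_set(), evacuation_info);
  eagerly_reclaim_humongous_regions();       // reclaimed humongous objects can still
                                             // be referenced from SATB buffer entries

  // ... later in the pause, once the next CSet has been initialized ...
  _cm->verify_no_cset_oops();                // re-check against the new CSet
  _cm->note_end_of_gc();
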
diff -r 928bec4e217f -r c3b8486760aa hotspot/src/share/vm/gc_implementation/g1/satbQueue.cpp --- a/hotspot/src/share/vm/gc_implementation/g1/satbQueue.cpp Wed Apr 22 12:58:10 2015 +0200 +++ b/hotspot/src/share/vm/gc_implementation/g1/satbQueue.cpp Wed Apr 22 14:06:49 2015 -0400 @@ -180,12 +180,6 @@ return should_enqueue; } -void ObjPtrQueue::apply_closure(ObjectClosure* cl) { - if (_buf != NULL) { - apply_closure_to_buffer(cl, _buf, _index, _sz); - } -} - void ObjPtrQueue::apply_closure_and_empty(ObjectClosure* cl) { if (_buf != NULL) { apply_closure_to_buffer(cl, _buf, _index, _sz); @@ -317,28 +311,6 @@ } } -void SATBMarkQueueSet::iterate_completed_buffers_read_only(ObjectClosure* cl) { - assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint."); - assert(cl != NULL, "pre-condition"); - - BufferNode* nd = _completed_buffers_head; - while (nd != NULL) { - void** buf = BufferNode::make_buffer_from_node(nd); - ObjPtrQueue::apply_closure_to_buffer(cl, buf, 0, _sz); - nd = nd->next(); - } -} - -void SATBMarkQueueSet::iterate_thread_buffers_read_only(ObjectClosure* cl) { - assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint."); - assert(cl != NULL, "pre-condition"); - - for (JavaThread* t = Threads::first(); t; t = t->next()) { - t->satb_mark_queue().apply_closure(cl); - } - shared_satb_queue()->apply_closure(cl); -} - #ifndef PRODUCT // Helpful for debugging diff -r 928bec4e217f -r c3b8486760aa hotspot/src/share/vm/gc_implementation/g1/satbQueue.hpp --- a/hotspot/src/share/vm/gc_implementation/g1/satbQueue.hpp Wed Apr 22 12:58:10 2015 +0200 +++ b/hotspot/src/share/vm/gc_implementation/g1/satbQueue.hpp Wed Apr 22 14:06:49 2015 -0400 @@ -41,9 +41,6 @@ // Filter out unwanted entries from the buffer. void filter(); - // Apply the closure to all elements. - void apply_closure(ObjectClosure* cl); - // Apply the closure to all elements and empty the buffer; void apply_closure_and_empty(ObjectClosure* cl); @@ -105,12 +102,6 @@ // completed buffers exist, return false. bool apply_closure_to_completed_buffer(ObjectClosure* closure); - // Apply the given closure on enqueued and currently-active buffers - // respectively. Both methods are read-only, i.e., they do not - // modify any of the buffers. - void iterate_completed_buffers_read_only(ObjectClosure* cl); - void iterate_thread_buffers_read_only(ObjectClosure* cl); - #ifndef PRODUCT // Helpful for debugging void print_all(const char* msg);
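
With the read-only walks removed, SATB buffers are only visited through the draining interface that remains declared in satbQueue.hpp. A minimal usage sketch follows; it assumes apply_closure_to_completed_buffer() consumes one completed buffer per call and returns false when none remain, as its declaration above suggests, and CountClosure is a hypothetical ObjectClosure used purely for illustration:

  // Hypothetical closure: counts the objects still sitting in completed SATB buffers.
  class CountClosure : public ObjectClosure {
   public:
    size_t _count;
    CountClosure() : _count(0) {}
    virtual void do_object(oop obj) { _count++; }
  };

  // Drain the completed buffers at a safepoint; each successful call applies the
  // closure to one buffer's entries, and the loop stops once the list is empty.
  CountClosure cl;
  SATBMarkQueueSet& qs = JavaThread::satb_mark_queue_set();
  while (qs.apply_closure_to_completed_buffer(&cl)) {
  }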