8214201: Make PtrQueueSet completed buffer list private
Summary: Merge all completed buffer list handling into PtrQueueSet and make it private
Reviewed-by: tschatzl, sjohanss
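
The effect of the change, in outline: the completed buffer list (head, tail, element count, and the process_completed_buffers flag) is now owned entirely by PtrQueueSet, and subclasses reach it only through enqueue_completed_buffer, get_completed_buffer, abandon_completed_buffers and merge_bufferlists instead of touching the fields directly. Below is a simplified, self-contained model of that encapsulated list, not HotSpot code: std::mutex stands in for the _cbl_mon Monitor, and the Node/CompletedBufferList names are invented for the sketch.

    // Simplified model of the list PtrQueueSet now encapsulates (illustrative only).
    #include <cstddef>
    #include <mutex>

    struct Node { Node* next = nullptr; };        // stands in for BufferNode

    class CompletedBufferList {                   // models the private part of PtrQueueSet
      std::mutex _mon;                            // models _cbl_mon
      Node* _head = nullptr;                      // models _completed_buffers_head
      Node* _tail = nullptr;                      // models _completed_buffers_tail
      size_t _count = 0;                          // models _n_completed_buffers

    public:
      void enqueue(Node* n) {                     // models enqueue_completed_buffer
        std::lock_guard<std::mutex> x(_mon);
        n->next = nullptr;
        if (_tail == nullptr) { _head = n; } else { _tail->next = n; }
        _tail = n;
        ++_count;
      }

      Node* get(size_t stop_at = 0) {             // models get_completed_buffer
        std::lock_guard<std::mutex> x(_mon);
        if (_count <= stop_at) return nullptr;    // leave stop_at buffers behind
        Node* n = _head;
        _head = n->next;
        if (_head == nullptr) _tail = nullptr;
        --_count;
        n->next = nullptr;
        return n;
      }
    };

Keeping the lock, the list pointers and the count behind one class is what lets the length invariant (list length == _n_completed_buffers) be asserted in a single place, as the new assert_completed_buffers_list_len_correct_locked() does below.
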
--- a/src/hotspot/share/gc/g1/dirtyCardQueue.cpp Tue Dec 25 18:35:42 2018 +0300
+++ b/src/hotspot/share/gc/g1/dirtyCardQueue.cpp Wed Dec 26 19:24:00 2018 -0500
@@ -156,7 +156,7 @@
PtrQueueSet::initialize(cbl_mon, allocator);
_shared_dirty_card_queue.set_lock(lock);
if (init_free_ids) {
- _free_ids = new FreeIdSet(num_par_ids(), _cbl_mon);
+ _free_ids = new FreeIdSet(num_par_ids(), cbl_mon);
}
}
@@ -217,29 +217,6 @@
return result;
}
-
-BufferNode* DirtyCardQueueSet::get_completed_buffer(size_t stop_at) {
- MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
-
- if (_n_completed_buffers <= stop_at) {
- return NULL;
- }
-
- assert(_n_completed_buffers > 0, "invariant");
- assert(_completed_buffers_head != NULL, "invariant");
- assert(_completed_buffers_tail != NULL, "invariant");
-
- BufferNode* nd = _completed_buffers_head;
- _completed_buffers_head = nd->next();
- _n_completed_buffers--;
- if (_completed_buffers_head == NULL) {
- assert(_n_completed_buffers == 0, "Invariant");
- _completed_buffers_tail = NULL;
- }
- DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
- return nd;
-}
-
bool DirtyCardQueueSet::refine_completed_buffer_concurrently(uint worker_i, size_t stop_at) {
G1RefineCardConcurrentlyClosure cl;
return apply_closure_to_completed_buffer(&cl, worker_i, stop_at, false);
@@ -267,7 +244,7 @@
} else {
// Return partially processed buffer to the queue.
guarantee(!during_pause, "Should never stop early");
- enqueue_complete_buffer(nd);
+ enqueue_completed_buffer(nd);
}
return true;
}
@@ -288,32 +265,9 @@
}
}
-// Deallocates any completed log buffers
-void DirtyCardQueueSet::clear() {
- BufferNode* buffers_to_delete = NULL;
- {
- MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
- while (_completed_buffers_head != NULL) {
- BufferNode* nd = _completed_buffers_head;
- _completed_buffers_head = nd->next();
- nd->set_next(buffers_to_delete);
- buffers_to_delete = nd;
- }
- _n_completed_buffers = 0;
- _completed_buffers_tail = NULL;
- DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
- }
- while (buffers_to_delete != NULL) {
- BufferNode* nd = buffers_to_delete;
- buffers_to_delete = nd->next();
- deallocate_buffer(nd);
- }
-
-}
-
void DirtyCardQueueSet::abandon_logs() {
assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
- clear();
+ abandon_completed_buffers();
// Since abandon is done only at safepoints, we can safely manipulate
// these queues.
for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
@@ -333,10 +287,11 @@
// the global list of logs. Temporarily turn off the limit on the number
// of outstanding buffers.
assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
- SizeTFlagSetting local_max(_max_completed_buffers,
- MaxCompletedBuffersUnlimited);
+ size_t old_limit = max_completed_buffers();
+ set_max_completed_buffers(MaxCompletedBuffersUnlimited);
for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
concatenate_log(G1ThreadLocalData::dirty_card_queue(t));
}
concatenate_log(_shared_dirty_card_queue);
+ set_max_completed_buffers(old_limit);
}
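
Note on the concatenate_logs() hunk above: with _max_completed_buffers no longer directly writable by the subclass, the scoped SizeTFlagSetting helper is replaced by an explicit save of max_completed_buffers() and a restore at the end. That is safe here because the function runs at a safepoint and has no early return between the two calls. A minimal sketch of the same pattern, using an invented QueueSetModel stand-in rather than the real accessors:

    #include <cstddef>
    #include <limits>

    // Toy stand-in for the accessor pair PtrQueueSet exposes; illustrative only.
    struct QueueSetModel {
      size_t _max = 128;
      size_t max_completed_buffers() const { return _max; }
      void set_max_completed_buffers(size_t m) { _max = m; }
    };

    // Same save/restore shape as concatenate_logs(): valid because the body has
    // no early return or exception path between the save and the restore.
    void with_unlimited_buffers(QueueSetModel& qs) {
      size_t old_limit = qs.max_completed_buffers();                    // save
      qs.set_max_completed_buffers(std::numeric_limits<size_t>::max()); // lift the cap
      // ... enqueue work here ...
      qs.set_max_completed_buffers(old_limit);                          // restore
    }
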
--- a/src/hotspot/share/gc/g1/dirtyCardQueue.hpp Tue Dec 25 18:35:42 2018 +0300
+++ b/src/hotspot/share/gc/g1/dirtyCardQueue.hpp Wed Dec 26 19:24:00 2018 -0500
@@ -136,9 +136,7 @@
// must never return false. Must only be called during GC.
bool apply_closure_during_gc(CardTableEntryClosure* cl, uint worker_i);
- BufferNode* get_completed_buffer(size_t stop_at);
-
- void reset_for_par_iteration() { _cur_par_buffer_node = _completed_buffers_head; }
+ void reset_for_par_iteration() { _cur_par_buffer_node = completed_buffers_head(); }
// Applies the current closure to all completed buffers, non-consumptively.
// Can be used in parallel, all callers using the iteration state initialized
// by reset_for_par_iteration.
@@ -148,16 +146,12 @@
return &_shared_dirty_card_queue;
}
- // Deallocate any completed log buffers
- void clear();
-
// If a full collection is happening, reset partial logs, and ignore
// completed ones: the full collection will make them all irrelevant.
void abandon_logs();
// If any threads have partial logs, add them to the global list of logs.
void concatenate_logs();
- void clear_n_completed_buffers() { _n_completed_buffers = 0;}
jint processed_buffers_mut() {
return _processed_buffers_mut;
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp Tue Dec 25 18:35:42 2018 +0300
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp Wed Dec 26 19:24:00 2018 -0500
@@ -1949,9 +1949,8 @@
while (dcqs.apply_closure_during_gc(cl, worker_i)) {
n_completed_buffers++;
}
+ assert(dcqs.completed_buffers_num() == 0, "Completed buffers exist!");
g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, n_completed_buffers, G1GCPhaseTimes::UpdateRSProcessedBuffers);
- dcqs.clear_n_completed_buffers();
- assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
}
// Computes the sum of the storage used by the various regions.
--- a/src/hotspot/share/gc/g1/g1ConcurrentRefineThread.cpp Tue Dec 25 18:35:42 2018 +0300
+++ b/src/hotspot/share/gc/g1/g1ConcurrentRefineThread.cpp Wed Dec 26 19:24:00 2018 -0500
@@ -75,7 +75,7 @@
set_active(true);
} else {
DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
- dcqs.set_process_completed(true);
+ dcqs.set_process_completed_buffers(true);
}
_monitor->notify();
}
@@ -86,7 +86,7 @@
set_active(false);
} else {
DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
- dcqs.set_process_completed(false);
+ dcqs.set_process_completed_buffers(false);
}
}
--- a/src/hotspot/share/gc/shared/ptrQueue.cpp Tue Dec 25 18:35:42 2018 +0300
+++ b/src/hotspot/share/gc/shared/ptrQueue.cpp Wed Dec 26 19:24:00 2018 -0500
@@ -54,7 +54,7 @@
// No work to do.
qset()->deallocate_buffer(node);
} else {
- qset()->enqueue_complete_buffer(node);
+ qset()->enqueue_completed_buffer(node);
}
_buf = NULL;
set_index(0);
@@ -165,11 +165,11 @@
_completed_buffers_tail(NULL),
_n_completed_buffers(0),
_process_completed_buffers_threshold(ProcessCompletedBuffersThresholdNever),
- _process_completed(false),
- _all_active(false),
+ _process_completed_buffers(false),
_notify_when_complete(notify_when_complete),
_max_completed_buffers(MaxCompletedBuffersUnlimited),
- _completed_buffers_padding(0)
+ _completed_buffers_padding(0),
+ _all_active(false)
{}
PtrQueueSet::~PtrQueueSet() {
@@ -211,11 +211,11 @@
BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
_buf = NULL; // clear shared _buf field
- qset()->enqueue_complete_buffer(node);
+ qset()->enqueue_completed_buffer(node);
assert(_buf == NULL, "multiple enqueuers appear to be racing");
} else {
BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
- if (qset()->process_or_enqueue_complete_buffer(node)) {
+ if (qset()->process_or_enqueue_completed_buffer(node)) {
// Recycle the buffer. No allocation.
assert(_buf == BufferNode::make_buffer_from_node(node), "invariant");
assert(capacity() == qset()->buffer_size(), "invariant");
@@ -231,7 +231,7 @@
reset();
}
-bool PtrQueueSet::process_or_enqueue_complete_buffer(BufferNode* node) {
+bool PtrQueueSet::process_or_enqueue_completed_buffer(BufferNode* node) {
if (Thread::current()->is_Java_thread()) {
// If the number of buffers exceeds the limit, make this Java
// thread do the processing itself. We don't lock to access
@@ -246,11 +246,11 @@
}
}
// The buffer will be enqueued. The caller will have to get a new one.
- enqueue_complete_buffer(node);
+ enqueue_completed_buffer(node);
return false;
}
-void PtrQueueSet::enqueue_complete_buffer(BufferNode* cbn) {
+void PtrQueueSet::enqueue_completed_buffer(BufferNode* cbn) {
MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
cbn->set_next(NULL);
if (_completed_buffers_tail == NULL) {
@@ -263,35 +263,72 @@
}
_n_completed_buffers++;
- if (!_process_completed &&
+ if (!_process_completed_buffers &&
(_n_completed_buffers > _process_completed_buffers_threshold)) {
- _process_completed = true;
+ _process_completed_buffers = true;
if (_notify_when_complete) {
_cbl_mon->notify();
}
}
- DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
+ assert_completed_buffers_list_len_correct_locked();
+}
+
+BufferNode* PtrQueueSet::get_completed_buffer(size_t stop_at) {
+ MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
+
+ if (_n_completed_buffers <= stop_at) {
+ return NULL;
+ }
+
+ assert(_n_completed_buffers > 0, "invariant");
+ assert(_completed_buffers_head != NULL, "invariant");
+ assert(_completed_buffers_tail != NULL, "invariant");
+
+ BufferNode* bn = _completed_buffers_head;
+ _n_completed_buffers--;
+ _completed_buffers_head = bn->next();
+ if (_completed_buffers_head == NULL) {
+ assert(_n_completed_buffers == 0, "invariant");
+ _completed_buffers_tail = NULL;
+ _process_completed_buffers = false;
+ }
+ assert_completed_buffers_list_len_correct_locked();
+ bn->set_next(NULL);
+ return bn;
}
-size_t PtrQueueSet::completed_buffers_list_length() {
- size_t n = 0;
- BufferNode* cbn = _completed_buffers_head;
- while (cbn != NULL) {
- n++;
- cbn = cbn->next();
+void PtrQueueSet::abandon_completed_buffers() {
+ BufferNode* buffers_to_delete = NULL;
+ {
+ MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
+ buffers_to_delete = _completed_buffers_head;
+ _completed_buffers_head = NULL;
+ _completed_buffers_tail = NULL;
+ _n_completed_buffers = 0;
+ _process_completed_buffers = false;
}
- return n;
+ while (buffers_to_delete != NULL) {
+ BufferNode* bn = buffers_to_delete;
+ buffers_to_delete = bn->next();
+ bn->set_next(NULL);
+ deallocate_buffer(bn);
+ }
}
-void PtrQueueSet::assert_completed_buffer_list_len_correct() {
- MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
- assert_completed_buffer_list_len_correct_locked();
+#ifdef ASSERT
+
+void PtrQueueSet::assert_completed_buffers_list_len_correct_locked() {
+ assert_lock_strong(_cbl_mon);
+ size_t n = 0;
+ for (BufferNode* bn = _completed_buffers_head; bn != NULL; bn = bn->next()) {
+ ++n;
+ }
+ assert(n == _n_completed_buffers,
+ "Completed buffer length is wrong: counted: " SIZE_FORMAT
+ ", expected: " SIZE_FORMAT, n, _n_completed_buffers);
}
-void PtrQueueSet::assert_completed_buffer_list_len_correct_locked() {
- guarantee(completed_buffers_list_length() == _n_completed_buffers,
- "Completed buffer length is wrong.");
-}
+#endif // ASSERT
// Merge lists of buffers. Notify the processing threads.
// The source queue is emptied as a result. The queues
@@ -315,16 +352,18 @@
src->_n_completed_buffers = 0;
src->_completed_buffers_head = NULL;
src->_completed_buffers_tail = NULL;
+ src->_process_completed_buffers = false;
assert(_completed_buffers_head == NULL && _completed_buffers_tail == NULL ||
_completed_buffers_head != NULL && _completed_buffers_tail != NULL,
"Sanity");
+ assert_completed_buffers_list_len_correct_locked();
}
void PtrQueueSet::notify_if_necessary() {
MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
if (_n_completed_buffers > _process_completed_buffers_threshold) {
- _process_completed = true;
+ _process_completed_buffers = true;
if (_notify_when_complete)
_cbl_mon->notify();
}
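
For illustration, a hypothetical consumer loop in the style of the refinement threads, showing how get_completed_buffer(stop_at) and enqueue_completed_buffer() are meant to be paired. It reuses the CompletedBufferList/Node model sketched after the change summary above; drain_until and process are invented names, not HotSpot functions.

    // Pop buffers until the backlog drops to stop_at, leaving the rest for
    // other workers. A partially processed buffer goes back on the list, as
    // refine_completed_buffer_concurrently() does via enqueue_completed_buffer().
    bool drain_until(CompletedBufferList& list, size_t stop_at,
                     bool (*process)(Node*)) {
      while (Node* n = list.get(stop_at)) {   // NULL once count <= stop_at
        if (!process(n)) {
          list.enqueue(n);                    // return it for someone else
          return false;
        }
        // In the real code the buffer would be deallocated or recycled here.
      }
      return true;
    }
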
--- a/src/hotspot/share/gc/shared/ptrQueue.hpp Tue Dec 25 18:35:42 2018 +0300
+++ b/src/hotspot/share/gc/shared/ptrQueue.hpp Wed Dec 26 19:24:00 2018 -0500
@@ -275,19 +275,16 @@
// A PtrQueueSet represents resources common to a set of pointer queues.
// In particular, the individual queues allocate buffers from this shared
// set, and return completed buffers to the set.
-// All these variables are are protected by the TLOQ_CBL_mon. XXX ???
class PtrQueueSet {
BufferNode::Allocator* _allocator;
-protected:
Monitor* _cbl_mon; // Protects the fields below.
BufferNode* _completed_buffers_head;
BufferNode* _completed_buffers_tail;
size_t _n_completed_buffers;
+
size_t _process_completed_buffers_threshold;
- volatile bool _process_completed;
-
- bool _all_active;
+ volatile bool _process_completed_buffers;
// If true, notify_all on _cbl_mon when the threshold is reached.
bool _notify_when_complete;
@@ -297,11 +294,11 @@
size_t _max_completed_buffers;
size_t _completed_buffers_padding;
- size_t completed_buffers_list_length();
- void assert_completed_buffer_list_len_correct_locked();
- void assert_completed_buffer_list_len_correct();
+ void assert_completed_buffers_list_len_correct_locked() NOT_DEBUG_RETURN;
protected:
+ bool _all_active;
+
// A mutator thread does the the work of processing a buffer.
// Returns "true" iff the work is complete (and the buffer may be
// deallocated).
@@ -318,6 +315,12 @@
// arguments.
void initialize(Monitor* cbl_mon, BufferNode::Allocator* allocator);
+ // For (unlocked!) iteration over the completed buffers.
+ BufferNode* completed_buffers_head() const { return _completed_buffers_head; }
+
+ // Deallocate all of the completed buffers.
+ void abandon_completed_buffers();
+
public:
// Return the buffer for a BufferNode of size buffer_size().
@@ -327,18 +330,21 @@
// to have been allocated with a size of buffer_size().
void deallocate_buffer(BufferNode* node);
- // Declares that "buf" is a complete buffer.
- void enqueue_complete_buffer(BufferNode* node);
+ // A completed buffer is a buffer the mutator is finished with, and
+ // is ready to be processed by the collector. It need not be full.
+
+ // Adds node to the completed buffer list.
+ void enqueue_completed_buffer(BufferNode* node);
+
+ // If the number of completed buffers is > stop_at, then remove and
+ // return a completed buffer from the list. Otherwise, return NULL.
+ BufferNode* get_completed_buffer(size_t stop_at = 0);
// To be invoked by the mutator.
- bool process_or_enqueue_complete_buffer(BufferNode* node);
+ bool process_or_enqueue_completed_buffer(BufferNode* node);
- bool completed_buffers_exist_dirty() {
- return _n_completed_buffers > 0;
- }
-
- bool process_completed_buffers() { return _process_completed; }
- void set_process_completed(bool x) { _process_completed = x; }
+ bool process_completed_buffers() { return _process_completed_buffers; }
+ void set_process_completed_buffers(bool x) { _process_completed_buffers = x; }
bool is_active() { return _all_active; }
--- a/src/hotspot/share/gc/shared/satbMarkQueue.cpp Tue Dec 25 18:35:42 2018 +0300
+++ b/src/hotspot/share/gc/shared/satbMarkQueue.cpp Wed Dec 26 19:24:00 2018 -0500
@@ -180,17 +180,7 @@
}
bool SATBMarkQueueSet::apply_closure_to_completed_buffer(SATBBufferClosure* cl) {
- BufferNode* nd = NULL;
- {
- MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
- if (_completed_buffers_head != NULL) {
- nd = _completed_buffers_head;
- _completed_buffers_head = nd->next();
- if (_completed_buffers_head == NULL) _completed_buffers_tail = NULL;
- _n_completed_buffers--;
- if (_n_completed_buffers == 0) _process_completed = false;
- }
- }
+ BufferNode* nd = get_completed_buffer();
if (nd != NULL) {
void **buf = BufferNode::make_buffer_from_node(nd);
size_t index = nd->index();
@@ -216,7 +206,7 @@
tty->cr();
tty->print_cr("SATB BUFFERS [%s]", msg);
- BufferNode* nd = _completed_buffers_head;
+ BufferNode* nd = completed_buffers_head();
int i = 0;
while (nd != NULL) {
void** buf = BufferNode::make_buffer_from_node(nd);
@@ -238,24 +228,7 @@
#endif // PRODUCT
void SATBMarkQueueSet::abandon_partial_marking() {
- BufferNode* buffers_to_delete = NULL;
- {
- MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
- while (_completed_buffers_head != NULL) {
- BufferNode* nd = _completed_buffers_head;
- _completed_buffers_head = nd->next();
- nd->set_next(buffers_to_delete);
- buffers_to_delete = nd;
- }
- _completed_buffers_tail = NULL;
- _n_completed_buffers = 0;
- DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
- }
- while (buffers_to_delete != NULL) {
- BufferNode* nd = buffers_to_delete;
- buffers_to_delete = nd->next();
- deallocate_buffer(nd);
- }
+ abandon_completed_buffers();
assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
// So we can safely manipulate these queues.
for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {