--- a/hotspot/src/share/vm/gc_implementation/g1/ptrQueue.cpp Fri Dec 11 09:30:48 2009 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/ptrQueue.cpp Wed Dec 16 15:12:51 2009 -0800
@@ -64,8 +64,8 @@
while (_index == 0) {
handle_zero_index();
}
+
assert(_index > 0, "postcondition");
-
_index -= oopSize;
_buf[byte_index_to_index((int)_index)] = ptr;
assert(0 <= _index && _index <= _sz, "Invariant.");
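For context, the hunk above is the body of PtrQueue::enqueue_known_active(). A sketch of the whole function as it reads after the patch (reconstructed from this hunk; byte_index_to_index() is assumed to be the oopSize divider declared in ptrQueue.hpp):

// Sketch, reconstructed from the hunk above. _index counts *bytes*
// remaining and moves downward from _sz to 0; byte_index_to_index()
// converts the byte offset into a slot index (offset / oopSize).
void PtrQueue::enqueue_known_active(void* ptr) {
  assert(0 <= _index && _index <= _sz, "Invariant.");

  while (_index == 0) {
    handle_zero_index();  // installs a fresh or recycled buffer
  }

  assert(_index > 0, "postcondition");
  _index -= oopSize;
  _buf[byte_index_to_index((int)_index)] = ptr;
  assert(0 <= _index && _index <= _sz, "Invariant.");
}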
@@ -99,95 +99,110 @@
assert(_sz > 0, "Didn't set a buffer size.");
MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag);
if (_fl_owner->_buf_free_list != NULL) {
- void** res = _fl_owner->_buf_free_list;
- _fl_owner->_buf_free_list = (void**)_fl_owner->_buf_free_list[0];
+ void** res = BufferNode::make_buffer_from_node(_fl_owner->_buf_free_list);
+ _fl_owner->_buf_free_list = _fl_owner->_buf_free_list->next();
_fl_owner->_buf_free_list_sz--;
- // Just override the next pointer with NULL, just in case we scan this part
- // of the buffer.
- res[0] = NULL;
return res;
} else {
- return (void**) NEW_C_HEAP_ARRAY(char, _sz);
+ // Allocate space for the BufferNode in front of the buffer.
+ char *b = NEW_C_HEAP_ARRAY(char, _sz + BufferNode::aligned_size());
+ return BufferNode::make_buffer_from_block(b);
}
}
void PtrQueueSet::deallocate_buffer(void** buf) {
assert(_sz > 0, "Didn't set a buffer size.");
MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag);
- buf[0] = (void*)_fl_owner->_buf_free_list;
- _fl_owner->_buf_free_list = buf;
+ BufferNode *node = BufferNode::make_node_from_buffer(buf);
+ node->set_next(_fl_owner->_buf_free_list);
+ _fl_owner->_buf_free_list = node;
_fl_owner->_buf_free_list_sz++;
}
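The BufferNode conversion helpers used in allocate_buffer() and deallocate_buffer() live in ptrQueue.hpp, not in this diff. A minimal sketch of the layout they imply (reconstructed from the call sites, so the details are assumptions; round_to() is HotSpot's alignment helper):

// Sketch only. A "block" is the raw C-heap allocation; the node
// header sits immediately in front of the user-visible buffer:
//
//   block:  [ BufferNode header (aligned) | buffer slots ... ]
//
class BufferNode {
  size_t      _index;
  BufferNode* _next;
public:
  BufferNode() : _index(0), _next(NULL) { }
  BufferNode* next() const     { return _next; }
  void set_next(BufferNode* n) { _next = n; }
  void set_index(size_t i)     { _index = i; }

  // Header size rounded up so the buffer keeps pointer alignment.
  static size_t aligned_size() {
    return round_to(sizeof(BufferNode), sizeof(void*));
  }
  static void** make_buffer_from_block(void* p) {
    return (void**)((char*)p + aligned_size());
  }
  static void* make_block_from_buffer(void** p) {
    return (void*)((char*)p - aligned_size());
  }
  static void** make_buffer_from_node(BufferNode* n) {
    return make_buffer_from_block(n);
  }
  static BufferNode* make_node_from_buffer(void** b) {
    return (BufferNode*)make_block_from_buffer(b);
  }
  static void* make_block_from_node(BufferNode* n) {
    return (void*)n;
  }
  // Placement-construct a node in the header of an existing buffer.
  static BufferNode* new_from_buffer(void** buf) {
    return new (make_block_from_buffer(buf)) BufferNode();
  }
};

With this layout the free list threads through the node headers instead of overwriting buffer slot 0, which is why the old "res[0] = NULL" scrub is no longer needed.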
void PtrQueueSet::reduce_free_list() {
+ assert(_fl_owner == this, "Free list reduction is allowed only for the owner");
// For now we'll adopt the strategy of deleting half.
MutexLockerEx x(_fl_lock, Mutex::_no_safepoint_check_flag);
size_t n = _buf_free_list_sz / 2;
while (n > 0) {
assert(_buf_free_list != NULL, "_buf_free_list_sz must be wrong.");
- void** head = _buf_free_list;
- _buf_free_list = (void**)_buf_free_list[0];
- FREE_C_HEAP_ARRAY(char, head);
+ void* b = BufferNode::make_block_from_node(_buf_free_list);
+ _buf_free_list = _buf_free_list->next();
+ FREE_C_HEAP_ARRAY(char, b);
_buf_free_list_sz --;
n--;
}
}
-void PtrQueueSet::enqueue_complete_buffer(void** buf, size_t index, bool ignore_max_completed) {
- // I use explicit locking here because there's a bailout in the middle.
- _cbl_mon->lock_without_safepoint_check();
-
- Thread* thread = Thread::current();
- assert( ignore_max_completed ||
- thread->is_Java_thread() ||
- SafepointSynchronize::is_at_safepoint(),
- "invariant" );
- ignore_max_completed = ignore_max_completed || !thread->is_Java_thread();
+void PtrQueue::handle_zero_index() {
+ assert(0 == _index, "Precondition.");
+ // This thread records the full buffer and allocates a new one (while
+ // holding the lock if there is one).
+ if (_buf != NULL) {
+ if (_lock) {
+ locking_enqueue_completed_buffer(_buf);
+ } else {
+ if (qset()->process_or_enqueue_complete_buffer(_buf)) {
+ // Recycle the buffer. No allocation.
+ _sz = qset()->buffer_size();
+ _index = _sz;
+ return;
+ }
+ }
+ }
+ // Allocate a fresh buffer; the old one, if any, was handed off above.
+ _buf = qset()->allocate_buffer();
+ _sz = qset()->buffer_size();
+ _index = _sz;
+ assert(0 <= _index && _index <= _sz, "Invariant.");
+}
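When the queue is guarded by a per-queue lock (the _lock branch above), enqueueing goes through locking_enqueue_completed_buffer(), which is defined elsewhere in ptrQueue.cpp and not shown in this diff. A plausible sketch, assuming _lock and _cbl_mon may share a lock rank:

// Sketch (the real definition is outside this diff): hand the full
// buffer to the queue set while temporarily dropping the per-queue
// lock, since enqueue_complete_buffer() takes _cbl_mon and the two
// locks may have the same rank.
void PtrQueue::locking_enqueue_completed_buffer(void** buf) {
  assert(_lock->owned_by_self(), "Required.");
  _lock->unlock();
  qset()->enqueue_complete_buffer(buf);
  // Re-take the lock; the caller expects to still hold it.
  _lock->lock_without_safepoint_check();
}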
- if (!ignore_max_completed && _max_completed_queue > 0 &&
- _n_completed_buffers >= (size_t) _max_completed_queue) {
- _cbl_mon->unlock();
- bool b = mut_process_buffer(buf);
- if (b) {
- deallocate_buffer(buf);
- return;
+bool PtrQueueSet::process_or_enqueue_complete_buffer(void** buf) {
+ if (Thread::current()->is_Java_thread()) {
+ // We read the counters without taking the lock; a slightly stale
+ // (epsilon-imprecise) view is fine here.
+ if (_max_completed_queue == 0 ||
+ (_max_completed_queue > 0 &&
+ _n_completed_buffers >= _max_completed_queue + _completed_queue_padding)) {
+ bool b = mut_process_buffer(buf);
+ if (b) {
+ // A true result means the buffer was processed in place, not
+ // deallocated, so the caller may reuse it.
+ return true;
+ }
}
+ }
+ // The buffer will be enqueued. The caller will have to get a new one.
+ enqueue_complete_buffer(buf);
+ return false;
+}
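mut_process_buffer() is the mutator-side processing hook; this diff does not show its definition. With a hypothetical _max_completed_queue of 64 and _completed_queue_padding of 16, a Java thread takes this path once 80 completed buffers are pending (or always when the maximum is 0). A sketch of the assumed base-class default:

// Sketch of the assumed default: the base class has no mutator-side
// processing, so the buffer must be enqueued. Subclasses (e.g.
// DirtyCardQueueSet) override this to process the buffer on the
// mutator thread and return true on success.
bool PtrQueueSet::mut_process_buffer(void** buf) {
  return false;
}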
- // Otherwise, go ahead and enqueue the buffer. Must reaquire the lock.
- _cbl_mon->lock_without_safepoint_check();
- }
-
- // Here we still hold the _cbl_mon.
- CompletedBufferNode* cbn = new CompletedBufferNode;
- cbn->buf = buf;
- cbn->next = NULL;
- cbn->index = index;
+void PtrQueueSet::enqueue_complete_buffer(void** buf, size_t index) {
+ MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
+ BufferNode* cbn = BufferNode::new_from_buffer(buf);
+ cbn->set_index(index);
if (_completed_buffers_tail == NULL) {
assert(_completed_buffers_head == NULL, "Well-formedness");
_completed_buffers_head = cbn;
_completed_buffers_tail = cbn;
} else {
- _completed_buffers_tail->next = cbn;
+ _completed_buffers_tail->set_next(cbn);
_completed_buffers_tail = cbn;
}
_n_completed_buffers++;
- if (!_process_completed &&
+ if (!_process_completed && _process_completed_threshold >= 0 &&
_n_completed_buffers >= _process_completed_threshold) {
_process_completed = true;
if (_notify_when_complete)
- _cbl_mon->notify_all();
+ _cbl_mon->notify();
}
debug_only(assert_completed_buffer_list_len_correct_locked());
- _cbl_mon->unlock();
}
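On the consumer side, processing threads pop nodes from the completed list under the same monitor. A hedged sketch of such a pop (hypothetical helper name; the concrete versions live in subclasses such as DirtyCardQueueSet):

// Hypothetical consumer-side pop, for illustration only.
BufferNode* PtrQueueSet::pop_completed_buffer_sketch() {
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  BufferNode* nd = _completed_buffers_head;
  if (nd != NULL) {
    _completed_buffers_head = nd->next();
    if (_completed_buffers_head == NULL) {
      _completed_buffers_tail = NULL;  // list is now empty
    }
    _n_completed_buffers--;
  }
  return nd;  // caller processes the buffer, then deallocates it
}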
int PtrQueueSet::completed_buffers_list_length() {
int n = 0;
- CompletedBufferNode* cbn = _completed_buffers_head;
+ BufferNode* cbn = _completed_buffers_head;
while (cbn != NULL) {
n++;
- cbn = cbn->next;
+ cbn = cbn->next();
}
return n;
}
@@ -198,7 +213,7 @@
}
void PtrQueueSet::assert_completed_buffer_list_len_correct_locked() {
- guarantee((size_t)completed_buffers_list_length() == _n_completed_buffers,
+ guarantee(completed_buffers_list_length() == _n_completed_buffers,
"Completed buffer length is wrong.");
}
@@ -207,12 +222,8 @@
_sz = sz * oopSize;
}
-void PtrQueueSet::set_process_completed_threshold(size_t sz) {
- _process_completed_threshold = sz;
-}
-
-// Merge lists of buffers. Notify waiting threads if the length of the list
-// exceeds threshold. The source queue is emptied as a result. The queues
+// Merge lists of buffers. The source queue is emptied as a result;
+// notifying the processing threads is now the caller's job (see
+// notify_if_necessary()). The queues
// must share the monitor.
void PtrQueueSet::merge_bufferlists(PtrQueueSet *src) {
assert(_cbl_mon == src->_cbl_mon, "Should share the same lock");
@@ -224,7 +235,7 @@
} else {
assert(_completed_buffers_head != NULL, "Well formedness");
if (src->_completed_buffers_head != NULL) {
- _completed_buffers_tail->next = src->_completed_buffers_head;
+ _completed_buffers_tail->set_next(src->_completed_buffers_head);
_completed_buffers_tail = src->_completed_buffers_tail;
}
}
@@ -237,31 +248,13 @@
assert(_completed_buffers_head == NULL && _completed_buffers_tail == NULL ||
_completed_buffers_head != NULL && _completed_buffers_tail != NULL,
"Sanity");
-
- if (!_process_completed &&
- _n_completed_buffers >= _process_completed_threshold) {
- _process_completed = true;
- if (_notify_when_complete)
- _cbl_mon->notify_all();
- }
}
-// Merge free lists of the two queues. The free list of the source
-// queue is emptied as a result. The queues must share the same
-// mutex that guards free lists.
-void PtrQueueSet::merge_freelists(PtrQueueSet* src) {
- assert(_fl_lock == src->_fl_lock, "Should share the same lock");
- MutexLockerEx x(_fl_lock, Mutex::_no_safepoint_check_flag);
- if (_buf_free_list != NULL) {
- void **p = _buf_free_list;
- while (*p != NULL) {
- p = (void**)*p;
- }
- *p = src->_buf_free_list;
- } else {
- _buf_free_list = src->_buf_free_list;
+void PtrQueueSet::notify_if_necessary() {
+ MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
+ if (_n_completed_buffers >= _process_completed_threshold ||
+ _max_completed_queue == 0) {
+ _process_completed = true;
+ if (_notify_when_complete)
+ _cbl_mon->notify();
}
- _buf_free_list_sz += src->_buf_free_list_sz;
- src->_buf_free_list = NULL;
- src->_buf_free_list_sz = 0;
}
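The switch from notify_all() to notify() pairs with the waiting side: each wake-up activates one processing thread. A hypothetical wait loop, for illustration (process_completed_buffers() is assumed to be the accessor for _process_completed):

// Hypothetical worker wait-loop: sleep on _cbl_mon until
// enqueue_complete_buffer() or notify_if_necessary() signals that
// enough completed buffers have accumulated.
void sample_refinement_wait(PtrQueueSet* qs, Monitor* cbl_mon) {
  MutexLockerEx x(cbl_mon, Mutex::_no_safepoint_check_flag);
  while (!qs->process_completed_buffers()) {
    cbl_mon->wait(Mutex::_no_safepoint_check_flag);
  }
  // ...pop and process completed buffers, then wait again...
}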