8154343: Make SATB related code available to other GCs
Summary: Move ptrQueue and satbMarkQueue files from g1 to shared.
Reviewed-by: rkennke, sangheki
--- a/src/hotspot/share/gc/g1/dirtyCardQueue.hpp Fri Aug 17 21:36:02 2018 -0700
+++ b/src/hotspot/share/gc/g1/dirtyCardQueue.hpp Sat Aug 18 13:59:25 2018 -0400
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,7 @@
#ifndef SHARE_VM_GC_G1_DIRTYCARDQUEUE_HPP
#define SHARE_VM_GC_G1_DIRTYCARDQUEUE_HPP
-#include "gc/g1/ptrQueue.hpp"
+#include "gc/shared/ptrQueue.hpp"
#include "memory/allocation.hpp"
class FreeIdSet;
--- a/src/hotspot/share/gc/g1/g1BarrierSet.cpp Fri Aug 17 21:36:02 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1BarrierSet.cpp Sat Aug 18 13:59:25 2018 -0400
@@ -27,9 +27,10 @@
#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/g1CardTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
+#include "gc/g1/g1SATBMarkQueueSet.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/heapRegion.hpp"
-#include "gc/g1/satbMarkQueue.hpp"
+#include "gc/shared/satbMarkQueue.hpp"
#include "logging/log.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
--- a/src/hotspot/share/gc/g1/g1SATBMarkQueueSet.cpp Fri Aug 17 21:36:02 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1SATBMarkQueueSet.cpp Sat Aug 18 13:59:25 2018 -0400
@@ -27,7 +27,7 @@
#include "gc/g1/g1SATBMarkQueueSet.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/heapRegion.hpp"
-#include "gc/g1/satbMarkQueue.hpp"
+#include "gc/shared/satbMarkQueue.hpp"
#include "oops/oop.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
--- a/src/hotspot/share/gc/g1/g1SATBMarkQueueSet.hpp Fri Aug 17 21:36:02 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1SATBMarkQueueSet.hpp Sat Aug 18 13:59:25 2018 -0400
@@ -25,7 +25,7 @@
#ifndef SHARE_VM_GC_G1_G1SATBMARKQUEUE_HPP
#define SHARE_VM_GC_G1_G1SATBMARKQUEUE_HPP
-#include "gc/g1/satbMarkQueue.hpp"
+#include "gc/shared/satbMarkQueue.hpp"
class G1CollectedHeap;
class JavaThread;
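
With satbMarkQueue.hpp moved under gc/shared, a collector other than G1 can reuse the SATB machinery by subclassing SATBMarkQueueSet, following the same pattern as g1SATBMarkQueueSet.hpp above. A minimal sketch under stated assumptions: every "My*" name and the is_marked() query are hypothetical, not part of this change.

    #include "gc/shared/satbMarkQueue.hpp"

    bool is_marked(void* entry); // hypothetical collector query

    class MySATBMarkQueueSet : public SATBMarkQueueSet {
      // Predicate for apply_filter(): return true for entries that can be
      // dropped from a buffer, e.g. objects this collector already marked.
      struct IsFilteredOut {
        bool operator()(void* entry) const { return is_marked(entry); }
      };

    public:
      // Hand back the SATB queue kept in this collector's thread-local data.
      virtual SATBMarkQueue& satb_queue_for_thread(JavaThread* const t) const {
        return MyThreadLocalData::satb_mark_queue(t);
      }

      // Compact a buffer in place using the shared two-fingered filter.
      virtual void filter(SATBMarkQueue* queue) {
        apply_filter(IsFilteredOut(), queue);
      }
    };
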
--- a/src/hotspot/share/gc/g1/g1ThreadLocalData.hpp Fri Aug 17 21:36:02 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1ThreadLocalData.hpp Sat Aug 18 13:59:25 2018 -0400
@@ -26,7 +26,7 @@
#include "gc/g1/dirtyCardQueue.hpp"
#include "gc/g1/g1BarrierSet.hpp"
-#include "gc/g1/satbMarkQueue.hpp"
+#include "gc/shared/satbMarkQueue.hpp"
#include "runtime/thread.hpp"
#include "utilities/debug.hpp"
#include "utilities/sizes.hpp"
--- a/src/hotspot/share/gc/g1/ptrQueue.cpp Fri Aug 17 21:36:02 2018 -0700
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,314 +0,0 @@
-/*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/g1/ptrQueue.hpp"
-#include "memory/allocation.hpp"
-#include "memory/allocation.inline.hpp"
-#include "runtime/mutex.hpp"
-#include "runtime/mutexLocker.hpp"
-#include "runtime/thread.inline.hpp"
-
-#include <new>
-
-PtrQueue::PtrQueue(PtrQueueSet* qset, bool permanent, bool active) :
- _qset(qset),
- _active(active),
- _permanent(permanent),
- _index(0),
- _capacity_in_bytes(0),
- _buf(NULL),
- _lock(NULL)
-{}
-
-PtrQueue::~PtrQueue() {
- assert(_permanent || (_buf == NULL), "queue must be flushed before delete");
-}
-
-void PtrQueue::flush_impl() {
- if (_buf != NULL) {
- BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
- if (is_empty()) {
- // No work to do.
- qset()->deallocate_buffer(node);
- } else {
- qset()->enqueue_complete_buffer(node);
- }
- _buf = NULL;
- set_index(0);
- }
-}
-
-
-void PtrQueue::enqueue_known_active(void* ptr) {
- while (_index == 0) {
- handle_zero_index();
- }
-
- assert(_buf != NULL, "postcondition");
- assert(index() > 0, "postcondition");
- assert(index() <= capacity(), "invariant");
- _index -= _element_size;
- _buf[index()] = ptr;
-}
-
-void PtrQueue::locking_enqueue_completed_buffer(BufferNode* node) {
- assert(_lock->owned_by_self(), "Required.");
- qset()->enqueue_complete_buffer(node);
-}
-
-
-BufferNode* BufferNode::allocate(size_t size) {
- size_t byte_size = size * sizeof(void*);
- void* data = NEW_C_HEAP_ARRAY(char, buffer_offset() + byte_size, mtGC);
- return new (data) BufferNode;
-}
-
-void BufferNode::deallocate(BufferNode* node) {
- node->~BufferNode();
- FREE_C_HEAP_ARRAY(char, node);
-}
-
-PtrQueueSet::PtrQueueSet(bool notify_when_complete) :
- _buffer_size(0),
- _cbl_mon(NULL),
- _completed_buffers_head(NULL),
- _completed_buffers_tail(NULL),
- _n_completed_buffers(0),
- _process_completed_threshold(0),
- _process_completed(false),
- _fl_lock(NULL),
- _buf_free_list(NULL),
- _buf_free_list_sz(0),
- _fl_owner(NULL),
- _all_active(false),
- _notify_when_complete(notify_when_complete),
- _max_completed_queue(0),
- _completed_queue_padding(0)
-{
- _fl_owner = this;
-}
-
-PtrQueueSet::~PtrQueueSet() {
- // There are presently only a couple (derived) instances ever
- // created, and they are permanent, so no harm currently done by
- // doing nothing here.
-}
-
-void PtrQueueSet::initialize(Monitor* cbl_mon,
- Mutex* fl_lock,
- int process_completed_threshold,
- int max_completed_queue,
- PtrQueueSet *fl_owner) {
- _max_completed_queue = max_completed_queue;
- _process_completed_threshold = process_completed_threshold;
- _completed_queue_padding = 0;
- assert(cbl_mon != NULL && fl_lock != NULL, "Init order issue?");
- _cbl_mon = cbl_mon;
- _fl_lock = fl_lock;
- _fl_owner = (fl_owner != NULL) ? fl_owner : this;
-}
-
-void** PtrQueueSet::allocate_buffer() {
- BufferNode* node = NULL;
- {
- MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag);
- node = _fl_owner->_buf_free_list;
- if (node != NULL) {
- _fl_owner->_buf_free_list = node->next();
- _fl_owner->_buf_free_list_sz--;
- }
- }
- if (node == NULL) {
- node = BufferNode::allocate(buffer_size());
- } else {
- // Reinitialize buffer obtained from free list.
- node->set_index(0);
- node->set_next(NULL);
- }
- return BufferNode::make_buffer_from_node(node);
-}
-
-void PtrQueueSet::deallocate_buffer(BufferNode* node) {
- MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag);
- node->set_next(_fl_owner->_buf_free_list);
- _fl_owner->_buf_free_list = node;
- _fl_owner->_buf_free_list_sz++;
-}
-
-void PtrQueueSet::reduce_free_list() {
- assert(_fl_owner == this, "Free list reduction is allowed only for the owner");
- // For now we'll adopt the strategy of deleting half.
- MutexLockerEx x(_fl_lock, Mutex::_no_safepoint_check_flag);
- size_t n = _buf_free_list_sz / 2;
- for (size_t i = 0; i < n; ++i) {
- assert(_buf_free_list != NULL,
- "_buf_free_list_sz is wrong: " SIZE_FORMAT, _buf_free_list_sz);
- BufferNode* node = _buf_free_list;
- _buf_free_list = node->next();
- _buf_free_list_sz--;
- BufferNode::deallocate(node);
- }
-}
-
-void PtrQueue::handle_zero_index() {
- assert(index() == 0, "precondition");
-
- // This thread records the full buffer and allocates a new one (while
- // holding the lock if there is one).
- if (_buf != NULL) {
- if (!should_enqueue_buffer()) {
- assert(index() > 0, "the buffer can only be re-used if it's not full");
- return;
- }
-
- if (_lock) {
- assert(_lock->owned_by_self(), "Required.");
-
- BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
- _buf = NULL; // clear shared _buf field
-
- locking_enqueue_completed_buffer(node); // enqueue completed buffer
- assert(_buf == NULL, "multiple enqueuers appear to be racing");
- } else {
- BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
- if (qset()->process_or_enqueue_complete_buffer(node)) {
- // Recycle the buffer. No allocation.
- assert(_buf == BufferNode::make_buffer_from_node(node), "invariant");
- assert(capacity() == qset()->buffer_size(), "invariant");
- reset();
- return;
- }
- }
- }
- // Set capacity in case this is the first allocation.
- set_capacity(qset()->buffer_size());
- // Allocate a new buffer.
- _buf = qset()->allocate_buffer();
- reset();
-}
-
-bool PtrQueueSet::process_or_enqueue_complete_buffer(BufferNode* node) {
- if (Thread::current()->is_Java_thread()) {
- // We don't lock. It is fine to be epsilon-precise here.
- if (_max_completed_queue == 0 ||
- (_max_completed_queue > 0 &&
- _n_completed_buffers >= _max_completed_queue + _completed_queue_padding)) {
- bool b = mut_process_buffer(node);
- if (b) {
- // True here means that the buffer hasn't been deallocated and the caller may reuse it.
- return true;
- }
- }
- }
- // The buffer will be enqueued. The caller will have to get a new one.
- enqueue_complete_buffer(node);
- return false;
-}
-
-void PtrQueueSet::enqueue_complete_buffer(BufferNode* cbn) {
- MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
- cbn->set_next(NULL);
- if (_completed_buffers_tail == NULL) {
- assert(_completed_buffers_head == NULL, "Well-formedness");
- _completed_buffers_head = cbn;
- _completed_buffers_tail = cbn;
- } else {
- _completed_buffers_tail->set_next(cbn);
- _completed_buffers_tail = cbn;
- }
- _n_completed_buffers++;
-
- if (!_process_completed && _process_completed_threshold >= 0 &&
- _n_completed_buffers >= (size_t)_process_completed_threshold) {
- _process_completed = true;
- if (_notify_when_complete) {
- _cbl_mon->notify();
- }
- }
- DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
-}
-
-size_t PtrQueueSet::completed_buffers_list_length() {
- size_t n = 0;
- BufferNode* cbn = _completed_buffers_head;
- while (cbn != NULL) {
- n++;
- cbn = cbn->next();
- }
- return n;
-}
-
-void PtrQueueSet::assert_completed_buffer_list_len_correct() {
- MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
- assert_completed_buffer_list_len_correct_locked();
-}
-
-void PtrQueueSet::assert_completed_buffer_list_len_correct_locked() {
- guarantee(completed_buffers_list_length() == _n_completed_buffers,
- "Completed buffer length is wrong.");
-}
-
-void PtrQueueSet::set_buffer_size(size_t sz) {
- assert(_buffer_size == 0 && sz > 0, "Should be called only once.");
- _buffer_size = sz;
-}
-
-// Merge lists of buffers. Notify the processing threads.
-// The source queue is emptied as a result. The queues
-// must share the monitor.
-void PtrQueueSet::merge_bufferlists(PtrQueueSet *src) {
- assert(_cbl_mon == src->_cbl_mon, "Should share the same lock");
- MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
- if (_completed_buffers_tail == NULL) {
- assert(_completed_buffers_head == NULL, "Well-formedness");
- _completed_buffers_head = src->_completed_buffers_head;
- _completed_buffers_tail = src->_completed_buffers_tail;
- } else {
- assert(_completed_buffers_head != NULL, "Well formedness");
- if (src->_completed_buffers_head != NULL) {
- _completed_buffers_tail->set_next(src->_completed_buffers_head);
- _completed_buffers_tail = src->_completed_buffers_tail;
- }
- }
- _n_completed_buffers += src->_n_completed_buffers;
-
- src->_n_completed_buffers = 0;
- src->_completed_buffers_head = NULL;
- src->_completed_buffers_tail = NULL;
-
- assert(_completed_buffers_head == NULL && _completed_buffers_tail == NULL ||
- _completed_buffers_head != NULL && _completed_buffers_tail != NULL,
- "Sanity");
-}
-
-void PtrQueueSet::notify_if_necessary() {
- MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
- assert(_process_completed_threshold >= 0, "_process_completed_threshold is negative");
- if (_n_completed_buffers >= (size_t)_process_completed_threshold || _max_completed_queue == 0) {
- _process_completed = true;
- if (_notify_when_complete)
- _cbl_mon->notify();
- }
-}
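
The file is moved verbatim, but the index discipline it carries is easy to misread: _index is a byte offset that starts at capacity_in_bytes for an empty buffer and is decremented toward zero, so enqueue_known_active() fills the buffer from the high end and index zero means "full". A standalone model of just that arithmetic, with hypothetical names:

    #include <cassert>
    #include <cstddef>

    struct ToyQueue {
      static const size_t element_size = sizeof(void*);
      void*  buf[8];
      size_t index_in_bytes = 8 * element_size;   // empty: index == capacity

      // Mirrors enqueue_known_active(): decrement the byte index first,
      // then store into the element slot it now names.
      bool try_enqueue(void* p) {
        if (index_in_bytes == 0) return false;    // full: handle_zero_index()
        index_in_bytes -= element_size;
        buf[index_in_bytes / element_size] = p;   // _buf[index()] = ptr
        return true;
      }

      // Mirrors PtrQueue::size(): the in-use region is [index, capacity).
      size_t size() const {
        return (sizeof(buf) - index_in_bytes) / element_size;
      }
    };

    int main() {
      ToyQueue q;
      int dummy;
      while (q.try_enqueue(&dummy)) {}            // fill all eight slots
      assert(q.size() == 8 && q.index_in_bytes == 0);
      return 0;
    }
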
--- a/src/hotspot/share/gc/g1/ptrQueue.hpp Fri Aug 17 21:36:02 2018 -0700
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,371 +0,0 @@
-/*
- * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_G1_PTRQUEUE_HPP
-#define SHARE_VM_GC_G1_PTRQUEUE_HPP
-
-#include "utilities/align.hpp"
-#include "utilities/sizes.hpp"
-
-// There are various techniques that require threads to be able to log
-// addresses. For example, a generational write barrier might log
-// the addresses of modified old-generation objects. This type supports
-// this operation.
-
-class BufferNode;
-class PtrQueueSet;
-class PtrQueue {
- friend class VMStructs;
-
- // Noncopyable - not defined.
- PtrQueue(const PtrQueue&);
- PtrQueue& operator=(const PtrQueue&);
-
- // The ptr queue set to which this queue belongs.
- PtrQueueSet* const _qset;
-
- // Whether updates should be logged.
- bool _active;
-
- // If true, the queue is permanent, and doesn't need to deallocate
- // its buffer in the destructor (since that obtains a lock which may not
- // be legally locked by then).
- const bool _permanent;
-
- // The (byte) index at which an object was last enqueued. Starts at
- // capacity_in_bytes (indicating an empty buffer) and goes towards zero.
- // Value is always pointer-size aligned.
- size_t _index;
-
- // Size of the current buffer, in bytes.
- // Value is always pointer-size aligned.
- size_t _capacity_in_bytes;
-
- static const size_t _element_size = sizeof(void*);
-
- // Get the capacity, in bytes. The capacity must have been set.
- size_t capacity_in_bytes() const {
- assert(_capacity_in_bytes > 0, "capacity not set");
- return _capacity_in_bytes;
- }
-
- void set_capacity(size_t entries) {
- size_t byte_capacity = index_to_byte_index(entries);
- assert(_capacity_in_bytes == 0 || _capacity_in_bytes == byte_capacity,
- "changing capacity " SIZE_FORMAT " -> " SIZE_FORMAT,
- _capacity_in_bytes, byte_capacity);
- _capacity_in_bytes = byte_capacity;
- }
-
- static size_t byte_index_to_index(size_t ind) {
- assert(is_aligned(ind, _element_size), "precondition");
- return ind / _element_size;
- }
-
- static size_t index_to_byte_index(size_t ind) {
- return ind * _element_size;
- }
-
-protected:
- // The buffer.
- void** _buf;
-
- size_t index() const {
- return byte_index_to_index(_index);
- }
-
- void set_index(size_t new_index) {
- size_t byte_index = index_to_byte_index(new_index);
- assert(byte_index <= capacity_in_bytes(), "precondition");
- _index = byte_index;
- }
-
- size_t capacity() const {
- return byte_index_to_index(capacity_in_bytes());
- }
-
- // If there is a lock associated with this buffer, this is that lock.
- Mutex* _lock;
-
- PtrQueueSet* qset() { return _qset; }
- bool is_permanent() const { return _permanent; }
-
- // Process queue entries and release resources.
- void flush_impl();
-
- // Initialize this queue to contain a null buffer, and be part of the
- // given PtrQueueSet.
- PtrQueue(PtrQueueSet* qset, bool permanent = false, bool active = false);
-
- // Requires queue flushed or permanent.
- ~PtrQueue();
-
-public:
-
- // Associate a lock with a ptr queue.
- void set_lock(Mutex* lock) { _lock = lock; }
-
- // Forcibly set empty.
- void reset() {
- if (_buf != NULL) {
- _index = capacity_in_bytes();
- }
- }
-
- void enqueue(volatile void* ptr) {
- enqueue((void*)(ptr));
- }
-
- // Enqueues the given "obj".
- void enqueue(void* ptr) {
- if (!_active) return;
- else enqueue_known_active(ptr);
- }
-
- // This method is called when we're doing the zero index handling
- // and gives a chance to the queues to do any pre-enqueueing
- // processing they might want to do on the buffer. It should return
- // true if the buffer should be enqueued, or false if enough
- // entries were cleared from it so that it can be re-used. It should
- // not return false if the buffer is still full (otherwise we can
- // get into an infinite loop).
- virtual bool should_enqueue_buffer() { return true; }
- void handle_zero_index();
- void locking_enqueue_completed_buffer(BufferNode* node);
-
- void enqueue_known_active(void* ptr);
-
- // Return the size of the in-use region.
- size_t size() const {
- size_t result = 0;
- if (_buf != NULL) {
- assert(_index <= capacity_in_bytes(), "Invariant");
- result = byte_index_to_index(capacity_in_bytes() - _index);
- }
- return result;
- }
-
- bool is_empty() const {
- return _buf == NULL || capacity_in_bytes() == _index;
- }
-
- // Set the "active" property of the queue to "b". An enqueue to an
- // inactive queue is a no-op. Setting a queue to inactive resets its
- // log to the empty state.
- void set_active(bool b) {
- _active = b;
- if (!b && _buf != NULL) {
- reset();
- } else if (b && _buf != NULL) {
- assert(index() == capacity(),
- "invariant: queues are empty when activated.");
- }
- }
-
- bool is_active() const { return _active; }
-
- // To support compiler.
-
-protected:
- template<typename Derived>
- static ByteSize byte_offset_of_index() {
- return byte_offset_of(Derived, _index);
- }
-
- static ByteSize byte_width_of_index() { return in_ByteSize(sizeof(size_t)); }
-
- template<typename Derived>
- static ByteSize byte_offset_of_buf() {
- return byte_offset_of(Derived, _buf);
- }
-
- static ByteSize byte_width_of_buf() { return in_ByteSize(_element_size); }
-
- template<typename Derived>
- static ByteSize byte_offset_of_active() {
- return byte_offset_of(Derived, _active);
- }
-
- static ByteSize byte_width_of_active() { return in_ByteSize(sizeof(bool)); }
-
-};
-
-class BufferNode {
- size_t _index;
- BufferNode* _next;
- void* _buffer[1]; // Pseudo flexible array member.
-
- BufferNode() : _index(0), _next(NULL) { }
- ~BufferNode() { }
-
- static size_t buffer_offset() {
- return offset_of(BufferNode, _buffer);
- }
-
-public:
- BufferNode* next() const { return _next; }
- void set_next(BufferNode* n) { _next = n; }
- size_t index() const { return _index; }
- void set_index(size_t i) { _index = i; }
-
- // Allocate a new BufferNode with the "buffer" having size elements.
- static BufferNode* allocate(size_t size);
-
- // Free a BufferNode.
- static void deallocate(BufferNode* node);
-
- // Return the BufferNode containing the buffer, after setting its index.
- static BufferNode* make_node_from_buffer(void** buffer, size_t index) {
- BufferNode* node =
- reinterpret_cast<BufferNode*>(
- reinterpret_cast<char*>(buffer) - buffer_offset());
- node->set_index(index);
- return node;
- }
-
- // Return the buffer for node.
- static void** make_buffer_from_node(BufferNode *node) {
- // &_buffer[0] might lead to index out of bounds warnings.
- return reinterpret_cast<void**>(
- reinterpret_cast<char*>(node) + buffer_offset());
- }
-};
-
-// A PtrQueueSet represents resources common to a set of pointer queues.
-// In particular, the individual queues allocate buffers from this shared
-// set, and return completed buffers to the set.
-// All these variables are protected by the TLOQ_CBL_mon. XXX ???
-class PtrQueueSet {
- // The size of all buffers in the set.
- size_t _buffer_size;
-
-protected:
- Monitor* _cbl_mon; // Protects the fields below.
- BufferNode* _completed_buffers_head;
- BufferNode* _completed_buffers_tail;
- size_t _n_completed_buffers;
- int _process_completed_threshold;
- volatile bool _process_completed;
-
- // This (and the interpretation of the first element as a "next"
- // pointer) are protected by the TLOQ_FL_lock.
- Mutex* _fl_lock;
- BufferNode* _buf_free_list;
- size_t _buf_free_list_sz;
- // Queue set can share a freelist. The _fl_owner variable
- // specifies the owner. It is set to "this" by default.
- PtrQueueSet* _fl_owner;
-
- bool _all_active;
-
- // If true, notify_all on _cbl_mon when the threshold is reached.
- bool _notify_when_complete;
-
- // Maximum number of elements allowed on the completed queue: beyond
- // that, the enqueuer does the work itself. A negative value means no
- // maximum; with zero, mutators always try to process buffers themselves.
- int _max_completed_queue;
- size_t _completed_queue_padding;
-
- size_t completed_buffers_list_length();
- void assert_completed_buffer_list_len_correct_locked();
- void assert_completed_buffer_list_len_correct();
-
-protected:
- // A mutator thread does the work of processing a buffer.
- // Returns "true" iff the work is complete (and the buffer may be
- // deallocated).
- virtual bool mut_process_buffer(BufferNode* node) {
- ShouldNotReachHere();
- return false;
- }
-
- // Create an empty ptr queue set.
- PtrQueueSet(bool notify_when_complete = false);
- ~PtrQueueSet();
-
- // Because of init-order concerns, we can't pass these as constructor
- // arguments.
- void initialize(Monitor* cbl_mon,
- Mutex* fl_lock,
- int process_completed_threshold,
- int max_completed_queue,
- PtrQueueSet *fl_owner = NULL);
-
-public:
-
- // Return the buffer for a BufferNode of size buffer_size().
- void** allocate_buffer();
-
- // Return an empty buffer to the free list. The node is required
- // to have been allocated with a size of buffer_size().
- void deallocate_buffer(BufferNode* node);
-
- // Declares that "buf" is a complete buffer.
- void enqueue_complete_buffer(BufferNode* node);
-
- // To be invoked by the mutator.
- bool process_or_enqueue_complete_buffer(BufferNode* node);
-
- bool completed_buffers_exist_dirty() {
- return _n_completed_buffers > 0;
- }
-
- bool process_completed_buffers() { return _process_completed; }
- void set_process_completed(bool x) { _process_completed = x; }
-
- bool is_active() { return _all_active; }
-
- // Set the buffer size. Should be called before any "enqueue" operation
- // can be called, and should only be called once.
- void set_buffer_size(size_t sz);
-
- // Get the buffer size. Must have been set.
- size_t buffer_size() const {
- assert(_buffer_size > 0, "buffer size not set");
- return _buffer_size;
- }
-
- // Get/Set the number of completed buffers that triggers log processing.
- void set_process_completed_threshold(int sz) { _process_completed_threshold = sz; }
- int process_completed_threshold() const { return _process_completed_threshold; }
-
- // Must only be called at a safe point. Indicates that the buffer free
- // list size may be reduced, if that is deemed desirable.
- void reduce_free_list();
-
- size_t completed_buffers_num() { return _n_completed_buffers; }
-
- void merge_bufferlists(PtrQueueSet* src);
-
- void set_max_completed_queue(int m) { _max_completed_queue = m; }
- int max_completed_queue() { return _max_completed_queue; }
-
- void set_completed_queue_padding(size_t padding) { _completed_queue_padding = padding; }
- size_t completed_queue_padding() { return _completed_queue_padding; }
-
- // Notify the consumer if the number of buffers crossed the threshold
- void notify_if_necessary();
-};
-
-#endif // SHARE_VM_GC_G1_PTRQUEUE_HPP
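
Before this header reappears under gc/shared, the BufferNode layout deserves a note: a node and the client buffer handed out by allocate_buffer() share a single heap block, with the node header in front, so make_node_from_buffer() and make_buffer_from_node() are plain pointer arithmetic on buffer_offset(). A standalone model of the layout, using hypothetical names and plain malloc in place of NEW_C_HEAP_ARRAY:

    #include <cassert>
    #include <cstddef>
    #include <cstdlib>
    #include <new>

    struct Node {
      size_t index;
      Node*  next;
      void*  buffer[1];                  // pseudo flexible array member

      static size_t buffer_offset() { return offsetof(Node, buffer); }

      // One block holds the header followed by 'entries' payload slots.
      static Node* allocate(size_t entries) {
        void* block = malloc(buffer_offset() + entries * sizeof(void*));
        return new (block) Node();
      }
      // make_buffer_from_node(): step past the header.
      static void** buffer_of(Node* n) {
        return reinterpret_cast<void**>(reinterpret_cast<char*>(n) + buffer_offset());
      }
      // make_node_from_buffer(): step back over the header.
      static Node* node_of(void** buf) {
        return reinterpret_cast<Node*>(reinterpret_cast<char*>(buf) - buffer_offset());
      }
    };

    int main() {
      Node* n = Node::allocate(4);
      void** buf = Node::buffer_of(n);
      buf[0] = buf;                      // client writes into the payload
      assert(Node::node_of(buf) == n);   // round trip recovers the header
      free(n);
      return 0;
    }
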
--- a/src/hotspot/share/gc/g1/satbMarkQueue.cpp Fri Aug 17 21:36:02 2018 -0700
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,263 +0,0 @@
-/*
- * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/g1/satbMarkQueue.hpp"
-#include "gc/shared/collectedHeap.hpp"
-#include "logging/log.hpp"
-#include "memory/allocation.inline.hpp"
-#include "oops/oop.inline.hpp"
-#include "runtime/mutexLocker.hpp"
-#include "runtime/os.hpp"
-#include "runtime/safepoint.hpp"
-#include "runtime/thread.hpp"
-#include "runtime/threadSMR.hpp"
-#include "runtime/vmThread.hpp"
-
-SATBMarkQueue::SATBMarkQueue(SATBMarkQueueSet* qset, bool permanent) :
- // SATB queues are only active during marking cycles. We create
- // them with their active field set to false. If a thread is
- // created during a cycle and its SATB queue needs to be activated
- // before the thread starts running, we'll need to set its active
- // field to true. This must be done in the collector-specific
- // BarrierSet::on_thread_attach() implementation.
- PtrQueue(qset, permanent, false /* active */)
-{ }
-
-void SATBMarkQueue::flush() {
- // Filter now to possibly save work later. If filtering empties the
- // buffer then flush_impl can deallocate the buffer.
- filter();
- flush_impl();
-}
-
-// This method will first apply filtering to the buffer. If filtering
-// retains a small enough collection in the buffer, we can continue to
-// use the buffer as-is, instead of enqueueing and replacing it.
-
-bool SATBMarkQueue::should_enqueue_buffer() {
- assert(_lock == NULL || _lock->owned_by_self(),
- "we should have taken the lock before calling this");
-
- // This method should only be called if there is a non-NULL buffer
- // that is full.
- assert(index() == 0, "pre-condition");
- assert(_buf != NULL, "pre-condition");
-
- filter();
-
- SATBMarkQueueSet* satb_qset = static_cast<SATBMarkQueueSet*>(qset());
- size_t threshold = satb_qset->buffer_enqueue_threshold();
- // Ensure we'll enqueue completely full buffers.
- assert(threshold > 0, "enqueue threshold = 0");
- // Ensure we won't enqueue empty buffers.
- assert(threshold <= capacity(),
- "enqueue threshold " SIZE_FORMAT " exceeds capacity " SIZE_FORMAT,
- threshold, capacity());
- return index() < threshold;
-}
-
-void SATBMarkQueue::apply_closure_and_empty(SATBBufferClosure* cl) {
- assert(SafepointSynchronize::is_at_safepoint(),
- "SATB queues must only be processed at safepoints");
- if (_buf != NULL) {
- cl->do_buffer(&_buf[index()], size());
- reset();
- }
-}
-
-#ifndef PRODUCT
-// Helpful for debugging
-
-static void print_satb_buffer(const char* name,
- void** buf,
- size_t index,
- size_t capacity) {
- tty->print_cr(" SATB BUFFER [%s] buf: " PTR_FORMAT " index: " SIZE_FORMAT
- " capacity: " SIZE_FORMAT,
- name, p2i(buf), index, capacity);
-}
-
-void SATBMarkQueue::print(const char* name) {
- print_satb_buffer(name, _buf, index(), capacity());
-}
-
-#endif // PRODUCT
-
-SATBMarkQueueSet::SATBMarkQueueSet() :
- PtrQueueSet(),
- _shared_satb_queue(this, true /* permanent */),
- _buffer_enqueue_threshold(0)
-{}
-
-void SATBMarkQueueSet::initialize(Monitor* cbl_mon, Mutex* fl_lock,
- int process_completed_threshold,
- uint buffer_enqueue_threshold_percentage,
- Mutex* lock) {
- PtrQueueSet::initialize(cbl_mon, fl_lock, process_completed_threshold, -1);
- _shared_satb_queue.set_lock(lock);
- assert(buffer_size() != 0, "buffer size not initialized");
- // Minimum threshold of 1 ensures enqueuing of completely full buffers.
- size_t size = buffer_size();
- size_t enqueue_qty = (size * buffer_enqueue_threshold_percentage) / 100;
- _buffer_enqueue_threshold = MAX2(size - enqueue_qty, (size_t)1);
-}
-
-#ifdef ASSERT
-void SATBMarkQueueSet::dump_active_states(bool expected_active) {
- log_error(gc, verify)("Expected SATB active state: %s", expected_active ? "ACTIVE" : "INACTIVE");
- log_error(gc, verify)("Actual SATB active states:");
- log_error(gc, verify)(" Queue set: %s", is_active() ? "ACTIVE" : "INACTIVE");
- for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
- log_error(gc, verify)(" Thread \"%s\" queue: %s", t->name(), satb_queue_for_thread(t).is_active() ? "ACTIVE" : "INACTIVE");
- }
- log_error(gc, verify)(" Shared queue: %s", shared_satb_queue()->is_active() ? "ACTIVE" : "INACTIVE");
-}
-
-void SATBMarkQueueSet::verify_active_states(bool expected_active) {
- // Verify queue set state
- if (is_active() != expected_active) {
- dump_active_states(expected_active);
- guarantee(false, "SATB queue set has an unexpected active state");
- }
-
- // Verify thread queue states
- for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
- if (satb_queue_for_thread(t).is_active() != expected_active) {
- dump_active_states(expected_active);
- guarantee(false, "Thread SATB queue has an unexpected active state");
- }
- }
-
- // Verify shared queue state
- if (shared_satb_queue()->is_active() != expected_active) {
- dump_active_states(expected_active);
- guarantee(false, "Shared SATB queue has an unexpected active state");
- }
-}
-#endif // ASSERT
-
-void SATBMarkQueueSet::set_active_all_threads(bool active, bool expected_active) {
- assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
-#ifdef ASSERT
- verify_active_states(expected_active);
-#endif // ASSERT
- _all_active = active;
- for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
- satb_queue_for_thread(t).set_active(active);
- }
- shared_satb_queue()->set_active(active);
-}
-
-void SATBMarkQueueSet::filter_thread_buffers() {
- for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
- satb_queue_for_thread(t).filter();
- }
- shared_satb_queue()->filter();
-}
-
-bool SATBMarkQueueSet::apply_closure_to_completed_buffer(SATBBufferClosure* cl) {
- BufferNode* nd = NULL;
- {
- MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
- if (_completed_buffers_head != NULL) {
- nd = _completed_buffers_head;
- _completed_buffers_head = nd->next();
- if (_completed_buffers_head == NULL) _completed_buffers_tail = NULL;
- _n_completed_buffers--;
- if (_n_completed_buffers == 0) _process_completed = false;
- }
- }
- if (nd != NULL) {
- void **buf = BufferNode::make_buffer_from_node(nd);
- size_t index = nd->index();
- size_t size = buffer_size();
- assert(index <= size, "invariant");
- cl->do_buffer(buf + index, size - index);
- deallocate_buffer(nd);
- return true;
- } else {
- return false;
- }
-}
-
-#ifndef PRODUCT
-// Helpful for debugging
-
-#define SATB_PRINTER_BUFFER_SIZE 256
-
-void SATBMarkQueueSet::print_all(const char* msg) {
- char buffer[SATB_PRINTER_BUFFER_SIZE];
- assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
-
- tty->cr();
- tty->print_cr("SATB BUFFERS [%s]", msg);
-
- BufferNode* nd = _completed_buffers_head;
- int i = 0;
- while (nd != NULL) {
- void** buf = BufferNode::make_buffer_from_node(nd);
- os::snprintf(buffer, SATB_PRINTER_BUFFER_SIZE, "Enqueued: %d", i);
- print_satb_buffer(buffer, buf, nd->index(), buffer_size());
- nd = nd->next();
- i += 1;
- }
-
- for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
- os::snprintf(buffer, SATB_PRINTER_BUFFER_SIZE, "Thread: %s", t->name());
- satb_queue_for_thread(t).print(buffer);
- }
-
- shared_satb_queue()->print("Shared");
-
- tty->cr();
-}
-#endif // PRODUCT
-
-void SATBMarkQueueSet::abandon_partial_marking() {
- BufferNode* buffers_to_delete = NULL;
- {
- MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
- while (_completed_buffers_head != NULL) {
- BufferNode* nd = _completed_buffers_head;
- _completed_buffers_head = nd->next();
- nd->set_next(buffers_to_delete);
- buffers_to_delete = nd;
- }
- _completed_buffers_tail = NULL;
- _n_completed_buffers = 0;
- DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
- }
- while (buffers_to_delete != NULL) {
- BufferNode* nd = buffers_to_delete;
- buffers_to_delete = nd->next();
- deallocate_buffer(nd);
- }
- assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
- // So we can safely manipulate these queues.
- for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
- satb_queue_for_thread(t).reset();
- }
- shared_satb_queue()->reset();
-}
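
The threshold arithmetic in SATBMarkQueueSet::initialize() above rewards a worked example. index() counts the free slots at the low end of a buffer, so should_enqueue_buffer() enqueues exactly when, after filtering, more than buffer_enqueue_threshold_percentage of the slots remain occupied. With hypothetical numbers, 1024-slot buffers and a 60 percent knob:

    #include <algorithm>
    #include <cassert>
    #include <cstddef>

    int main() {
      size_t size = 1024;                         // buffer_size(), in elements
      size_t percentage = 60;                     // buffer_enqueue_threshold_percentage
      size_t enqueue_qty = (size * percentage) / 100;              // 614
      size_t threshold = std::max(size - enqueue_qty, (size_t)1);  // 410

      // should_enqueue_buffer() tests index() < threshold. index() is the
      // free-slot count, so fewer than 410 free slots means more than 614
      // (60%) retained entries: enqueue instead of reusing the buffer.
      size_t index_after_filter = 300;            // 724 entries survived filtering
      assert(threshold == 410);
      assert(index_after_filter < threshold);     // this buffer gets enqueued
      return 0;
    }
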
--- a/src/hotspot/share/gc/g1/satbMarkQueue.hpp Fri Aug 17 21:36:02 2018 -0700
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,189 +0,0 @@
-/*
- * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_G1_SATBMARKQUEUE_HPP
-#define SHARE_VM_GC_G1_SATBMARKQUEUE_HPP
-
-#include "gc/g1/ptrQueue.hpp"
-#include "memory/allocation.hpp"
-
-class JavaThread;
-class SATBMarkQueueSet;
-
-// Base class for processing the contents of a SATB buffer.
-class SATBBufferClosure : public StackObj {
-protected:
- ~SATBBufferClosure() { }
-
-public:
- // Process the SATB entries in the designated buffer range.
- virtual void do_buffer(void** buffer, size_t size) = 0;
-};
-
-// A PtrQueue whose elements are (possibly stale) pointers to object heads.
-class SATBMarkQueue: public PtrQueue {
- friend class SATBMarkQueueSet;
-
-private:
- // Filter out unwanted entries from the buffer.
- inline void filter();
-
- // Removes entries from the buffer that are no longer needed.
- template<typename Filter>
- inline void apply_filter(Filter filter_out);
-
-public:
- SATBMarkQueue(SATBMarkQueueSet* qset, bool permanent = false);
-
- // Process queue entries and free resources.
- void flush();
-
- // Apply cl to the active part of the buffer.
- // Prerequisite: Must be at a safepoint.
- void apply_closure_and_empty(SATBBufferClosure* cl);
-
- // Overrides PtrQueue::should_enqueue_buffer(). See the method's
- // definition for more information.
- virtual bool should_enqueue_buffer();
-
-#ifndef PRODUCT
- // Helpful for debugging
- void print(const char* name);
-#endif // PRODUCT
-
- // Compiler support.
- static ByteSize byte_offset_of_index() {
- return PtrQueue::byte_offset_of_index<SATBMarkQueue>();
- }
- using PtrQueue::byte_width_of_index;
-
- static ByteSize byte_offset_of_buf() {
- return PtrQueue::byte_offset_of_buf<SATBMarkQueue>();
- }
- using PtrQueue::byte_width_of_buf;
-
- static ByteSize byte_offset_of_active() {
- return PtrQueue::byte_offset_of_active<SATBMarkQueue>();
- }
- using PtrQueue::byte_width_of_active;
-
-};
-
-class SATBMarkQueueSet: public PtrQueueSet {
- SATBMarkQueue _shared_satb_queue;
- size_t _buffer_enqueue_threshold;
-
-#ifdef ASSERT
- void dump_active_states(bool expected_active);
- void verify_active_states(bool expected_active);
-#endif // ASSERT
-
-protected:
- SATBMarkQueueSet();
- ~SATBMarkQueueSet() {}
-
- template<typename Filter>
- void apply_filter(Filter filter, SATBMarkQueue* queue) {
- queue->apply_filter(filter);
- }
-
- void initialize(Monitor* cbl_mon, Mutex* fl_lock,
- int process_completed_threshold,
- uint buffer_enqueue_threshold_percentage,
- Mutex* lock);
-
-public:
- virtual SATBMarkQueue& satb_queue_for_thread(JavaThread* const t) const = 0;
-
- // Apply "set_active(active)" to all SATB queues in the set. It should be
- // called only with the world stopped. The method will assert that the
- // SATB queues of all threads it visits, as well as the SATB queue
- // set itself, have the same active value as expected_active.
- void set_active_all_threads(bool active, bool expected_active);
-
- size_t buffer_enqueue_threshold() const { return _buffer_enqueue_threshold; }
- virtual void filter(SATBMarkQueue* queue) = 0;
-
- // Filter all the currently-active SATB buffers.
- void filter_thread_buffers();
-
- // If there exists some completed buffer, pop and process it, and
- // return true. Otherwise return false. Processing a buffer
- // consists of applying the closure to the active range of the
- // buffer; the leading entries may be excluded due to filtering.
- bool apply_closure_to_completed_buffer(SATBBufferClosure* cl);
-
-#ifndef PRODUCT
- // Helpful for debugging
- void print_all(const char* msg);
-#endif // PRODUCT
-
- SATBMarkQueue* shared_satb_queue() { return &_shared_satb_queue; }
-
- // If a marking is being abandoned, reset any unprocessed log buffers.
- void abandon_partial_marking();
-};
-
-inline void SATBMarkQueue::filter() {
- static_cast<SATBMarkQueueSet*>(qset())->filter(this);
-}
-
-// Removes entries from the buffer that are no longer needed, as
-// determined by filter. If e is a void* entry in the buffer,
-// filter_out(e) must be a valid expression whose value is convertible
-// to bool. Entries are removed (filtered out) if the result is true,
-// retained if false.
-template<typename Filter>
-inline void SATBMarkQueue::apply_filter(Filter filter_out) {
- void** buf = this->_buf;
-
- if (buf == NULL) {
- // nothing to do
- return;
- }
-
- // Two-fingered compaction toward the end.
- void** src = &buf[this->index()];
- void** dst = &buf[this->capacity()];
- assert(src <= dst, "invariant");
- for ( ; src < dst; ++src) {
- // Search low to high for an entry to keep.
- void* entry = *src;
- if (!filter_out(entry)) {
- // Found keeper. Search high to low for an entry to discard.
- while (src < --dst) {
- if (filter_out(*dst)) {
- *dst = entry; // Replace discard with keeper.
- break;
- }
- }
- // If discard search failed (src == dst), the outer loop will also end.
- }
- }
- // dst points to the lowest retained entry, or the end of the buffer
- // if all the entries were filtered out.
- this->set_index(dst - buf);
-}
-
-#endif // SHARE_VM_GC_G1_SATBMARKQUEUE_HPP
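
The two-fingered compaction in apply_filter() above is the subtlest piece being moved: src scans low-to-high for entries to keep, dst scans high-to-low for entries to discard, and each keeper found below dst is copied over a discard, so every retained entry ends up in [dst, capacity). A self-contained run over a plain array; the NULL-dropping predicate is only an example:

    #include <cassert>
    #include <cstddef>

    static bool filter_out(void* e) { return e == NULL; }  // drop NULL entries

    int main() {
      int v[8];
      void* buf[8];
      for (int i = 0; i < 8; ++i) {
        buf[i] = (i % 2 == 0) ? (void*)NULL : (void*)&v[i];  // 4 keepers, 4 discards
      }

      size_t capacity = 8;
      void** src = &buf[0];                       // index() == 0: a full buffer
      void** dst = &buf[capacity];
      for ( ; src < dst; ++src) {
        void* entry = *src;                       // low-to-high: find a keeper
        if (!filter_out(entry)) {
          while (src < --dst) {                   // high-to-low: find a discard
            if (filter_out(*dst)) {
              *dst = entry;                       // replace discard with keeper
              break;
            }
          }
        }
      }

      size_t new_index = dst - buf;               // becomes the queue's new index
      assert(new_index == 4);
      for (size_t i = new_index; i < capacity; ++i) {
        assert(buf[i] != NULL);                   // retained region is compact
      }
      return 0;
    }
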
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/ptrQueue.cpp Sat Aug 18 13:59:25 2018 -0400
@@ -0,0 +1,314 @@
+/*
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/shared/ptrQueue.hpp"
+#include "memory/allocation.hpp"
+#include "memory/allocation.inline.hpp"
+#include "runtime/mutex.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "runtime/thread.inline.hpp"
+
+#include <new>
+
+PtrQueue::PtrQueue(PtrQueueSet* qset, bool permanent, bool active) :
+ _qset(qset),
+ _active(active),
+ _permanent(permanent),
+ _index(0),
+ _capacity_in_bytes(0),
+ _buf(NULL),
+ _lock(NULL)
+{}
+
+PtrQueue::~PtrQueue() {
+ assert(_permanent || (_buf == NULL), "queue must be flushed before delete");
+}
+
+void PtrQueue::flush_impl() {
+ if (_buf != NULL) {
+ BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
+ if (is_empty()) {
+ // No work to do.
+ qset()->deallocate_buffer(node);
+ } else {
+ qset()->enqueue_complete_buffer(node);
+ }
+ _buf = NULL;
+ set_index(0);
+ }
+}
+
+
+void PtrQueue::enqueue_known_active(void* ptr) {
+ while (_index == 0) {
+ handle_zero_index();
+ }
+
+ assert(_buf != NULL, "postcondition");
+ assert(index() > 0, "postcondition");
+ assert(index() <= capacity(), "invariant");
+ _index -= _element_size;
+ _buf[index()] = ptr;
+}
+
+void PtrQueue::locking_enqueue_completed_buffer(BufferNode* node) {
+ assert(_lock->owned_by_self(), "Required.");
+ qset()->enqueue_complete_buffer(node);
+}
+
+
+BufferNode* BufferNode::allocate(size_t size) {
+ size_t byte_size = size * sizeof(void*);
+ void* data = NEW_C_HEAP_ARRAY(char, buffer_offset() + byte_size, mtGC);
+ return new (data) BufferNode;
+}
+
+void BufferNode::deallocate(BufferNode* node) {
+ node->~BufferNode();
+ FREE_C_HEAP_ARRAY(char, node);
+}
+
+PtrQueueSet::PtrQueueSet(bool notify_when_complete) :
+ _buffer_size(0),
+ _cbl_mon(NULL),
+ _completed_buffers_head(NULL),
+ _completed_buffers_tail(NULL),
+ _n_completed_buffers(0),
+ _process_completed_threshold(0),
+ _process_completed(false),
+ _fl_lock(NULL),
+ _buf_free_list(NULL),
+ _buf_free_list_sz(0),
+ _fl_owner(NULL),
+ _all_active(false),
+ _notify_when_complete(notify_when_complete),
+ _max_completed_queue(0),
+ _completed_queue_padding(0)
+{
+ _fl_owner = this;
+}
+
+PtrQueueSet::~PtrQueueSet() {
+ // There are presently only a couple (derived) instances ever
+ // created, and they are permanent, so no harm currently done by
+ // doing nothing here.
+}
+
+void PtrQueueSet::initialize(Monitor* cbl_mon,
+ Mutex* fl_lock,
+ int process_completed_threshold,
+ int max_completed_queue,
+ PtrQueueSet *fl_owner) {
+ _max_completed_queue = max_completed_queue;
+ _process_completed_threshold = process_completed_threshold;
+ _completed_queue_padding = 0;
+ assert(cbl_mon != NULL && fl_lock != NULL, "Init order issue?");
+ _cbl_mon = cbl_mon;
+ _fl_lock = fl_lock;
+ _fl_owner = (fl_owner != NULL) ? fl_owner : this;
+}
+
+void** PtrQueueSet::allocate_buffer() {
+ BufferNode* node = NULL;
+ {
+ MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag);
+ node = _fl_owner->_buf_free_list;
+ if (node != NULL) {
+ _fl_owner->_buf_free_list = node->next();
+ _fl_owner->_buf_free_list_sz--;
+ }
+ }
+ if (node == NULL) {
+ node = BufferNode::allocate(buffer_size());
+ } else {
+ // Reinitialize buffer obtained from free list.
+ node->set_index(0);
+ node->set_next(NULL);
+ }
+ return BufferNode::make_buffer_from_node(node);
+}
+
+void PtrQueueSet::deallocate_buffer(BufferNode* node) {
+ MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag);
+ node->set_next(_fl_owner->_buf_free_list);
+ _fl_owner->_buf_free_list = node;
+ _fl_owner->_buf_free_list_sz++;
+}
+
+void PtrQueueSet::reduce_free_list() {
+ assert(_fl_owner == this, "Free list reduction is allowed only for the owner");
+ // For now we'll adopt the strategy of deleting half.
+ MutexLockerEx x(_fl_lock, Mutex::_no_safepoint_check_flag);
+ size_t n = _buf_free_list_sz / 2;
+ for (size_t i = 0; i < n; ++i) {
+ assert(_buf_free_list != NULL,
+ "_buf_free_list_sz is wrong: " SIZE_FORMAT, _buf_free_list_sz);
+ BufferNode* node = _buf_free_list;
+ _buf_free_list = node->next();
+ _buf_free_list_sz--;
+ BufferNode::deallocate(node);
+ }
+}
+
+void PtrQueue::handle_zero_index() {
+ assert(index() == 0, "precondition");
+
+ // This thread records the full buffer and allocates a new one (while
+ // holding the lock if there is one).
+ if (_buf != NULL) {
+ if (!should_enqueue_buffer()) {
+ assert(index() > 0, "the buffer can only be re-used if it's not full");
+ return;
+ }
+
+ if (_lock) {
+ assert(_lock->owned_by_self(), "Required.");
+
+ BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
+ _buf = NULL; // clear shared _buf field
+
+ locking_enqueue_completed_buffer(node); // enqueue completed buffer
+ assert(_buf == NULL, "multiple enqueuers appear to be racing");
+ } else {
+ BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
+ if (qset()->process_or_enqueue_complete_buffer(node)) {
+ // Recycle the buffer. No allocation.
+ assert(_buf == BufferNode::make_buffer_from_node(node), "invariant");
+ assert(capacity() == qset()->buffer_size(), "invariant");
+ reset();
+ return;
+ }
+ }
+ }
+ // Set capacity in case this is the first allocation.
+ set_capacity(qset()->buffer_size());
+ // Allocate a new buffer.
+ _buf = qset()->allocate_buffer();
+ reset();
+}
+
+bool PtrQueueSet::process_or_enqueue_complete_buffer(BufferNode* node) {
+ if (Thread::current()->is_Java_thread()) {
+ // We don't lock. It is fine to be epsilon-precise here.
+ if (_max_completed_queue == 0 ||
+ (_max_completed_queue > 0 &&
+ _n_completed_buffers >= _max_completed_queue + _completed_queue_padding)) {
+ bool b = mut_process_buffer(node);
+ if (b) {
+ // True here means that the buffer hasn't been deallocated and the caller may reuse it.
+ return true;
+ }
+ }
+ }
+ // The buffer will be enqueued. The caller will have to get a new one.
+ enqueue_complete_buffer(node);
+ return false;
+}
+
+void PtrQueueSet::enqueue_complete_buffer(BufferNode* cbn) {
+ MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
+ cbn->set_next(NULL);
+ if (_completed_buffers_tail == NULL) {
+ assert(_completed_buffers_head == NULL, "Well-formedness");
+ _completed_buffers_head = cbn;
+ _completed_buffers_tail = cbn;
+ } else {
+ _completed_buffers_tail->set_next(cbn);
+ _completed_buffers_tail = cbn;
+ }
+ _n_completed_buffers++;
+
+ if (!_process_completed && _process_completed_threshold >= 0 &&
+ _n_completed_buffers >= (size_t)_process_completed_threshold) {
+ _process_completed = true;
+ if (_notify_when_complete) {
+ _cbl_mon->notify();
+ }
+ }
+ DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
+}
+
+size_t PtrQueueSet::completed_buffers_list_length() {
+ size_t n = 0;
+ BufferNode* cbn = _completed_buffers_head;
+ while (cbn != NULL) {
+ n++;
+ cbn = cbn->next();
+ }
+ return n;
+}
+
+void PtrQueueSet::assert_completed_buffer_list_len_correct() {
+ MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
+ assert_completed_buffer_list_len_correct_locked();
+}
+
+void PtrQueueSet::assert_completed_buffer_list_len_correct_locked() {
+ guarantee(completed_buffers_list_length() == _n_completed_buffers,
+ "Completed buffer length is wrong.");
+}
+
+void PtrQueueSet::set_buffer_size(size_t sz) {
+ assert(_buffer_size == 0 && sz > 0, "Should be called only once.");
+ _buffer_size = sz;
+}
+
+// Merge lists of buffers. Notify the processing threads.
+// The source queue is emptied as a result. The queues
+// must share the monitor.
+void PtrQueueSet::merge_bufferlists(PtrQueueSet *src) {
+ assert(_cbl_mon == src->_cbl_mon, "Should share the same lock");
+ MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
+ if (_completed_buffers_tail == NULL) {
+ assert(_completed_buffers_head == NULL, "Well-formedness");
+ _completed_buffers_head = src->_completed_buffers_head;
+ _completed_buffers_tail = src->_completed_buffers_tail;
+ } else {
+ assert(_completed_buffers_head != NULL, "Well formedness");
+ if (src->_completed_buffers_head != NULL) {
+ _completed_buffers_tail->set_next(src->_completed_buffers_head);
+ _completed_buffers_tail = src->_completed_buffers_tail;
+ }
+ }
+ _n_completed_buffers += src->_n_completed_buffers;
+
+ src->_n_completed_buffers = 0;
+ src->_completed_buffers_head = NULL;
+ src->_completed_buffers_tail = NULL;
+
+ assert(_completed_buffers_head == NULL && _completed_buffers_tail == NULL ||
+ _completed_buffers_head != NULL && _completed_buffers_tail != NULL,
+ "Sanity");
+}
+
+void PtrQueueSet::notify_if_necessary() {
+ MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
+ assert(_process_completed_threshold >= 0, "_process_completed_threshold is negative");
+ if (_n_completed_buffers >= (size_t)_process_completed_threshold || _max_completed_queue == 0) {
+ _process_completed = true;
+ if (_notify_when_complete)
+ _cbl_mon->notify();
+ }
+}
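
One behavior the re-added file preserves verbatim is the mutator back-pressure in process_or_enqueue_complete_buffer(): a Java thread tries to process its own full buffer, rather than enqueue it, either when _max_completed_queue is zero or when the completed-buffer list has grown past _max_completed_queue plus the padding; a negative maximum (as SATBMarkQueueSet::initialize() passes) means mutators always enqueue. A standalone model of just that predicate, with a hypothetical function name:

    #include <cassert>
    #include <cstddef>

    // Mirrors the condition guarding mut_process_buffer() above.
    static bool mutator_should_process(int max_completed_queue,
                                       size_t completed_queue_padding,
                                       size_t n_completed_buffers) {
      return max_completed_queue == 0 ||
             (max_completed_queue > 0 &&
              n_completed_buffers >= (size_t)max_completed_queue + completed_queue_padding);
    }

    int main() {
      assert(mutator_should_process(0, 0, 0));      // zero max: always self-process
      assert(!mutator_should_process(-1, 0, 100));  // negative max: unbounded queue
      assert(!mutator_should_process(10, 5, 14));   // under max + padding: enqueue
      assert(mutator_should_process(10, 5, 15));    // at the limit: self-process
      return 0;
    }
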
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/ptrQueue.hpp Sat Aug 18 13:59:25 2018 -0400
@@ -0,0 +1,371 @@
+/*
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_SHARED_PTRQUEUE_HPP
+#define SHARE_GC_SHARED_PTRQUEUE_HPP
+
+#include "utilities/align.hpp"
+#include "utilities/sizes.hpp"
+
+// There are various techniques that require threads to be able to log
+// addresses. For example, a generational write barrier might log
+// the addresses of modified old-generation objects. This type supports
+// this operation.
+
+class BufferNode;
+class PtrQueueSet;
+class PtrQueue {
+ friend class VMStructs;
+
+ // Noncopyable - not defined.
+ PtrQueue(const PtrQueue&);
+ PtrQueue& operator=(const PtrQueue&);
+
+ // The ptr queue set to which this queue belongs.
+ PtrQueueSet* const _qset;
+
+ // Whether updates should be logged.
+ bool _active;
+
+ // If true, the queue is permanent, and doesn't need to deallocate
+ // its buffer in the destructor (since that obtains a lock which may not
+ // be legally locked by then).
+ const bool _permanent;
+
+ // The (byte) index at which an object was last enqueued. Starts at
+ // capacity_in_bytes (indicating an empty buffer) and goes towards zero.
+ // Value is always pointer-size aligned.
+ size_t _index;
+
+ // Size of the current buffer, in bytes.
+ // Value is always pointer-size aligned.
+ size_t _capacity_in_bytes;
+
+ static const size_t _element_size = sizeof(void*);
+
+ // Get the capacity, in bytes. The capacity must have been set.
+ size_t capacity_in_bytes() const {
+ assert(_capacity_in_bytes > 0, "capacity not set");
+ return _capacity_in_bytes;
+ }
+
+ void set_capacity(size_t entries) {
+ size_t byte_capacity = index_to_byte_index(entries);
+ assert(_capacity_in_bytes == 0 || _capacity_in_bytes == byte_capacity,
+ "changing capacity " SIZE_FORMAT " -> " SIZE_FORMAT,
+ _capacity_in_bytes, byte_capacity);
+ _capacity_in_bytes = byte_capacity;
+ }
+
+ static size_t byte_index_to_index(size_t ind) {
+ assert(is_aligned(ind, _element_size), "precondition");
+ return ind / _element_size;
+ }
+
+ static size_t index_to_byte_index(size_t ind) {
+ return ind * _element_size;
+ }
+
+protected:
+ // The buffer.
+ void** _buf;
+
+ size_t index() const {
+ return byte_index_to_index(_index);
+ }
+
+ void set_index(size_t new_index) {
+ size_t byte_index = index_to_byte_index(new_index);
+ assert(byte_index <= capacity_in_bytes(), "precondition");
+ _index = byte_index;
+ }
+
+ size_t capacity() const {
+ return byte_index_to_index(capacity_in_bytes());
+ }
+
+ // If there is a lock associated with this buffer, this is that lock.
+ Mutex* _lock;
+
+ PtrQueueSet* qset() { return _qset; }
+ bool is_permanent() const { return _permanent; }
+
+ // Process queue entries and release resources.
+ void flush_impl();
+
+ // Initialize this queue to contain a null buffer, and be part of the
+ // given PtrQueueSet.
+ PtrQueue(PtrQueueSet* qset, bool permanent = false, bool active = false);
+
+ // Requires queue flushed or permanent.
+ ~PtrQueue();
+
+public:
+
+ // Associate a lock with a ptr queue.
+ void set_lock(Mutex* lock) { _lock = lock; }
+
+ // Forcibly set empty.
+ void reset() {
+ if (_buf != NULL) {
+ _index = capacity_in_bytes();
+ }
+ }
+
+ void enqueue(volatile void* ptr) {
+ enqueue((void*)(ptr));
+ }
+
+ // Enqueues the given "obj".
+ void enqueue(void* ptr) {
+ if (!_active) return;
+ else enqueue_known_active(ptr);
+ }
+
+ // This method is called when we're doing the zero index handling
+ // and gives a chance to the queues to do any pre-enqueueing
+ // processing they might want to do on the buffer. It should return
+ // true if the buffer should be enqueued, or false if enough
+ // entries were cleared from it so that it can be re-used. It should
+ // not return false if the buffer is still full (otherwise we can
+ // get into an infinite loop).
+ virtual bool should_enqueue_buffer() { return true; }
+ void handle_zero_index();
+ void locking_enqueue_completed_buffer(BufferNode* node);
+
+ void enqueue_known_active(void* ptr);
+
+ // Return the size of the in-use region.
+ size_t size() const {
+ size_t result = 0;
+ if (_buf != NULL) {
+ assert(_index <= capacity_in_bytes(), "Invariant");
+ result = byte_index_to_index(capacity_in_bytes() - _index);
+ }
+ return result;
+ }
+
+ bool is_empty() const {
+ return _buf == NULL || capacity_in_bytes() == _index;
+ }
+
+ // Set the "active" property of the queue to "b". An enqueue to an
+ // inactive queue is a no-op. Setting a queue to inactive resets its
+ // log to the empty state.
+ void set_active(bool b) {
+ _active = b;
+ if (!b && _buf != NULL) {
+ reset();
+ } else if (b && _buf != NULL) {
+ assert(index() == capacity(),
+ "invariant: queues are empty when activated.");
+ }
+ }
+
+ bool is_active() const { return _active; }
+
+ // To support compiler.
+
+protected:
+ template<typename Derived>
+ static ByteSize byte_offset_of_index() {
+ return byte_offset_of(Derived, _index);
+ }
+
+ static ByteSize byte_width_of_index() { return in_ByteSize(sizeof(size_t)); }
+
+ template<typename Derived>
+ static ByteSize byte_offset_of_buf() {
+ return byte_offset_of(Derived, _buf);
+ }
+
+ static ByteSize byte_width_of_buf() { return in_ByteSize(_element_size); }
+
+ template<typename Derived>
+ static ByteSize byte_offset_of_active() {
+ return byte_offset_of(Derived, _active);
+ }
+
+ static ByteSize byte_width_of_active() { return in_ByteSize(sizeof(bool)); }
+
+};
+
+class BufferNode {
+ size_t _index;
+ BufferNode* _next;
+ void* _buffer[1]; // Pseudo flexible array member.
+
+ BufferNode() : _index(0), _next(NULL) { }
+ ~BufferNode() { }
+
+ static size_t buffer_offset() {
+ return offset_of(BufferNode, _buffer);
+ }
+
+public:
+ BufferNode* next() const { return _next; }
+ void set_next(BufferNode* n) { _next = n; }
+ size_t index() const { return _index; }
+ void set_index(size_t i) { _index = i; }
+
+ // Allocate a new BufferNode whose "buffer" has "size" elements.
+ static BufferNode* allocate(size_t size);
+
+ // Free a BufferNode.
+ static void deallocate(BufferNode* node);
+
+ // Return the BufferNode containing the buffer, after setting its index.
+ static BufferNode* make_node_from_buffer(void** buffer, size_t index) {
+ BufferNode* node =
+ reinterpret_cast<BufferNode*>(
+ reinterpret_cast<char*>(buffer) - buffer_offset());
+ node->set_index(index);
+ return node;
+ }
+
+ // Return the buffer for node.
+ static void** make_buffer_from_node(BufferNode *node) {
+ // &_buffer[0] might lead to index out of bounds warnings.
+ return reinterpret_cast<void**>(
+ reinterpret_cast<char*>(node) + buffer_offset());
+ }
+};
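+
+// Illustrative sketch (not part of this change): a BufferNode and its
+// buffer share a single allocation, so converting between them is just
+// pointer arithmetic around buffer_offset():
+//
+//   BufferNode* node = BufferNode::allocate(capacity);
+//   void** buf = BufferNode::make_buffer_from_node(node);
+//   // ... fill buf, tracking "index", the first in-use slot ...
+//   BufferNode* same = BufferNode::make_node_from_buffer(buf, index);
+//   assert(same == node, "round trip recovers the original node");
+//   BufferNode::deallocate(node);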
+
+// A PtrQueueSet represents resources common to a set of pointer queues.
+// In particular, the individual queues allocate buffers from this shared
+// set, and return completed buffers to the set.
+// All these variables are protected by the TLOQ_CBL_mon. XXX ???
+class PtrQueueSet {
+ // The size of all buffers in the set.
+ size_t _buffer_size;
+
+protected:
+ Monitor* _cbl_mon; // Protects the fields below.
+ BufferNode* _completed_buffers_head;
+ BufferNode* _completed_buffers_tail;
+ size_t _n_completed_buffers;
+ int _process_completed_threshold;
+ volatile bool _process_completed;
+
+ // This (and the interpretation of the first element as a "next"
+ // pointer) is protected by the TLOQ_FL_lock.
+ Mutex* _fl_lock;
+ BufferNode* _buf_free_list;
+ size_t _buf_free_list_sz;
+ // Queue sets can share a free list. The _fl_owner variable
+ // specifies the owner; it is set to "this" by default.
+ PtrQueueSet* _fl_owner;
+
+ bool _all_active;
+
+ // If true, notify_all on _cbl_mon when the threshold is reached.
+ bool _notify_when_complete;
+
+ // Maximum number of elements allowed on the completed queue: beyond
+ // that, the enqueuer does the work itself. Zero indicates no maximum.
+ int _max_completed_queue;
+ size_t _completed_queue_padding;
+
+ size_t completed_buffers_list_length();
+ void assert_completed_buffer_list_len_correct_locked();
+ void assert_completed_buffer_list_len_correct();
+
+protected:
+ // A mutator thread does the work of processing a buffer.
+ // Returns "true" iff the work is complete (and the buffer may be
+ // deallocated).
+ virtual bool mut_process_buffer(BufferNode* node) {
+ ShouldNotReachHere();
+ return false;
+ }
+
+ // Create an empty ptr queue set.
+ PtrQueueSet(bool notify_when_complete = false);
+ ~PtrQueueSet();
+
+ // Because of init-order concerns, we can't pass these as constructor
+ // arguments.
+ void initialize(Monitor* cbl_mon,
+ Mutex* fl_lock,
+ int process_completed_threshold,
+ int max_completed_queue,
+ PtrQueueSet *fl_owner = NULL);
+
+public:
+
+ // Return the buffer for a BufferNode of size buffer_size().
+ void** allocate_buffer();
+
+ // Return an empty buffer to the free list. The node is required
+ // to have been allocated with a size of buffer_size().
+ void deallocate_buffer(BufferNode* node);
+
+ // Declares that "node" is a complete buffer.
+ void enqueue_complete_buffer(BufferNode* node);
+
+ // To be invoked by the mutator.
+ bool process_or_enqueue_complete_buffer(BufferNode* node);
+
+ bool completed_buffers_exist_dirty() {
+ return _n_completed_buffers > 0;
+ }
+
+ bool process_completed_buffers() { return _process_completed; }
+ void set_process_completed(bool x) { _process_completed = x; }
+
+ bool is_active() { return _all_active; }
+
+ // Set the buffer size. Must be called before any "enqueue" operation,
+ // and must only be called once.
+ void set_buffer_size(size_t sz);
+
+ // Get the buffer size. Must have been set.
+ size_t buffer_size() const {
+ assert(_buffer_size > 0, "buffer size not set");
+ return _buffer_size;
+ }
+
+ // Get/Set the number of completed buffers that triggers log processing.
+ void set_process_completed_threshold(int sz) { _process_completed_threshold = sz; }
+ int process_completed_threshold() const { return _process_completed_threshold; }
+
+ // Must only be called at a safe point. Indicates that the buffer free
+ // list size may be reduced, if that is deemed desirable.
+ void reduce_free_list();
+
+ size_t completed_buffers_num() { return _n_completed_buffers; }
+
+ void merge_bufferlists(PtrQueueSet* src);
+
+ void set_max_completed_queue(int m) { _max_completed_queue = m; }
+ int max_completed_queue() { return _max_completed_queue; }
+
+ void set_completed_queue_padding(size_t padding) { _completed_queue_padding = padding; }
+ size_t completed_queue_padding() { return _completed_queue_padding; }
+
+ // Notify the consumer if the number of buffers crossed the threshold.
+ void notify_if_necessary();
+};
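+
+// Illustrative sketch (not part of this change): because of the
+// init-order concern noted above, a concrete queue set is constructed
+// first and wired up later; mirroring SATBMarkQueueSet::initialize
+// elsewhere in this patch (names hypothetical):
+//
+//   MyQueueSet qset;
+//   qset.initialize(my_cbl_mon, my_fl_lock,
+//                   10 /* process_completed_threshold */,
+//                   -1 /* max_completed_queue */);
+//   qset.set_buffer_size(256);  // once, before any enqueue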
+
+#endif // SHARE_GC_SHARED_PTRQUEUE_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/satbMarkQueue.cpp Sat Aug 18 13:59:25 2018 -0400
@@ -0,0 +1,263 @@
+/*
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/shared/satbMarkQueue.hpp"
+#include "gc/shared/collectedHeap.hpp"
+#include "logging/log.hpp"
+#include "memory/allocation.inline.hpp"
+#include "oops/oop.inline.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "runtime/os.hpp"
+#include "runtime/safepoint.hpp"
+#include "runtime/thread.hpp"
+#include "runtime/threadSMR.hpp"
+#include "runtime/vmThread.hpp"
+
+SATBMarkQueue::SATBMarkQueue(SATBMarkQueueSet* qset, bool permanent) :
+ // SATB queues are only active during marking cycles. We create
+ // them with their active field set to false. If a thread is
+ // created during a cycle and its SATB queue needs to be activated
+ // before the thread starts running, we'll need to set its active
+ // field to true. This must be done in the collector-specific
+ // BarrierSet::on_thread_attach() implementation.
+ PtrQueue(qset, permanent, false /* active */)
+{ }
+
+void SATBMarkQueue::flush() {
+ // Filter now to possibly save work later. If filtering empties the
+ // buffer then flush_impl can deallocate the buffer.
+ filter();
+ flush_impl();
+}
+
+// This method first applies filtering to the buffer. If filtering
+// retains few enough entries, we can continue to use the buffer
+// as-is, instead of enqueueing and replacing it.
+
+bool SATBMarkQueue::should_enqueue_buffer() {
+ assert(_lock == NULL || _lock->owned_by_self(),
+ "we should have taken the lock before calling this");
+
+ // This method should only be called if there is a non-NULL buffer
+ // that is full.
+ assert(index() == 0, "pre-condition");
+ assert(_buf != NULL, "pre-condition");
+
+ filter();
+
+ SATBMarkQueueSet* satb_qset = static_cast<SATBMarkQueueSet*>(qset());
+ size_t threshold = satb_qset->buffer_enqueue_threshold();
+ // Ensure we'll enqueue completely full buffers.
+ assert(threshold > 0, "enqueue threshold = 0");
+ // Ensure we won't enqueue empty buffers.
+ assert(threshold <= capacity(),
+ "enqueue threshold " SIZE_FORMAT " exceeds capacity " SIZE_FORMAT,
+ threshold, capacity());
+ return index() < threshold;
+}
+
+void SATBMarkQueue::apply_closure_and_empty(SATBBufferClosure* cl) {
+ assert(SafepointSynchronize::is_at_safepoint(),
+ "SATB queues must only be processed at safepoints");
+ if (_buf != NULL) {
+ cl->do_buffer(&_buf[index()], size());
+ reset();
+ }
+}
+
+#ifndef PRODUCT
+// Helpful for debugging
+
+static void print_satb_buffer(const char* name,
+ void** buf,
+ size_t index,
+ size_t capacity) {
+ tty->print_cr(" SATB BUFFER [%s] buf: " PTR_FORMAT " index: " SIZE_FORMAT
+ " capacity: " SIZE_FORMAT,
+ name, p2i(buf), index, capacity);
+}
+
+void SATBMarkQueue::print(const char* name) {
+ print_satb_buffer(name, _buf, index(), capacity());
+}
+
+#endif // PRODUCT
+
+SATBMarkQueueSet::SATBMarkQueueSet() :
+ PtrQueueSet(),
+ _shared_satb_queue(this, true /* permanent */),
+ _buffer_enqueue_threshold(0)
+{}
+
+void SATBMarkQueueSet::initialize(Monitor* cbl_mon, Mutex* fl_lock,
+ int process_completed_threshold,
+ uint buffer_enqueue_threshold_percentage,
+ Mutex* lock) {
+ PtrQueueSet::initialize(cbl_mon, fl_lock, process_completed_threshold, -1);
+ _shared_satb_queue.set_lock(lock);
+ assert(buffer_size() != 0, "buffer size not initialized");
+ // Minimum threshold of 1 ensures enqueuing of completely full buffers.
+ size_t size = buffer_size();
+ size_t enqueue_qty = (size * buffer_enqueue_threshold_percentage) / 100;
+ _buffer_enqueue_threshold = MAX2(size - enqueue_qty, (size_t)1);
+}
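+
+// Worked example (illustrative, not part of this change): with
+// buffer_size() == 256 and buffer_enqueue_threshold_percentage == 60,
+// enqueue_qty = 256 * 60 / 100 = 153 and _buffer_enqueue_threshold =
+// MAX2(256 - 153, 1) = 103. should_enqueue_buffer() then enqueues a
+// filtered buffer iff index() < 103, i.e. iff at least 154 of its 256
+// slots (over 60%) still hold entries after filtering; otherwise the
+// buffer is re-used in place.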
+
+#ifdef ASSERT
+void SATBMarkQueueSet::dump_active_states(bool expected_active) {
+ log_error(gc, verify)("Expected SATB active state: %s", expected_active ? "ACTIVE" : "INACTIVE");
+ log_error(gc, verify)("Actual SATB active states:");
+ log_error(gc, verify)(" Queue set: %s", is_active() ? "ACTIVE" : "INACTIVE");
+ for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
+ log_error(gc, verify)(" Thread \"%s\" queue: %s", t->name(), satb_queue_for_thread(t).is_active() ? "ACTIVE" : "INACTIVE");
+ }
+ log_error(gc, verify)(" Shared queue: %s", shared_satb_queue()->is_active() ? "ACTIVE" : "INACTIVE");
+}
+
+void SATBMarkQueueSet::verify_active_states(bool expected_active) {
+ // Verify queue set state
+ if (is_active() != expected_active) {
+ dump_active_states(expected_active);
+ guarantee(false, "SATB queue set has an unexpected active state");
+ }
+
+ // Verify thread queue states
+ for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
+ if (satb_queue_for_thread(t).is_active() != expected_active) {
+ dump_active_states(expected_active);
+ guarantee(false, "Thread SATB queue has an unexpected active state");
+ }
+ }
+
+ // Verify shared queue state
+ if (shared_satb_queue()->is_active() != expected_active) {
+ dump_active_states(expected_active);
+ guarantee(false, "Shared SATB queue has an unexpected active state");
+ }
+}
+#endif // ASSERT
+
+void SATBMarkQueueSet::set_active_all_threads(bool active, bool expected_active) {
+ assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
+#ifdef ASSERT
+ verify_active_states(expected_active);
+#endif // ASSERT
+ _all_active = active;
+ for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
+ satb_queue_for_thread(t).set_active(active);
+ }
+ shared_satb_queue()->set_active(active);
+}
+
+void SATBMarkQueueSet::filter_thread_buffers() {
+ for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
+ satb_queue_for_thread(t).filter();
+ }
+ shared_satb_queue()->filter();
+}
+
+bool SATBMarkQueueSet::apply_closure_to_completed_buffer(SATBBufferClosure* cl) {
+ BufferNode* nd = NULL;
+ {
+ MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
+ if (_completed_buffers_head != NULL) {
+ nd = _completed_buffers_head;
+ _completed_buffers_head = nd->next();
+ if (_completed_buffers_head == NULL) _completed_buffers_tail = NULL;
+ _n_completed_buffers--;
+ if (_n_completed_buffers == 0) _process_completed = false;
+ }
+ }
+ if (nd != NULL) {
+ void **buf = BufferNode::make_buffer_from_node(nd);
+ size_t index = nd->index();
+ size_t size = buffer_size();
+ assert(index <= size, "invariant");
+ cl->do_buffer(buf + index, size - index);
+ deallocate_buffer(nd);
+ return true;
+ } else {
+ return false;
+ }
+}
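+
+// Illustrative sketch (not part of this change): a marking thread can
+// drain the completed-buffer list by popping until it is empty
+// (closure type hypothetical):
+//
+//   MySATBBufferClosure cl;
+//   while (satb_qset->apply_closure_to_completed_buffer(&cl)) {
+//     // each iteration processed and deallocated one buffer
+//   }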
+
+#ifndef PRODUCT
+// Helpful for debugging
+
+#define SATB_PRINTER_BUFFER_SIZE 256
+
+void SATBMarkQueueSet::print_all(const char* msg) {
+ char buffer[SATB_PRINTER_BUFFER_SIZE];
+ assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
+
+ tty->cr();
+ tty->print_cr("SATB BUFFERS [%s]", msg);
+
+ BufferNode* nd = _completed_buffers_head;
+ int i = 0;
+ while (nd != NULL) {
+ void** buf = BufferNode::make_buffer_from_node(nd);
+ os::snprintf(buffer, SATB_PRINTER_BUFFER_SIZE, "Enqueued: %d", i);
+ print_satb_buffer(buffer, buf, nd->index(), buffer_size());
+ nd = nd->next();
+ i += 1;
+ }
+
+ for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
+ os::snprintf(buffer, SATB_PRINTER_BUFFER_SIZE, "Thread: %s", t->name());
+ satb_queue_for_thread(t).print(buffer);
+ }
+
+ shared_satb_queue()->print("Shared");
+
+ tty->cr();
+}
+#endif // PRODUCT
+
+void SATBMarkQueueSet::abandon_partial_marking() {
+ BufferNode* buffers_to_delete = NULL;
+ {
+ MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
+ while (_completed_buffers_head != NULL) {
+ BufferNode* nd = _completed_buffers_head;
+ _completed_buffers_head = nd->next();
+ nd->set_next(buffers_to_delete);
+ buffers_to_delete = nd;
+ }
+ _completed_buffers_tail = NULL;
+ _n_completed_buffers = 0;
+ DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
+ }
+ while (buffers_to_delete != NULL) {
+ BufferNode* nd = buffers_to_delete;
+ buffers_to_delete = nd->next();
+ deallocate_buffer(nd);
+ }
+ assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
+ // So we can safely manipulate these queues.
+ for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
+ satb_queue_for_thread(t).reset();
+ }
+ shared_satb_queue()->reset();
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/satbMarkQueue.hpp Sat Aug 18 13:59:25 2018 -0400
@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_SHARED_SATBMARKQUEUE_HPP
+#define SHARE_GC_SHARED_SATBMARKQUEUE_HPP
+
+#include "gc/shared/ptrQueue.hpp"
+#include "memory/allocation.hpp"
+
+class JavaThread;
+class SATBMarkQueueSet;
+
+// Base class for processing the contents of a SATB buffer.
+class SATBBufferClosure : public StackObj {
+protected:
+ ~SATBBufferClosure() { }
+
+public:
+ // Process the SATB entries in the designated buffer range.
+ virtual void do_buffer(void** buffer, size_t size) = 0;
+};
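+
+// Illustrative sketch (not part of this change): a minimal closure that
+// just counts the entries it is handed:
+//
+//   class CountingSATBClosure : public SATBBufferClosure {  // hypothetical
+//     size_t _count;
+//   public:
+//     CountingSATBClosure() : _count(0) { }
+//     virtual void do_buffer(void** buffer, size_t size) {
+//       _count += size;  // buffer[0 .. size) are the active entries
+//     }
+//     size_t count() const { return _count; }
+//   };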
+
+// A PtrQueue whose elements are (possibly stale) pointers to object heads.
+class SATBMarkQueue: public PtrQueue {
+ friend class SATBMarkQueueSet;
+
+private:
+ // Filter out unwanted entries from the buffer.
+ inline void filter();
+
+ // Removes entries from the buffer that are no longer needed.
+ template<typename Filter>
+ inline void apply_filter(Filter filter_out);
+
+public:
+ SATBMarkQueue(SATBMarkQueueSet* qset, bool permanent = false);
+
+ // Process queue entries and free resources.
+ void flush();
+
+ // Apply cl to the active part of the buffer.
+ // Prerequisite: Must be at a safepoint.
+ void apply_closure_and_empty(SATBBufferClosure* cl);
+
+ // Overrides PtrQueue::should_enqueue_buffer(). See the method's
+ // definition for more information.
+ virtual bool should_enqueue_buffer();
+
+#ifndef PRODUCT
+ // Helpful for debugging
+ void print(const char* name);
+#endif // PRODUCT
+
+ // Compiler support.
+ static ByteSize byte_offset_of_index() {
+ return PtrQueue::byte_offset_of_index<SATBMarkQueue>();
+ }
+ using PtrQueue::byte_width_of_index;
+
+ static ByteSize byte_offset_of_buf() {
+ return PtrQueue::byte_offset_of_buf<SATBMarkQueue>();
+ }
+ using PtrQueue::byte_width_of_buf;
+
+ static ByteSize byte_offset_of_active() {
+ return PtrQueue::byte_offset_of_active<SATBMarkQueue>();
+ }
+ using PtrQueue::byte_width_of_active;
+
+};
+
+class SATBMarkQueueSet: public PtrQueueSet {
+ SATBMarkQueue _shared_satb_queue;
+ size_t _buffer_enqueue_threshold;
+
+#ifdef ASSERT
+ void dump_active_states(bool expected_active);
+ void verify_active_states(bool expected_active);
+#endif // ASSERT
+
+protected:
+ SATBMarkQueueSet();
+ ~SATBMarkQueueSet() {}
+
+ template<typename Filter>
+ void apply_filter(Filter filter, SATBMarkQueue* queue) {
+ queue->apply_filter(filter);
+ }
+
+ void initialize(Monitor* cbl_mon, Mutex* fl_lock,
+ int process_completed_threshold,
+ uint buffer_enqueue_threshold_percentage,
+ Mutex* lock);
+
+public:
+ virtual SATBMarkQueue& satb_queue_for_thread(JavaThread* const t) const = 0;
+
+ // Apply "set_active(active)" to all SATB queues in the set. It should be
+ // called only with the world stopped. The method will assert that the
+ // SATB queues of all threads it visits, as well as the SATB queue
+ // set itself, has an active value same as expected_active.
+ void set_active_all_threads(bool active, bool expected_active);
+
+ size_t buffer_enqueue_threshold() const { return _buffer_enqueue_threshold; }
+ virtual void filter(SATBMarkQueue* queue) = 0;
+
+ // Filter all the currently-active SATB buffers.
+ void filter_thread_buffers();
+
+ // If there exists some completed buffer, pop and process it, and
+ // return true. Otherwise return false. Processing a buffer
+ // consists of applying the closure to the active range of the
+ // buffer; the leading entries may be excluded due to filtering.
+ bool apply_closure_to_completed_buffer(SATBBufferClosure* cl);
+
+#ifndef PRODUCT
+ // Helpful for debugging
+ void print_all(const char* msg);
+#endif // PRODUCT
+
+ SATBMarkQueue* shared_satb_queue() { return &_shared_satb_queue; }
+
+ // If a marking is being abandoned, reset any unprocessed log buffers.
+ void abandon_partial_marking();
+};
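+
+// Illustrative sketch (not part of this change): a collector supplies a
+// concrete queue set by implementing the two pure virtuals, roughly as
+// G1SATBMarkQueueSet does elsewhere in this patch (names hypothetical):
+//
+//   class MySATBMarkQueueSet : public SATBMarkQueueSet {
+//   public:
+//     virtual SATBMarkQueue& satb_queue_for_thread(JavaThread* const t) const {
+//       return my_thread_local_data(t)->satb_mark_queue();
+//     }
+//     virtual void filter(SATBMarkQueue* queue) {
+//       apply_filter(my_filter_out_entry, queue);  // hypothetical predicate
+//     }
+//   };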
+
+inline void SATBMarkQueue::filter() {
+ static_cast<SATBMarkQueueSet*>(qset())->filter(this);
+}
+
+// Removes entries from the buffer that are no longer needed, as
+// determined by filter_out. If e is a void* entry in the buffer,
+// filter_out(e) must be a valid expression whose value is convertible
+// to bool. Entries are removed (filtered out) if the result is true,
+// retained if false.
+template<typename Filter>
+inline void SATBMarkQueue::apply_filter(Filter filter_out) {
+ void** buf = this->_buf;
+
+ if (buf == NULL) {
+ // nothing to do
+ return;
+ }
+
+ // Two-fingered compaction toward the end.
+ void** src = &buf[this->index()];
+ void** dst = &buf[this->capacity()];
+ assert(src <= dst, "invariant");
+ for ( ; src < dst; ++src) {
+ // Search low to high for an entry to keep.
+ void* entry = *src;
+ if (!filter_out(entry)) {
+ // Found keeper. Search high to low for an entry to discard.
+ while (src < --dst) {
+ if (filter_out(*dst)) {
+ *dst = entry; // Replace discard with keeper.
+ break;
+ }
+ }
+ // If discard search failed (src == dst), the outer loop will also end.
+ }
+ }
+ // dst points to the lowest retained entry, or the end of the buffer
+ // if all the entries were filtered out.
+ this->set_index(dst - buf);
+}
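+
+// Worked example (illustrative, not part of this change): for a full
+// buffer [A, B, C, D] with index() == 0, where filter_out keeps A and C
+// and discards B and D: src finds keeper A at slot 0; dst scans down
+// and finds discard D at slot 3, so A is copied there. src then finds
+// keeper C at slot 2, where the fingers meet (src == dst) and both
+// searches stop. dst is left pointing at slot 2, so set_index(2) makes
+// slots 2..3 = [C, A] the active region; slots 0..1 are dead.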
+
+#endif // SHARE_GC_SHARED_SATBMARKQUEUE_HPP
--- a/src/hotspot/share/precompiled/precompiled.hpp Fri Aug 17 21:36:02 2018 -0700
+++ b/src/hotspot/share/precompiled/precompiled.hpp Sat Aug 18 13:59:25 2018 -0400
@@ -101,8 +101,10 @@
# include "gc/shared/generationCounters.hpp"
# include "gc/shared/jvmFlagConstraintsGC.hpp"
# include "gc/shared/modRefBarrierSet.hpp"
+# include "gc/shared/ptrQueue.hpp"
# include "gc/shared/referencePolicy.hpp"
# include "gc/shared/referenceProcessor.hpp"
+# include "gc/shared/satbMarkQueue.hpp"
# include "gc/shared/space.hpp"
# include "gc/shared/spaceDecorator.hpp"
# include "gc/shared/taskqueue.hpp"
@@ -309,8 +311,6 @@
# include "gc/g1/g1OopClosures.hpp"
# include "gc/g1/g1_globals.hpp"
# include "gc/g1/jvmFlagConstraintsG1.hpp"
-# include "gc/g1/ptrQueue.hpp"
-# include "gc/g1/satbMarkQueue.hpp"
#endif // INCLUDE_G1GC
#if INCLUDE_PARALLELGC
# include "gc/parallel/gcAdaptivePolicyCounters.hpp"