8178836: Improve PtrQueue index abstraction
author kbarrett
date Mon, 08 May 2017 07:16:10 -0400
changeset 46443 cdb638b5ec53
parent 46442 b833f0dbbb5b
child 46444 677be3444372
8178836: Improve PtrQueue index abstraction
Summary: Prefer element indexes where possible.
Reviewed-by: shade, mgerdin
hotspot/src/share/vm/gc/g1/dirtyCardQueue.cpp
hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp
hotspot/src/share/vm/gc/g1/ptrQueue.cpp
hotspot/src/share/vm/gc/g1/ptrQueue.hpp
hotspot/src/share/vm/gc/g1/satbMarkQueue.cpp
hotspot/src/share/vm/gc/g1/satbMarkQueue.hpp
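
The whole change hinges on one conversion pair: a PtrQueue entry occupies sizeof(void*) bytes, so a byte offset and an element index differ only by that factor. The standalone C++ sketch below is not part of this changeset and not HotSpot code; it uses only the standard library, and the helper names simply mirror byte_index_to_index and index_to_byte_index from the patch. It shows the relationship that the patch hides behind private PtrQueue helpers so that callers can work purely in element units.

#include <cassert>
#include <cstddef>
#include <cstdio>

static const size_t element_size = sizeof(void*);

// Convert a byte offset within the buffer to an element index.
static size_t byte_index_to_index(size_t byte_ind) {
  assert(byte_ind % element_size == 0 && "byte index must be pointer-size aligned");
  return byte_ind / element_size;
}

// Convert an element index to a byte offset within the buffer.
static size_t index_to_byte_index(size_t ind) {
  return ind * element_size;
}

int main() {
  // PtrQueue fills from the high end toward zero: _index starts at the
  // capacity (empty buffer) and decreases by one element per enqueue.
  const size_t capacity_in_entries = 8;
  size_t index_in_bytes = index_to_byte_index(capacity_in_entries);  // empty

  index_in_bytes -= element_size;                                    // one enqueue
  std::printf("element index: %zu, entries in use: %zu\n",
              byte_index_to_index(index_in_bytes),
              capacity_in_entries - byte_index_to_index(index_in_bytes));
  return 0;
}

Keeping both conversions next to the single byte-valued field makes it much harder for a call site to mix units, which is what the deleted /oopSize and byte_index_to_index calls in the files below were doing by hand.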
--- a/hotspot/src/share/vm/gc/g1/dirtyCardQueue.cpp	Sun May 07 16:42:03 2017 -0400
+++ b/hotspot/src/share/vm/gc/g1/dirtyCardQueue.cpp	Mon May 08 07:16:10 2017 -0400
@@ -157,8 +157,8 @@
   if (cl == NULL) return true;
   bool result = true;
   void** buf = BufferNode::make_buffer_from_node(node);
-  size_t limit = DirtyCardQueue::byte_index_to_index(buffer_size());
-  size_t i = DirtyCardQueue::byte_index_to_index(node->index());
+  size_t i = node->index();
+  size_t limit = buffer_size();
   for ( ; i < limit; ++i) {
     jbyte* card_ptr = static_cast<jbyte*>(buf[i]);
     assert(card_ptr != NULL, "invariant");
@@ -168,9 +168,8 @@
     }
   }
   if (consume) {
-    size_t new_index = DirtyCardQueue::index_to_byte_index(i);
-    assert(new_index <= buffer_size(), "invariant");
-    node->set_index(new_index);
+    assert(i <= buffer_size(), "invariant");
+    node->set_index(i);
   }
   return result;
 }
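
With node->index() and buffer_size() both already in card (element) units, the worker loop above is just a walk over buf[i] for i in [index, limit). Below is a minimal standalone sketch of that shape, not the DirtyCardQueueSet code itself; the do_card_ptr stand-in and the "stop card" it checks for are invented for illustration.

#include <cstddef>
#include <cstdio>

// Stand-in for the real card closure: stop when a particular card is seen.
static void* stop_card = nullptr;

static bool do_card_ptr(void* card_ptr) {
  return card_ptr != stop_card;
}

// Returns true if every card in [index, limit) was processed; otherwise
// *resume_index is the element index the loop stopped at.
static bool apply_closure_to_buffer(void** buf, size_t index, size_t limit,
                                    size_t* resume_index) {
  size_t i = index;
  bool result = true;
  for (; i < limit; ++i) {
    if (!do_card_ptr(buf[i])) {   // closure signalled "stop here"
      result = false;
      break;
    }
  }
  *resume_index = i;              // element units, as in node->set_index(i)
  return result;
}

int main() {
  int cards[4];                              // fake card table entries
  void* buf[4] = { &cards[0], &cards[1], &cards[2], &cards[3] };
  stop_card = &cards[2];                     // pretend the closure bails here
  size_t resume = 0;
  bool done = apply_closure_to_buffer(buf, 0, 4, &resume);
  std::printf("done=%d, resume at card %zu\n", done ? 1 : 0, resume);  // done=0, resume=2
  return 0;
}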
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp	Sun May 07 16:42:03 2017 -0400
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp	Mon May 08 07:16:10 2017 -0400
@@ -2781,10 +2781,7 @@
   size_t buffer_size = dcqs.buffer_size();
   size_t buffer_num = dcqs.completed_buffers_num();
 
-  // PtrQueueSet::buffer_size() and PtrQueue:size() return sizes
-  // in bytes - not the number of 'entries'. We need to convert
-  // into a number of cards.
-  return (buffer_size * buffer_num + extra_cards) / oopSize;
+  return buffer_size * buffer_num + extra_cards;
 }
 
 class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
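
The deleted division by oopSize existed only because buffer_size() used to report bytes; with the size now kept in cards, the sum is already a card count. Here is a tiny arithmetic sketch with invented numbers (the 256-card buffer size, 10 buffers and 17 extra cards are assumptions for illustration, not values taken from G1).

#include <cstddef>
#include <cstdio>

int main() {
  const size_t buffer_size = 256;   // cards per completed buffer (assumed value)
  const size_t buffer_num  = 10;    // completed buffers in the set (assumed)
  const size_t extra_cards = 17;    // cards still in per-thread queues (assumed)

  // Old form, when buffer_size() and the per-thread sizes were byte counts:
  //   (buffer_size_in_bytes * buffer_num + extra_bytes) / oopSize
  // New form: everything is already a card count, so no scaling is needed.
  size_t pending_cards = buffer_size * buffer_num + extra_cards;
  std::printf("approximate pending cards: %zu\n", pending_cards);  // 2577
  return 0;
}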
--- a/hotspot/src/share/vm/gc/g1/ptrQueue.cpp	Sun May 07 16:42:03 2017 -0400
+++ b/hotspot/src/share/vm/gc/g1/ptrQueue.cpp	Mon May 08 07:16:10 2017 -0400
@@ -33,8 +33,13 @@
 #include <new>
 
 PtrQueue::PtrQueue(PtrQueueSet* qset, bool permanent, bool active) :
-  _qset(qset), _buf(NULL), _index(0), _sz(0), _active(active),
-  _permanent(permanent), _lock(NULL)
+  _qset(qset),
+  _active(active),
+  _permanent(permanent),
+  _index(0),
+  _capacity_in_bytes(0),
+  _buf(NULL),
+  _lock(NULL)
 {}
 
 PtrQueue::~PtrQueue() {
@@ -43,7 +48,7 @@
 
 void PtrQueue::flush_impl() {
   if (_buf != NULL) {
-    BufferNode* node = BufferNode::make_node_from_buffer(_buf, _index);
+    BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
     if (is_empty()) {
       // No work to do.
       qset()->deallocate_buffer(node);
@@ -51,23 +56,21 @@
       qset()->enqueue_complete_buffer(node);
     }
     _buf = NULL;
-    _index = 0;
+    set_index(0);
   }
 }
 
 
 void PtrQueue::enqueue_known_active(void* ptr) {
-  assert(_index <= _sz, "Invariant.");
-  assert(_index == 0 || _buf != NULL, "invariant");
-
   while (_index == 0) {
     handle_zero_index();
   }
 
-  assert(_index > 0, "postcondition");
-  _index -= sizeof(void*);
-  _buf[byte_index_to_index(_index)] = ptr;
-  assert(_index <= _sz, "Invariant.");
+  assert(_buf != NULL, "postcondition");
+  assert(index() > 0, "postcondition");
+  assert(index() <= capacity(), "invariant");
+  _index -= _element_size;
+  _buf[index()] = ptr;
 }
 
 void PtrQueue::locking_enqueue_completed_buffer(BufferNode* node) {
@@ -85,10 +88,8 @@
 }
 
 
-BufferNode* BufferNode::allocate(size_t byte_size) {
-  assert(byte_size > 0, "precondition");
-  assert(is_size_aligned(byte_size, sizeof(void**)),
-         "Invalid buffer size " SIZE_FORMAT, byte_size);
+BufferNode* BufferNode::allocate(size_t size) {
+  size_t byte_size = size * sizeof(void*);
   void* data = NEW_C_HEAP_ARRAY(char, buffer_offset() + byte_size, mtGC);
   return new (data) BufferNode;
 }
@@ -99,10 +100,10 @@
 }
 
 PtrQueueSet::PtrQueueSet(bool notify_when_complete) :
+  _buffer_size(0),
   _max_completed_queue(0),
   _cbl_mon(NULL), _fl_lock(NULL),
   _notify_when_complete(notify_when_complete),
-  _sz(0),
   _completed_buffers_head(NULL),
   _completed_buffers_tail(NULL),
   _n_completed_buffers(0),
@@ -133,7 +134,6 @@
 }
 
 void** PtrQueueSet::allocate_buffer() {
-  assert(_sz > 0, "Didn't set a buffer size.");
   BufferNode* node = NULL;
   {
     MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag);
@@ -144,7 +144,7 @@
     }
   }
   if (node == NULL) {
-    node = BufferNode::allocate(_sz);
+    node = BufferNode::allocate(buffer_size());
   } else {
     // Reinitialize buffer obtained from free list.
     node->set_index(0);
@@ -154,7 +154,6 @@
 }
 
 void PtrQueueSet::deallocate_buffer(BufferNode* node) {
-  assert(_sz > 0, "Didn't set a buffer size.");
   MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag);
   node->set_next(_fl_owner->_buf_free_list);
   _fl_owner->_buf_free_list = node;
@@ -177,13 +176,13 @@
 }
 
 void PtrQueue::handle_zero_index() {
-  assert(_index == 0, "Precondition.");
+  assert(index() == 0, "precondition");
 
   // This thread records the full buffer and allocates a new one (while
   // holding the lock if there is one).
   if (_buf != NULL) {
     if (!should_enqueue_buffer()) {
-      assert(_index > 0, "the buffer can only be re-used if it's not full");
+      assert(index() > 0, "the buffer can only be re-used if it's not full");
       return;
     }
 
@@ -206,7 +205,7 @@
       // preventing the subsequent the multiple enqueue, and
       // install a newly allocated buffer below.
 
-      BufferNode* node = BufferNode::make_node_from_buffer(_buf, _index);
+      BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
       _buf = NULL;         // clear shared _buf field
 
       locking_enqueue_completed_buffer(node); // enqueue completed buffer
@@ -219,20 +218,21 @@
 
       if (_buf != NULL) return;
     } else {
-      BufferNode* node = BufferNode::make_node_from_buffer(_buf, _index);
+      BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
       if (qset()->process_or_enqueue_complete_buffer(node)) {
         // Recycle the buffer. No allocation.
         assert(_buf == BufferNode::make_buffer_from_node(node), "invariant");
-        assert(_sz == qset()->buffer_size(), "invariant");
-        _index = _sz;
+        assert(capacity() == qset()->buffer_size(), "invariant");
+        reset();
         return;
       }
     }
   }
-  // Reallocate the buffer
+  // Set capacity in case this is the first allocation.
+  set_capacity(qset()->buffer_size());
+  // Allocate a new buffer.
   _buf = qset()->allocate_buffer();
-  _sz = qset()->buffer_size();
-  _index = _sz;
+  reset();
 }
 
 bool PtrQueueSet::process_or_enqueue_complete_buffer(BufferNode* node) {
@@ -296,8 +296,8 @@
 }
 
 void PtrQueueSet::set_buffer_size(size_t sz) {
-  assert(_sz == 0 && sz > 0, "Should be called only once.");
-  _sz = sz * sizeof(void*);
+  assert(_buffer_size == 0 && sz > 0, "Should be called only once.");
+  _buffer_size = sz;
 }
 
 // Merge lists of buffers. Notify the processing threads.
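
The enqueue path above now keeps the byte-valued _index strictly inside PtrQueue: the buffer fills from the high end toward zero, a zero index means "full", and handle_zero_index installs an empty buffer before the store. The toy model below is a std::vector-backed stand-in, not the HotSpot class (and its fixed 4-entry capacity replaces qset()->buffer_size()); it shows why enqueue_known_active can assert index() > 0 right after the while loop.

#include <cassert>
#include <cstddef>
#include <vector>

class ToyPtrQueue {
  static constexpr size_t _element_size = sizeof(void*);
  std::vector<void*> _storage;          // backing store for the sketch
  void** _buf = nullptr;
  size_t _index = 0;                    // byte index, counts down toward zero
  size_t _capacity_in_bytes = 0;

  size_t index() const { return _index / _element_size; }

  void handle_zero_index() {
    // The real code first enqueues the full buffer on the qset (and may get
    // it recycled); this sketch only models the "install an empty buffer and
    // reset the index to the capacity" part.
    const size_t entries = 4;           // stand-in for qset()->buffer_size()
    _capacity_in_bytes = entries * _element_size;
    _storage.assign(entries, nullptr);
    _buf = _storage.data();
    _index = _capacity_in_bytes;        // reset(): buffer is empty again
  }

public:
  void enqueue_known_active(void* ptr) {
    while (_index == 0) {               // no buffer yet, or buffer is full
      handle_zero_index();
    }
    assert(_buf != nullptr && index() > 0);
    _index -= _element_size;            // claim one slot, high end toward zero
    _buf[index()] = ptr;
  }
};

int main() {
  ToyPtrQueue q;
  int dummy = 0;
  for (int i = 0; i < 6; ++i) {         // forces one refill after 4 entries
    q.enqueue_known_active(&dummy);
  }
  return 0;
}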
--- a/hotspot/src/share/vm/gc/g1/ptrQueue.hpp	Sun May 07 16:42:03 2017 -0400
+++ b/hotspot/src/share/vm/gc/g1/ptrQueue.hpp	Mon May 08 07:16:10 2017 -0400
@@ -53,15 +53,57 @@
   // be legally locked by then.
   const bool _permanent;
 
+  // The (byte) index at which an object was last enqueued.  Starts at
+  // capacity_in_bytes (indicating an empty buffer) and goes towards zero.
+  // Value is always pointer-size aligned.
+  size_t _index;
+
+  // Size of the current buffer, in bytes.
+  // Value is always pointer-size aligned.
+  size_t _capacity_in_bytes;
+
+  static const size_t _element_size = sizeof(void*);
+
+  // Get the capacity, in bytes.  The capacity must have been set.
+  size_t capacity_in_bytes() const {
+    assert(_capacity_in_bytes > 0, "capacity not set");
+    return _capacity_in_bytes;
+  }
+
+  void set_capacity(size_t entries) {
+    size_t byte_capacity = index_to_byte_index(entries);
+    assert(_capacity_in_bytes == 0 || _capacity_in_bytes == byte_capacity,
+           "changing capacity " SIZE_FORMAT " -> " SIZE_FORMAT,
+           _capacity_in_bytes, byte_capacity);
+    _capacity_in_bytes = byte_capacity;
+  }
+
+  static size_t byte_index_to_index(size_t ind) {
+    assert(is_size_aligned(ind, _element_size), "precondition");
+    return ind / _element_size;
+  }
+
+  static size_t index_to_byte_index(size_t ind) {
+    return ind * _element_size;
+  }
+
 protected:
   // The buffer.
   void** _buf;
-  // The (byte) index at which an object was last enqueued.  Starts at "_sz"
-  // (indicating an empty buffer) and goes towards zero.
-  size_t _index;
+
+  size_t index() const {
+    return byte_index_to_index(_index);
+  }
 
-  // The (byte) size of the buffer.
-  size_t _sz;
+  void set_index(size_t new_index) {
+    size_t byte_index = index_to_byte_index(new_index);
+    assert(byte_index <= capacity_in_bytes(), "precondition");
+    _index = byte_index;
+  }
+
+  size_t capacity() const {
+    return byte_index_to_index(capacity_in_bytes());
+  }
 
   // If there is a lock associated with this buffer, this is that lock.
   Mutex* _lock;
@@ -84,7 +126,12 @@
   // Associate a lock with a ptr queue.
   void set_lock(Mutex* lock) { _lock = lock; }
 
-  void reset() { if (_buf != NULL) _index = _sz; }
+  // Forcibly set empty.
+  void reset() {
+    if (_buf != NULL) {
+      _index = capacity_in_bytes();
+    }
+  }
 
   void enqueue(volatile void* ptr) {
     enqueue((void*)(ptr));
@@ -109,13 +156,18 @@
 
   void enqueue_known_active(void* ptr);
 
-  size_t size() {
-    assert(_sz >= _index, "Invariant.");
-    return _buf == NULL ? 0 : _sz - _index;
+  // Return the size of the in-use region.
+  size_t size() const {
+    size_t result = 0;
+    if (_buf != NULL) {
+      assert(_index <= capacity_in_bytes(), "Invariant");
+      result = byte_index_to_index(capacity_in_bytes() - _index);
+    }
+    return result;
   }
 
-  bool is_empty() {
-    return _buf == NULL || _sz == _index;
+  bool is_empty() const {
+    return _buf == NULL || capacity_in_bytes() == _index;
   }
 
   // Set the "active" property of the queue to "b".  An enqueue to an
@@ -124,22 +176,14 @@
   void set_active(bool b) {
     _active = b;
     if (!b && _buf != NULL) {
-      _index = _sz;
+      reset();
     } else if (b && _buf != NULL) {
-      assert(_index == _sz, "invariant: queues are empty when activated.");
+      assert(index() == capacity(),
+             "invariant: queues are empty when activated.");
     }
   }
 
-  bool is_active() { return _active; }
-
-  static size_t byte_index_to_index(size_t ind) {
-    assert((ind % sizeof(void*)) == 0, "Invariant.");
-    return ind / sizeof(void*);
-  }
-
-  static size_t index_to_byte_index(size_t ind) {
-    return ind * sizeof(void*);
-  }
+  bool is_active() const { return _active; }
 
   // To support compiler.
 
@@ -156,7 +200,7 @@
     return byte_offset_of(Derived, _buf);
   }
 
-  static ByteSize byte_width_of_buf() { return in_ByteSize(sizeof(void*)); }
+  static ByteSize byte_width_of_buf() { return in_ByteSize(_element_size); }
 
   template<typename Derived>
   static ByteSize byte_offset_of_active() {
@@ -183,10 +227,10 @@
   BufferNode* next() const     { return _next;  }
   void set_next(BufferNode* n) { _next = n;     }
   size_t index() const         { return _index; }
-  void set_index(size_t i)     { _index = i;    }
+  void set_index(size_t i)     { _index = i; }
 
-  // Allocate a new BufferNode with the "buffer" having size bytes.
-  static BufferNode* allocate(size_t byte_size);
+  // Allocate a new BufferNode with the "buffer" having size elements.
+  static BufferNode* allocate(size_t size);
 
   // Free a BufferNode.
   static void deallocate(BufferNode* node);
@@ -213,6 +257,10 @@
 // set, and return completed buffers to the set.
 // All these variables are are protected by the TLOQ_CBL_mon. XXX ???
 class PtrQueueSet VALUE_OBJ_CLASS_SPEC {
+private:
+  // The size of all buffers in the set.
+  size_t _buffer_size;
+
 protected:
   Monitor* _cbl_mon;  // Protects the fields below.
   BufferNode* _completed_buffers_head;
@@ -230,9 +278,6 @@
   // specifies the owner. It is set to "this" by default.
   PtrQueueSet* _fl_owner;
 
-  // The size of all buffers in the set.
-  size_t _sz;
-
   bool _all_active;
 
   // If true, notify_all on _cbl_mon when the threshold is reached.
@@ -270,11 +315,11 @@
 
 public:
 
-  // Return an empty array of size _sz (required to be non-zero).
+  // Return the buffer for a BufferNode of size buffer_size().
   void** allocate_buffer();
 
-  // Return an empty buffer to the free list.  The "buf" argument is
-  // required to be a pointer to the head of an array of length "_sz".
+  // Return an empty buffer to the free list.  The node is required
+  // to have been allocated with a size of buffer_size().
   void deallocate_buffer(BufferNode* node);
 
   // Declares that "buf" is a complete buffer.
@@ -296,8 +341,11 @@
   // can be called.  And should only be called once.
   void set_buffer_size(size_t sz);
 
-  // Get the buffer size.
-  size_t buffer_size() { return _sz; }
+  // Get the buffer size.  Must have been set.
+  size_t buffer_size() const {
+    assert(_buffer_size > 0, "buffer size not set");
+    return _buffer_size;
+  }
 
   // Get/Set the number of completed buffers that triggers log processing.
   void set_process_completed_threshold(int sz) { _process_completed_threshold = sz; }
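
The header keeps _index and _capacity_in_bytes as byte quantities (the "// To support compiler." offsets still see the raw byte-valued field) and confines every conversion to a handful of accessors; everything else works in element units. Below is a simplified standalone model of that accessor layer; IndexView is an invented name and this is not the PtrQueue class.

#include <cassert>
#include <cstddef>
#include <cstdio>

class IndexView {
  static constexpr size_t _element_size = sizeof(void*);
  size_t _index;               // bytes, pointer-size aligned
  size_t _capacity_in_bytes;   // bytes, pointer-size aligned

  static size_t byte_index_to_index(size_t ind) {
    assert(ind % _element_size == 0);
    return ind / _element_size;
  }
  static size_t index_to_byte_index(size_t ind) { return ind * _element_size; }

public:
  explicit IndexView(size_t capacity_in_entries)
    : _index(index_to_byte_index(capacity_in_entries)),   // starts empty
      _capacity_in_bytes(index_to_byte_index(capacity_in_entries)) {}

  size_t index() const    { return byte_index_to_index(_index); }
  size_t capacity() const { return byte_index_to_index(_capacity_in_bytes); }

  void set_index(size_t new_index) {
    size_t byte_index = index_to_byte_index(new_index);
    assert(byte_index <= _capacity_in_bytes);   // precondition, as in the patch
    _index = byte_index;
  }

  // In-use entries: distance from the current index up to the capacity.
  size_t size() const     { return capacity() - index(); }
  bool   is_empty() const { return _index == _capacity_in_bytes; }
};

int main() {
  IndexView v(8);
  v.set_index(5);             // three entries in use (slots 5, 6, 7)
  std::printf("size = %zu, empty = %d\n", v.size(), v.is_empty() ? 1 : 0);
  return 0;
}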
--- a/hotspot/src/share/vm/gc/g1/satbMarkQueue.cpp	Sun May 07 16:42:03 2017 -0400
+++ b/hotspot/src/share/vm/gc/g1/satbMarkQueue.cpp	Mon May 08 07:16:10 2017 -0400
@@ -118,11 +118,10 @@
     return;
   }
 
-  assert(_index <= _sz, "invariant");
-
   // Two-fingered compaction toward the end.
-  void** src = &buf[byte_index_to_index(_index)];
-  void** dst = &buf[byte_index_to_index(_sz)];
+  void** src = &buf[index()];
+  void** dst = &buf[capacity()];
+  assert(src <= dst, "invariant");
   for ( ; src < dst; ++src) {
     // Search low to high for an entry to keep.
     void* entry = *src;
@@ -139,7 +138,7 @@
   }
   // dst points to the lowest retained entry, or the end of the buffer
   // if all the entries were filtered out.
-  _index = pointer_delta(dst, buf, 1);
+  set_index(dst - buf);
 }
 
 // This method will first apply the above filtering to the buffer. If
@@ -156,12 +155,13 @@
 
   // This method should only be called if there is a non-NULL buffer
   // that is full.
-  assert(_index == 0, "pre-condition");
+  assert(index() == 0, "pre-condition");
   assert(_buf != NULL, "pre-condition");
 
   filter();
 
-  size_t percent_used = ((_sz - _index) * 100) / _sz;
+  size_t cap = capacity();
+  size_t percent_used = ((cap - index()) * 100) / cap;
   bool should_enqueue = percent_used > G1SATBBufferEnqueueingThresholdPercent;
   return should_enqueue;
 }
@@ -170,27 +170,27 @@
   assert(SafepointSynchronize::is_at_safepoint(),
          "SATB queues must only be processed at safepoints");
   if (_buf != NULL) {
-    assert(_index % sizeof(void*) == 0, "invariant");
-    assert(_sz % sizeof(void*) == 0, "invariant");
-    assert(_index <= _sz, "invariant");
-    cl->do_buffer(_buf + byte_index_to_index(_index),
-                  byte_index_to_index(_sz - _index));
-    _index = _sz;
+    cl->do_buffer(&_buf[index()], size());
+    reset();
   }
 }
 
 #ifndef PRODUCT
 // Helpful for debugging
 
-void SATBMarkQueue::print(const char* name) {
-  print(name, _buf, _index, _sz);
+static void print_satb_buffer(const char* name,
+                              void** buf,
+                              size_t index,
+                              size_t capacity) {
+  tty->print_cr("  SATB BUFFER [%s] buf: " PTR_FORMAT " index: " SIZE_FORMAT
+                " capacity: " SIZE_FORMAT,
+                name, p2i(buf), index, capacity);
 }
 
-void SATBMarkQueue::print(const char* name,
-                          void** buf, size_t index, size_t sz) {
-  tty->print_cr("  SATB BUFFER [%s] buf: " PTR_FORMAT " index: " SIZE_FORMAT " sz: " SIZE_FORMAT,
-                name, p2i(buf), index, sz);
+void SATBMarkQueue::print(const char* name) {
+  print_satb_buffer(name, _buf, index(), capacity());
 }
+
 #endif // PRODUCT
 
 SATBMarkQueueSet::SATBMarkQueueSet() :
@@ -275,8 +275,8 @@
   }
   if (nd != NULL) {
     void **buf = BufferNode::make_buffer_from_node(nd);
-    size_t index = SATBMarkQueue::byte_index_to_index(nd->index());
-    size_t size = SATBMarkQueue::byte_index_to_index(_sz);
+    size_t index = nd->index();
+    size_t size = buffer_size();
     assert(index <= size, "invariant");
     cl->do_buffer(buf + index, size - index);
     deallocate_buffer(nd);
@@ -303,7 +303,7 @@
   while (nd != NULL) {
     void** buf = BufferNode::make_buffer_from_node(nd);
     jio_snprintf(buffer, SATB_PRINTER_BUFFER_SIZE, "Enqueued: %d", i);
-    SATBMarkQueue::print(buffer, buf, 0, _sz);
+    print_satb_buffer(buffer, buf, nd->index(), buffer_size());
     nd = nd->next();
     i += 1;
   }
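
After the change the SATB filter works directly with element pointers: src starts at &buf[index()], dst at &buf[capacity()], and retained entries are compacted toward the high end, with dst - buf feeding set_index(). The following is a standalone toy version of that two-fingered compaction; retain_entry is a made-up predicate standing in for the real SATB filtering test.

#include <cassert>
#include <cstddef>
#include <cstdio>

static bool retain_entry(void* entry) {
  return entry != nullptr;    // stand-in for the real filtering decision
}

// Compacts retained entries toward the end of buf[index, capacity) and returns
// the new element index (capacity if everything was filtered out).
static size_t filter_buffer(void** buf, size_t index, size_t capacity) {
  void** src = &buf[index];       // lowest in-use entry
  void** dst = &buf[capacity];    // one past the end
  assert(src <= dst);
  for (; src < dst; ++src) {
    void* entry = *src;           // search low to high for an entry to keep
    if (retain_entry(entry)) {
      while (src < --dst) {       // search high to low for an entry to discard
        if (!retain_entry(*dst)) {
          *dst = entry;           // replace the discard with the keeper
          break;
        }
      }
      // If no discard was found, src == dst and both loops terminate.
    }
  }
  return static_cast<size_t>(dst - buf);   // element units, as in set_index()
}

int main() {
  int marker;                                // any non-null address will do
  void* keeper = &marker;
  void* buf[6] = { keeper, nullptr, keeper, nullptr, nullptr, keeper };
  size_t new_index = filter_buffer(buf, 0, 6);
  std::printf("retained %zu of 6 entries\n", 6 - new_index);  // 3
  return 0;
}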
--- a/hotspot/src/share/vm/gc/g1/satbMarkQueue.hpp	Sun May 07 16:42:03 2017 -0400
+++ b/hotspot/src/share/vm/gc/g1/satbMarkQueue.hpp	Mon May 08 07:16:10 2017 -0400
@@ -66,7 +66,6 @@
 #ifndef PRODUCT
   // Helpful for debugging
   void print(const char* name);
-  static void print(const char* name, void** buf, size_t index, size_t sz);
 #endif // PRODUCT
 
   // Compiler support.
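
The static SATBMarkQueue::print overload disappears from the header because satbMarkQueue.cpp now routes both the per-queue print and the completed-buffer listing through a file-local helper. A minimal sketch of that pattern, with printf standing in for the tty stream; this is not the HotSpot code.

#include <cstddef>
#include <cstdio>

// File-local helper: one formatting routine, no public static entry point.
static void print_satb_buffer(const char* name, void** buf,
                              size_t index, size_t capacity) {
  std::printf("  SATB BUFFER [%s] buf: %p index: %zu capacity: %zu\n",
              name, static_cast<void*>(buf), index, capacity);
}

class ToySATBQueue {
  void** _buf;
  size_t _index;
  size_t _capacity;
public:
  ToySATBQueue(void** buf, size_t index, size_t capacity)
    : _buf(buf), _index(index), _capacity(capacity) {}
  // The public member just forwards its own state to the shared helper.
  void print(const char* name) { print_satb_buffer(name, _buf, _index, _capacity); }
};

int main() {
  void* entries[4] = { nullptr, nullptr, entries, entries };
  ToySATBQueue q(entries, 2, 4);
  q.print("toy");
  print_satb_buffer("free-standing", entries, 0, 4);  // queue-set style use
  return 0;
}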