hotspot/src/share/vm/gc/g1/satbQueue.cpp
changeset 33762 3d1dd03dfd3f
parent 33759 8a0e9139a9c5
parent 33761 329db4b51480
child 33786 ac8da6513351
--- a/hotspot/src/share/vm/gc/g1/satbQueue.cpp	Wed Nov 04 18:10:18 2015 +0000
+++ b/hotspot/src/share/vm/gc/g1/satbQueue.cpp	Wed Nov 04 20:02:54 2015 +0000
@@ -33,6 +33,15 @@
 #include "runtime/thread.hpp"
 #include "runtime/vmThread.hpp"
 
+ObjPtrQueue::ObjPtrQueue(SATBMarkQueueSet* qset, bool permanent) :
+  // SATB queues are only active during marking cycles. We create
+  // them with their active field set to false. If a thread is
+  // created during a cycle and its SATB queue needs to be activated
+  // before the thread starts running, we'll need to set its active
+  // field to true. This is done in JavaThread::initialize_queues().
+  PtrQueue(qset, permanent, false /* active */)
+{ }
+
 void ObjPtrQueue::flush() {
   // Filter now to possibly save work later.  If filtering empties the
   // buffer then flush_impl can deallocate the buffer.
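
The new out-of-line ObjPtrQueue constructor simply forwards to the PtrQueue base constructor with active set to false, so every SATB queue starts life inactive and is only switched on while a marking cycle is in progress; a thread created mid-cycle then gets its queue activated before it starts running, which is the job the comment assigns to JavaThread::initialize_queues(). A minimal standalone sketch of that construction/activation pattern, using illustrative class names rather than the real PtrQueue/PtrQueueSet declarations:

    // sketch: a queue that is constructed inactive and only switched on
    // while a marking cycle is in progress (names are illustrative)
    class QueueSetSketch;

    class QueueSketch {
    public:
      QueueSketch(QueueSetSketch* qset, bool permanent, bool active)
        : _qset(qset), _permanent(permanent), _active(active) {}
      bool is_active() const { return _active; }
      void set_active(bool value) { _active = value; }
    private:
      QueueSetSketch* _qset;
      bool _permanent;
      bool _active;
    };

    class SATBQueueSketch : public QueueSketch {
    public:
      // mirrors the patch: always start inactive; a thread created
      // mid-cycle gets its queue activated before it starts running
      SATBQueueSketch(QueueSetSketch* qset, bool permanent)
        : QueueSketch(qset, permanent, false /* active */) {}
    };
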
@@ -99,7 +108,6 @@
 void ObjPtrQueue::filter() {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   void** buf = _buf;
-  size_t sz = _sz;
 
   if (buf == NULL) {
     // nothing to do
@@ -107,43 +115,37 @@
   }
 
   // Used for sanity checking at the end of the loop.
-  debug_only(size_t entries = 0; size_t retained = 0;)
-
-  size_t i = sz;
-  size_t new_index = sz;
+  DEBUG_ONLY(size_t entries = 0; size_t retained = 0;)
 
-  while (i > _index) {
-    assert(i > 0, "we should have at least one more entry to process");
-    i -= oopSize;
-    debug_only(entries += 1;)
-    void** p = &buf[byte_index_to_index((int) i)];
-    void* entry = *p;
+  assert(_index <= _sz, "invariant");
+  void** limit = &buf[byte_index_to_index(_index)];
+  void** src = &buf[byte_index_to_index(_sz)];
+  void** dst = src;
+
+  while (limit < src) {
+    DEBUG_ONLY(entries += 1;)
+    --src;
+    void* entry = *src;
     // NULL the entry so that unused parts of the buffer contain NULLs
     // at the end. If we are going to retain it we will copy it to its
     // final place. If we have retained all entries we have visited so
     // far, we'll just end up copying it to the same place.
-    *p = NULL;
+    *src = NULL;
 
     if (requires_marking(entry, g1h) && !g1h->isMarkedNext((oop)entry)) {
-      assert(new_index > 0, "we should not have already filled up the buffer");
-      new_index -= oopSize;
-      assert(new_index >= i,
-             "new_index should never be below i, as we always compact 'up'");
-      void** new_p = &buf[byte_index_to_index((int) new_index)];
-      assert(new_p >= p, "the destination location should never be below "
-             "the source as we always compact 'up'");
-      assert(*new_p == NULL,
-             "we should have already cleared the destination location");
-      *new_p = entry;
-      debug_only(retained += 1;)
+      --dst;
+      assert(*dst == NULL, "filtering destination should be clear");
+      *dst = entry;
+      DEBUG_ONLY(retained += 1;)
     }
   }
+  size_t new_index = pointer_delta(dst, buf, 1);
 
 #ifdef ASSERT
-  size_t entries_calc = (sz - _index) / oopSize;
+  size_t entries_calc = (_sz - _index) / sizeof(void*);
   assert(entries == entries_calc, "the number of entries we counted "
          "should match the number of entries we calculated");
-  size_t retained_calc = (sz - new_index) / oopSize;
+  size_t retained_calc = (_sz - new_index) / sizeof(void*);
   assert(retained == retained_calc, "the number of retained entries we counted "
          "should match the number of retained entries we calculated");
 #endif // ASSERT
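
The rewritten filter() drops the byte-index bookkeeping in favour of two raw pointers: src walks down from the top of the live region, dst compacts retained entries toward the end of the buffer, and the new byte index is recovered afterwards with pointer_delta(dst, buf, 1). Because dst starts equal to src and never falls below it, the destination slot has always been NULLed by an earlier iteration, which is all the remaining assert needs to check. A compilable sketch of the same downward two-pointer compaction over a plain array; the retain predicate stands in for the requires_marking/isMarkedNext test, and the sketch returns an element index where the real code keeps byte indices:

    // sketch: downward two-pointer compaction over the live region
    // [index, size) of a buffer, as in the rewritten filter()
    #include <cassert>
    #include <cstddef>

    typedef bool (*RetainFn)(void* entry);

    // returns the new start of the live region as an element index
    static size_t filter_region(void** buf, size_t index, size_t size,
                                RetainFn retain) {
      assert(index <= size);
      void** limit = buf + index;  // oldest live entry
      void** src   = buf + size;   // one past the newest live entry
      void** dst   = src;
      while (limit < src) {
        --src;
        void* entry = *src;
        *src = NULL;               // leave NULLs behind the compacted block
        if (retain(entry)) {
          --dst;
          assert(*dst == NULL);    // dst never passes src, so already cleared
          *dst = entry;            // compact 'up' toward the end of the buffer
        }
      }
      return (size_t)(dst - buf);
    }
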
@@ -170,11 +172,8 @@
 
   filter();
 
-  size_t sz = _sz;
-  size_t all_entries = sz / oopSize;
-  size_t retained_entries = (sz - _index) / oopSize;
-  size_t perc = retained_entries * 100 / all_entries;
-  bool should_enqueue = perc > (size_t) G1SATBBufferEnqueueingThresholdPercent;
+  size_t percent_used = ((_sz - _index) * 100) / _sz;
+  bool should_enqueue = percent_used > G1SATBBufferEnqueueingThresholdPercent;
   return should_enqueue;
 }
 
@@ -185,8 +184,8 @@
     assert(_index % sizeof(void*) == 0, "invariant");
     assert(_sz % sizeof(void*) == 0, "invariant");
     assert(_index <= _sz, "invariant");
-    cl->do_buffer(_buf + byte_index_to_index((int)_index),
-                  byte_index_to_index((int)(_sz - _index)));
+    cl->do_buffer(_buf + byte_index_to_index(_index),
+                  byte_index_to_index(_sz - _index));
     _index = _sz;
   }
 }
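
should_enqueue_buffer() now computes the occupancy percentage directly in bytes: (_sz - _index) is the number of bytes still holding retained entries after filtering, so a 1024-byte buffer with _index == 512 is 50% used and would not be enqueued against a threshold of 60. The apply-closure change in the same region just drops the (int) casts, presumably because byte_index_to_index now accepts a size_t. A tiny self-contained sketch of the enqueue decision, with made-up numbers and a local stand-in for G1SATBBufferEnqueueingThresholdPercent:

    // sketch: the post-filtering enqueue decision with hypothetical numbers
    #include <cstddef>
    #include <cstdio>

    int main() {
      size_t sz = 1024;       // buffer capacity in bytes
      size_t index = 512;     // first live byte after filtering
      size_t threshold = 60;  // stand-in for the threshold percentage flag
      size_t percent_used = ((sz - index) * 100) / sz;  // 50
      std::printf("%zu%% used -> %s\n", percent_used,
                  percent_used > threshold ? "enqueue the buffer"
                                           : "keep using it in place");
      return 0;
    }
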
@@ -208,7 +207,7 @@
 
 SATBMarkQueueSet::SATBMarkQueueSet() :
   PtrQueueSet(),
-  _shared_satb_queue(this, true /*perm*/) { }
+  _shared_satb_queue(this, true /* permanent */) { }
 
 void SATBMarkQueueSet::initialize(Monitor* cbl_mon, Mutex* fl_lock,
                                   int process_completed_threshold,
@@ -295,7 +294,7 @@
     // Filtering can result in non-full completed buffers; see
     // should_enqueue_buffer.
     assert(_sz % sizeof(void*) == 0, "invariant");
-    size_t limit = ObjPtrQueue::byte_index_to_index((int)_sz);
+    size_t limit = ObjPtrQueue::byte_index_to_index(_sz);
     for (size_t i = 0; i < limit; ++i) {
       if (buf[i] != NULL) {
         // Found the end of the block of NULLs; process the remainder.
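
The last hunk reflects the fact that a completed buffer may have been filtered rather than filled: the live entries sit at the top of the buffer and the slots below them have been NULLed, so the loop skips the leading block of NULLs and only hands the remainder on. A rough sketch of that skip, with process() standing in for the closure application in the real code:

    // sketch: skipping the NULL prefix that filtering leaves at the start
    // of a completed buffer; process() stands in for applying the closure
    #include <cstddef>

    static size_t g_live_entries = 0;

    static void process(void** entries, size_t count) {
      (void)entries;
      g_live_entries += count;  // stand-in for the real SATB closure
    }

    static void apply_to_completed(void** buf, size_t limit /* in elements */) {
      for (size_t i = 0; i < limit; ++i) {
        if (buf[i] != NULL) {
          // found the end of the block of NULLs; the rest is live
          process(&buf[i], limit - i);
          return;
        }
      }
      // the whole buffer was filtered away; nothing left to process
    }
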