8228657: ZGC: ZObjectAllocator::used() should take undone allocations into account
author pliden
Tue, 06 Aug 2019 15:50:00 +0200
changeset 57657 22e12dd8f21a
parent 57656 9429ecaee2e0
child 57658 0022b39ae5ae
8228657: ZGC: ZObjectAllocator::used() should take undone allocations into account Reviewed-by: eosterlund
src/hotspot/share/gc/z/zObjectAllocator.cpp
src/hotspot/share/gc/z/zObjectAllocator.hpp
--- a/src/hotspot/share/gc/z/zObjectAllocator.cpp	Tue Aug 06 15:49:53 2019 +0200
+++ b/src/hotspot/share/gc/z/zObjectAllocator.cpp	Tue Aug 06 15:50:00 2019 +0200
@@ -44,6 +44,7 @@
 ZObjectAllocator::ZObjectAllocator(uint nworkers) :
     _nworkers(nworkers),
     _used(0),
+    _undone(0),
     _shared_medium_page(NULL),
     _shared_small_page(NULL),
     _worker_small_page(NULL) {}
@@ -58,6 +59,13 @@
   return page;
 }
 
+void ZObjectAllocator::undo_alloc_page(ZPage* page) {
+  // Increment undone bytes
+  Atomic::add(page->size(), _undone.addr());
+
+  ZHeap::heap()->undo_alloc_page(page);
+}
+
 uintptr_t ZObjectAllocator::alloc_object_in_shared_page(ZPage** shared_page,
                                                         uint8_t page_type,
                                                         size_t page_size,
@@ -99,7 +107,7 @@
         addr = prev_addr;
 
         // Undo new page allocation
-        ZHeap::heap()->undo_alloc_page(new_page);
+        undo_alloc_page(new_page);
       }
     }
   }
@@ -208,7 +216,7 @@
   assert(page->type() == ZPageTypeLarge, "Invalid page type");
 
   // Undo page allocation
-  ZHeap::heap()->undo_alloc_page(page);
+  undo_alloc_page(page);
   return true;
 }
 
@@ -268,13 +276,19 @@
 
 size_t ZObjectAllocator::used() const {
   size_t total_used = 0;
+  size_t total_undone = 0;
 
-  ZPerCPUConstIterator<size_t> iter(&_used);
-  for (const size_t* cpu_used; iter.next(&cpu_used);) {
+  ZPerCPUConstIterator<size_t> iter_used(&_used);
+  for (const size_t* cpu_used; iter_used.next(&cpu_used);) {
     total_used += *cpu_used;
   }
 
-  return total_used;
+  ZPerCPUConstIterator<size_t> iter_undone(&_undone);
+  for (const size_t* cpu_undone; iter_undone.next(&cpu_undone);) {
+    total_undone += *cpu_undone;
+  }
+
+  return total_used - total_undone;
 }
 
 size_t ZObjectAllocator::remaining() const {
@@ -291,8 +305,9 @@
 void ZObjectAllocator::retire_pages() {
   assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
 
-  // Reset used
+  // Reset used and undone bytes
   _used.set_all(0);
+  _undone.set_all(0);
 
   // Reset allocation pages
   _shared_medium_page.set(NULL);
--- a/src/hotspot/share/gc/z/zObjectAllocator.hpp	Tue Aug 06 15:49:53 2019 +0200
+++ b/src/hotspot/share/gc/z/zObjectAllocator.hpp	Tue Aug 06 15:50:00 2019 +0200
@@ -33,11 +33,13 @@
 private:
   const uint         _nworkers;
   ZPerCPU<size_t>    _used;
+  ZPerCPU<size_t>    _undone;
   ZContended<ZPage*> _shared_medium_page;
   ZPerCPU<ZPage*>    _shared_small_page;
   ZPerWorker<ZPage*> _worker_small_page;
 
   ZPage* alloc_page(uint8_t type, size_t size, ZAllocationFlags flags);
+  void undo_alloc_page(ZPage* page);
 
   // Allocate an object in a shared page. Allocate and
   // atomically install a new page if necessary.
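The accounting idea behind the patch can be illustrated outside of ZGC: when a speculatively allocated page loses the race to install itself (or a humongous allocation is undone), its bytes were already added to the per-CPU used counters, so used() must subtract the bytes that were given back. The sketch below is a minimal, simplified model only; it uses plain std::atomic counters in place of ZGC's ZPerCPU<size_t>, and a hypothetical Page struct in place of ZPage, so it is not the actual ZObjectAllocator implementation.

// Minimal sketch of the used/undone accounting idea (not ZGC code).
// Assumption: std::atomic counters stand in for ZPerCPU<size_t>,
// and Page is a hypothetical stand-in for ZPage.
#include <atomic>
#include <cstddef>
#include <cstdio>

struct Page {
  size_t size;
};

class Allocator {
private:
  std::atomic<size_t> _used{0};    // bytes of pages handed out
  std::atomic<size_t> _undone{0};  // bytes of pages returned after a lost race

public:
  Page* alloc_page(size_t size) {
    _used.fetch_add(size, std::memory_order_relaxed);
    return new Page{size};
  }

  // Called when a concurrently installed page made this one redundant.
  void undo_alloc_page(Page* page) {
    _undone.fetch_add(page->size, std::memory_order_relaxed);
    delete page;
  }

  // Without subtracting _undone, this would over-report after an undo.
  size_t used() const {
    return _used.load(std::memory_order_relaxed) -
           _undone.load(std::memory_order_relaxed);
  }
};

int main() {
  Allocator a;
  Page* kept = a.alloc_page(2 * 1024 * 1024);
  Page* raced = a.alloc_page(2 * 1024 * 1024);  // lost the install race
  a.undo_alloc_page(raced);
  std::printf("used = %zu bytes\n", a.used());  // reports 2 MB, not 4 MB
  (void)kept;
  return 0;
}

In the real patch the same subtraction is done per CPU: used() iterates both ZPerCPU counters and returns total_used - total_undone, and retire_pages() resets both counters at the safepoint.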