8232604: ZGC: Make ZVerifyViews mapping and unmapping precise
Reviewed-by: pliden, eosterlund
--- a/src/hotspot/share/gc/z/zFuture.hpp Mon Oct 28 11:26:53 2019 +0100
+++ b/src/hotspot/share/gc/z/zFuture.hpp Mon Oct 28 11:27:27 2019 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -34,7 +34,10 @@
T _value;
public:
+ ZFuture();
+
void set(T value);
+ T peek();
T get();
};
--- a/src/hotspot/share/gc/z/zFuture.inline.hpp Mon Oct 28 11:26:53 2019 +0100
+++ b/src/hotspot/share/gc/z/zFuture.inline.hpp Mon Oct 28 11:27:27 2019 +0100
@@ -29,6 +29,10 @@
#include "runtime/thread.hpp"
template <typename T>
+inline ZFuture<T>::ZFuture() :
+ _value() {}
+
+template <typename T>
inline void ZFuture<T>::set(T value) {
// Set value
_value = value;
@@ -38,6 +42,11 @@
}
template <typename T>
+inline T ZFuture<T>::peek() {
+ return _value;
+}
+
+template <typename T>
inline T ZFuture<T>::get() {
// Wait for notification
Thread* const thread = Thread::current();
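Note: the new default constructor value-initializes _value, so peek() returns a
well-defined value (NULL for pointer types) on a future that has not been set
yet; the NULL check in ZPageAllocator::pages_do() below relies on exactly this.
A minimal standalone sketch of the set/peek/get contract (FutureSketch is
hypothetical and uses C++20 std::binary_semaphore in place of the HotSpot
Semaphore):

    #include <semaphore>

    template <typename T>
    class FutureSketch {
    private:
      std::binary_semaphore _sema{0};
      T                     _value{};  // value-initialized, as in the new ZFuture()

    public:
      void set(T value) {
        _value = value;                // publish the value
        _sema.release();               // then wake the single waiter
      }

      T peek() const {
        // Non-blocking read; returns the value-initialized default until
        // set() has run. Callers must synchronize externally, as pages_do()
        // does by holding the allocator lock.
        return _value;
      }

      T get() {
        _sema.acquire();               // block until set() has posted
        return _value;
      }
    };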
--- a/src/hotspot/share/gc/z/zHeap.cpp Mon Oct 28 11:26:53 2019 +0100
+++ b/src/hotspot/share/gc/z/zHeap.cpp Mon Oct 28 11:27:27 2019 +0100
@@ -244,34 +244,14 @@
return _page_allocator.uncommit(delay);
}
-void ZHeap::before_flip() {
- if (ZVerifyViews) {
- // Unmap all pages
- _page_allocator.debug_unmap_all_pages();
- }
-}
-
-void ZHeap::after_flip() {
- if (ZVerifyViews) {
- // Map all pages
- ZPageTableIterator iter(&_page_table);
- for (ZPage* page; iter.next(&page);) {
- _page_allocator.debug_map_page(page);
- }
- _page_allocator.debug_map_cached_pages();
- }
-}
-
void ZHeap::flip_to_marked() {
- before_flip();
+ ZVerifyViewsFlip flip(&_page_allocator);
ZAddress::flip_to_marked();
- after_flip();
}
void ZHeap::flip_to_remapped() {
- before_flip();
+ ZVerifyViewsFlip flip(&_page_allocator);
ZAddress::flip_to_remapped();
- after_flip();
}
void ZHeap::mark_start() {
@@ -460,6 +440,14 @@
iter.objects_do(cl, visit_weaks);
}
+void ZHeap::pages_do(ZPageClosure* cl) {
+ ZPageTableIterator iter(&_page_table);
+ for (ZPage* page; iter.next(&page);) {
+ cl->do_page(page);
+ }
+ _page_allocator.pages_do(cl);
+}
+
void ZHeap::serviceability_initialize() {
_serviceability.initialize();
}
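Note: replacing the explicit before_flip()/after_flip() pair with a
stack-allocated ZVerifyViewsFlip ties the unmap/remap strictly to the scope of
the flip, so the two halves cannot get out of sync on any code path. A sketch
of the pattern (ScopedFlipGuard and flip_to_marked_sketch are hypothetical;
the real guard is defined in zVerify.cpp further down):

    class ScopedFlipGuard {
    public:
      ScopedFlipGuard() {
        // if (ZVerifyViews) { debug-unmap all known pages }
      }
      ~ScopedFlipGuard() {
        // if (ZVerifyViews) { debug-map all known pages back }
      }
    };

    void flip_to_marked_sketch() {
      ScopedFlipGuard guard;           // stands in for before_flip()
      // ZAddress::flip_to_marked();
    }                                  // destructor stands in for after_flip()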
--- a/src/hotspot/share/gc/z/zHeap.hpp Mon Oct 28 11:26:53 2019 +0100
+++ b/src/hotspot/share/gc/z/zHeap.hpp Mon Oct 28 11:27:27 2019 +0100
@@ -63,9 +63,6 @@
size_t heap_max_size() const;
size_t heap_max_reserve_size() const;
- void before_flip();
- void after_flip();
-
void flip_to_marked();
void flip_to_remapped();
@@ -151,6 +148,7 @@
// Iteration
void object_iterate(ObjectClosure* cl, bool visit_weaks);
+ void pages_do(ZPageClosure* cl);
// Serviceability
void serviceability_initialize();
--- a/src/hotspot/share/gc/z/zPage.hpp Mon Oct 28 11:26:53 2019 +0100
+++ b/src/hotspot/share/gc/z/zPage.hpp Mon Oct 28 11:27:27 2019 +0100
@@ -112,4 +112,9 @@
void print() const;
};
+class ZPageClosure {
+public:
+ virtual void do_page(const ZPage* page) = 0;
+};
+
#endif // SHARE_GC_Z_ZPAGE_HPP
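Note: ZPageClosure gives ZHeap, ZPageAllocator and ZPageCache a single shared
page-visitor interface, replacing the closure template parameter that
ZPageCache::pages_do() used before. A self-contained example of implementing
it (CountPagesClosure is hypothetical; the interface is repeated here so the
sketch compiles on its own):

    #include <cstddef>

    class ZPage;                       // forward declaration, as in zPage.hpp

    class ZPageClosure {
    public:
      virtual void do_page(const ZPage* page) = 0;
    };

    // Example closure: counts the pages it visits.
    class CountPagesClosure : public ZPageClosure {
    private:
      size_t _count;

    public:
      CountPagesClosure() : _count(0) {}

      virtual void do_page(const ZPage* page) {
        (void)page;                    // a real closure would map/unmap/verify
        _count++;
      }

      size_t count() const {
        return _count;
      }
    };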
--- a/src/hotspot/share/gc/z/zPageAllocator.cpp Mon Oct 28 11:26:53 2019 +0100
+++ b/src/hotspot/share/gc/z/zPageAllocator.cpp Mon Oct 28 11:27:27 2019 +0100
@@ -60,7 +60,9 @@
_type(type),
_size(size),
_flags(flags),
- _total_collections(total_collections) {}
+ _total_collections(total_collections),
+ _node(),
+ _result() {}
uint8_t type() const {
return _type;
@@ -78,6 +80,10 @@
return _total_collections;
}
+ ZPage* peek() {
+ return _result.peek();
+ }
+
ZPage* wait() {
return _result.get();
}
@@ -108,6 +114,7 @@
_allocated(0),
_reclaimed(0),
_queue(),
+ _satisfied(),
_safe_delete(),
_uncommit(false),
_initialized(false) {
@@ -289,11 +296,7 @@
void ZPageAllocator::map_page(const ZPage* page) const {
// Map physical memory
- if (!page->is_mapped()) {
- _physical.map(page->physical_memory(), page->start());
- } else if (ZVerifyViews) {
- _physical.debug_map(page->physical_memory(), page->start());
- }
+ _physical.map(page->physical_memory(), page->start());
}
size_t ZPageAllocator::max_available(bool no_reserve) const {
@@ -433,14 +436,21 @@
} while (page == gc_marker);
{
- // Guard deletion of underlying semaphore. This is a workaround for a
- // bug in sem_post() in glibc < 2.21, where it's not safe to destroy
+ //
+ // We grab the lock here for two different reasons:
+ //
+ // 1) Guard deletion of underlying semaphore. This is a workaround for
+ // a bug in sem_post() in glibc < 2.21, where it's not safe to destroy
// the semaphore immediately after returning from sem_wait(). The
// reason is that sem_post() can touch the semaphore after a waiting
// thread have returned from sem_wait(). To avoid this race we are
// forcing the waiting thread to acquire/release the lock held by the
// posting thread. https://sourceware.org/bugzilla/show_bug.cgi?id=12674
+ //
+ // 2) Guard the list of satisfied pages.
+ //
ZLocker<ZLock> locker(&_lock);
+ _satisfied.remove(&request);
}
}
@@ -462,7 +472,9 @@
}
// Map page if needed
- map_page(page);
+ if (!page->is_mapped()) {
+ map_page(page);
+ }
// Reset page. This updates the page's sequence number and must
// be done after page allocation, which potentially blocked in
@@ -500,6 +512,7 @@
// the dequeue operation must happen first, since the request
// will immediately be deallocated once it has been satisfied.
_queue.remove(request);
+ _satisfied.insert_first(request);
request->satisfy(page);
}
}
@@ -686,28 +699,21 @@
_physical.debug_map(page->physical_memory(), page->start());
}
-class ZPageCacheDebugMapClosure : public StackObj {
-private:
- const ZPageAllocator* const _allocator;
-
-public:
- ZPageCacheDebugMapClosure(const ZPageAllocator* allocator) :
- _allocator(allocator) {}
-
- virtual void do_page(const ZPage* page) {
- _allocator->debug_map_page(page);
- }
-};
-
-void ZPageAllocator::debug_map_cached_pages() const {
+void ZPageAllocator::debug_unmap_page(const ZPage* page) const {
assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
- ZPageCacheDebugMapClosure cl(this);
- _cache.pages_do(&cl);
+ _physical.debug_unmap(page->physical_memory(), page->start());
}
-void ZPageAllocator::debug_unmap_all_pages() const {
- assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
- _physical.debug_unmap(ZPhysicalMemorySegment(0 /* start */, ZAddressOffsetMax), 0 /* offset */);
+void ZPageAllocator::pages_do(ZPageClosure* cl) const {
+ ZListIterator<ZPageAllocRequest> iter(&_satisfied);
+ for (ZPageAllocRequest* request; iter.next(&request);) {
+ const ZPage* const page = request->peek();
+ if (page != NULL) {
+ cl->do_page(page);
+ }
+ }
+
+ _cache.pages_do(cl);
}
bool ZPageAllocator::is_alloc_stalled() const {
@@ -728,7 +734,8 @@
}
// Out of memory, fail allocation request
- _queue.remove_first();
+ _queue.remove(request);
+ _satisfied.insert_first(request);
request->satisfy(NULL);
}
}
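Note: the new _satisfied list is what makes precise mapping possible: a page
handed to a stalled allocation request is not yet visible in the page table,
so pages_do() must find it through the request. The request is inserted before
satisfy() publishes the result, and the waiter unlinks itself under the same
lock, so the iteration never sees a request that has been popped off a
waiter's stack; requests satisfied with NULL (out of memory) are skipped via
the peek() NULL check. A simplified model of this lifecycle (AllocatorModel,
Request and Page are hypothetical, and the model locks in pages_do() where
HotSpot instead relies on running at a safepoint):

    #include <mutex>
    #include <vector>

    struct Page {};

    struct Request {
      Page* result = nullptr;          // stands in for the ZFuture<ZPage*> slot
    };

    class AllocatorModel {
    private:
      std::mutex            _lock;
      std::vector<Request*> _queue;
      std::vector<Request*> _satisfied;

    public:
      // Satisfier side (cf. satisfy_alloc_queue): move the request to the
      // satisfied list before publishing the result.
      void satisfy_first(Page* page) {
        std::lock_guard<std::mutex> guard(_lock);
        Request* const request = _queue.front();
        _queue.erase(_queue.begin());
        _satisfied.push_back(request);
        request->result = page;        // page may be nullptr on OOM
      }

      // Waiter side (cf. alloc_page_blocking): unlink under the same lock,
      // which doubles as the glibc sem_post() workaround described above.
      void waiter_done(Request* request) {
        std::lock_guard<std::mutex> guard(_lock);
        std::erase(_satisfied, request);   // C++20
      }

      // Iteration side (cf. pages_do): skip failed (nullptr) allocations.
      template <typename Closure>
      void pages_do(Closure cl) {
        std::lock_guard<std::mutex> guard(_lock);
        for (Request* const request : _satisfied) {
          if (request->result != nullptr) {
            cl(request->result);
          }
        }
      }
    };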
--- a/src/hotspot/share/gc/z/zPageAllocator.hpp Mon Oct 28 11:26:53 2019 +0100
+++ b/src/hotspot/share/gc/z/zPageAllocator.hpp Mon Oct 28 11:27:27 2019 +0100
@@ -54,11 +54,12 @@
size_t _allocated;
ssize_t _reclaimed;
ZList<ZPageAllocRequest> _queue;
+ ZList<ZPageAllocRequest> _satisfied;
mutable ZSafeDelete<ZPage> _safe_delete;
bool _uncommit;
bool _initialized;
-  static ZPage* const      gc_marker;
+  static ZPage* const        gc_marker;
void prime_cache(size_t size);
@@ -117,11 +118,12 @@
void map_page(const ZPage* page) const;
void debug_map_page(const ZPage* page) const;
- void debug_map_cached_pages() const;
- void debug_unmap_all_pages() const;
+ void debug_unmap_page(const ZPage* page) const;
bool is_alloc_stalled() const;
void check_out_of_memory();
+
+ void pages_do(ZPageClosure* cl) const;
};
#endif // SHARE_GC_Z_ZPAGEALLOCATOR_HPP
--- a/src/hotspot/share/gc/z/zPageCache.cpp Mon Oct 28 11:26:53 2019 +0100
+++ b/src/hotspot/share/gc/z/zPageCache.cpp Mon Oct 28 11:27:27 2019 +0100
@@ -240,3 +240,26 @@
flush_list(cl, &_medium, to);
flush_per_numa_lists(cl, &_small, to);
}
+
+void ZPageCache::pages_do(ZPageClosure* cl) const {
+ // Small
+ ZPerNUMAConstIterator<ZList<ZPage> > iter_numa(&_small);
+ for (const ZList<ZPage>* list; iter_numa.next(&list);) {
+ ZListIterator<ZPage> iter_small(list);
+ for (ZPage* page; iter_small.next(&page);) {
+ cl->do_page(page);
+ }
+ }
+
+ // Medium
+ ZListIterator<ZPage> iter_medium(&_medium);
+ for (ZPage* page; iter_medium.next(&page);) {
+ cl->do_page(page);
+ }
+
+ // Large
+ ZListIterator<ZPage> iter_large(&_large);
+ for (ZPage* page; iter_large.next(&page);) {
+ cl->do_page(page);
+ }
+}
--- a/src/hotspot/share/gc/z/zPageCache.hpp Mon Oct 28 11:26:53 2019 +0100
+++ b/src/hotspot/share/gc/z/zPageCache.hpp Mon Oct 28 11:27:27 2019 +0100
@@ -71,7 +71,7 @@
void flush(ZPageCacheFlushClosure* cl, ZList<ZPage>* to);
- template <typename Closure> void pages_do(Closure* cl) const;
+ void pages_do(ZPageClosure* cl) const;
};
#endif // SHARE_GC_Z_ZPAGECACHE_HPP
--- a/src/hotspot/share/gc/z/zPageCache.inline.hpp Mon Oct 28 11:26:53 2019 +0100
+++ b/src/hotspot/share/gc/z/zPageCache.inline.hpp Mon Oct 28 11:27:27 2019 +0100
@@ -32,28 +32,4 @@
return _available;
}
-template <typename Closure>
-inline void ZPageCache::pages_do(Closure* cl) const {
- // Small
- ZPerNUMAConstIterator<ZList<ZPage> > iter_numa(&_small);
- for (const ZList<ZPage>* list; iter_numa.next(&list);) {
- ZListIterator<ZPage> iter_small(list);
- for (ZPage* page; iter_small.next(&page);) {
- cl->do_page(page);
- }
- }
-
- // Medium
- ZListIterator<ZPage> iter_medium(&_medium);
- for (ZPage* page; iter_medium.next(&page);) {
- cl->do_page(page);
- }
-
- // Large
- ZListIterator<ZPage> iter_large(&_large);
- for (ZPage* page; iter_large.next(&page);) {
- cl->do_page(page);
- }
-}
-
#endif // SHARE_GC_Z_ZPAGECACHE_INLINE_HPP
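Note: with ZPageClosure available, the closure type no longer needs to be a
template parameter, so pages_do() moves from the .inline.hpp header into
zPageCache.cpp and dispatches virtually; on a debug/verification path the
virtual call cost is irrelevant, and the header dependency goes away. The two
shapes side by side (Page, PageClosure and both functions are hypothetical):

    struct Page {};

    // Before: static dispatch, one instantiation per closure type, which
    // forces the definition to live in a header.
    template <typename Closure>
    void pages_do_template(const Page* page, Closure* cl) {
      cl->do_page(page);
    }

    // After: dynamic dispatch through a shared interface, with a single
    // out-of-line definition.
    struct PageClosure {
      virtual void do_page(const Page* page) = 0;
    };

    void pages_do_virtual(const Page* page, PageClosure* cl) {
      cl->do_page(page);
    }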
--- a/src/hotspot/share/gc/z/zVerify.cpp Mon Oct 28 11:26:53 2019 +0100
+++ b/src/hotspot/share/gc/z/zVerify.cpp Mon Oct 28 11:27:27 2019 +0100
@@ -26,6 +26,7 @@
#include "gc/z/zAddress.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zOop.hpp"
+#include "gc/z/zPageAllocator.hpp"
#include "gc/z/zResurrection.hpp"
#include "gc/z/zRootsIterator.hpp"
#include "gc/z/zStat.hpp"
@@ -170,3 +171,38 @@
ZStatTimerDisable disable;
roots_and_objects(true /* verify_weaks */);
}
+
+template <bool Map>
+class ZPageDebugMapOrUnmapClosure : public ZPageClosure {
+private:
+ const ZPageAllocator* const _allocator;
+
+public:
+ ZPageDebugMapOrUnmapClosure(const ZPageAllocator* allocator) :
+ _allocator(allocator) {}
+
+ virtual void do_page(const ZPage* page) {
+ if (Map) {
+ _allocator->debug_map_page(page);
+ } else {
+ _allocator->debug_unmap_page(page);
+ }
+ }
+};
+
+ZVerifyViewsFlip::ZVerifyViewsFlip(const ZPageAllocator* allocator) :
+ _allocator(allocator) {
+ if (ZVerifyViews) {
+ // Unmap all pages
+ ZPageDebugMapOrUnmapClosure<false /* Map */> cl(_allocator);
+ ZHeap::heap()->pages_do(&cl);
+ }
+}
+
+ZVerifyViewsFlip::~ZVerifyViewsFlip() {
+ if (ZVerifyViews) {
+ // Map all pages
+ ZPageDebugMapOrUnmapClosure<true /* Map */> cl(_allocator);
+ ZHeap::heap()->pages_do(&cl);
+ }
+}
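Note: the <bool Map> parameter folds the former map-only closure and the new
unmap case into one class; the if (Map) branch is resolved at compile time in
each instantiation. The same pattern in a standalone example (Printer is
hypothetical):

    #include <cctype>
    #include <cstdio>

    template <bool Upper>
    class Printer {
    public:
      void print(const char* s) const {
        for (; *s != '\0'; s++) {
          // Constant-folded per instantiation, like if (Map) above
          std::putchar(Upper ? std::toupper(static_cast<unsigned char>(*s)) : *s);
        }
        std::putchar('\n');
      }
    };

    int main() {
      Printer<true  /* Upper */> upper;
      Printer<false /* Upper */> lower;
      upper.print("zgc");              // prints "ZGC"
      lower.print("zgc");              // prints "zgc"
      return 0;
    }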
--- a/src/hotspot/share/gc/z/zVerify.hpp Mon Oct 28 11:26:53 2019 +0100
+++ b/src/hotspot/share/gc/z/zVerify.hpp Mon Oct 28 11:27:27 2019 +0100
@@ -26,6 +26,8 @@
#include "memory/allocation.hpp"
+class ZPageAllocator;
+
class ZVerify : public AllStatic {
private:
template <typename RootsIterator> static void roots();
@@ -45,4 +47,13 @@
static void after_weak_processing();
};
+class ZVerifyViewsFlip {
+private:
+ const ZPageAllocator* const _allocator;
+
+public:
+ ZVerifyViewsFlip(const ZPageAllocator* allocator);
+ ~ZVerifyViewsFlip();
+};
+
#endif // SHARE_GC_Z_ZVERIFY_HPP
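Note: ZVerifyViews is a diagnostic flag, so this path is exercised with
-XX:+UnlockDiagnosticVMOptions -XX:+ZVerifyViews. Intended usage of the guard,
mirroring ZHeap::flip_to_marked()/flip_to_remapped() above (with_verified_flip
is a hypothetical caller):

    #include "gc/z/zVerify.hpp"

    void with_verified_flip(const ZPageAllocator* allocator) {
      ZVerifyViewsFlip flip(allocator);  // ctor: debug-unmaps all known pages
      // ... flip the global address view while no page is mapped ...
    }                                    // dtor: debug-maps the pages back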