8212748: ZGC: Add reentrant locking functionality
Reviewed-by: eosterlund, kbarrett
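The patch introduces ZReentrantLock, which layers an owning-thread pointer and a
recursion count on top of the existing ZLock, and turns ZLocker into a template
so the same scoped guard works with either lock type. A minimal usage sketch,
not part of the patch (outer() and inner() are made-up names):

#include "gc/z/zLock.inline.hpp"

static ZReentrantLock lock; // illustrative lock shared by the two functions

void inner() {
  // Reentrant acquire by the owning thread: _count goes from 1 to 2.
  ZLocker<ZReentrantLock> locker(&lock);
}

void outer() {
  ZLocker<ZReentrantLock> locker(&lock); // first acquire, _count == 1
  inner();                               // would self-deadlock with a plain ZLock
} // guards unwind in reverse order; the lock is free once _count reaches 0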
--- a/src/hotspot/share/gc/z/zLock.hpp Thu Nov 22 09:14:31 2018 +0100
+++ b/src/hotspot/share/gc/z/zLock.hpp Fri Nov 09 14:08:01 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -33,18 +33,35 @@
public:
ZLock();
+  ~ZLock();

  void lock();
bool try_lock();
void unlock();
};

+class ZReentrantLock {
+private:
+  ZLock            _lock;
+  Thread* volatile _owner;
+  uint64_t         _count;
+
+public:
+ ZReentrantLock();
+
+ void lock();
+ void unlock();
+
+ bool is_owned() const;
+};
+
+template <typename T>
class ZLocker : public StackObj {
private:
-  ZLock* const _lock;
+  T* const _lock;

public:
- ZLocker(ZLock* lock);
+ ZLocker(T* lock);
~ZLocker();
};
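is_owned() reports whether the calling thread currently holds the lock, which is
primarily useful in assertions. A hedged sketch of that pattern (the helper name
is illustrative, not from the patch):

#include "gc/z/zLock.inline.hpp"

// Sketch only: assert ownership before touching state guarded by the lock.
void verify_locked(const ZReentrantLock* lock) {
  // is_owned() compares the stored owner against Thread::current().
  assert(lock->is_owned(), "Should be locked by this thread");
  // ... state guarded by the lock may be inspected safely here ...
}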
--- a/src/hotspot/share/gc/z/zLock.inline.hpp Thu Nov 22 09:14:31 2018 +0100
+++ b/src/hotspot/share/gc/z/zLock.inline.hpp Fri Nov 09 14:08:01 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,11 +25,18 @@
#define SHARE_GC_Z_ZLOCK_INLINE_HPP

#include "gc/z/zLock.hpp"
+#include "runtime/atomic.hpp"
+#include "runtime/thread.hpp"
+#include "utilities/debug.hpp"
inline ZLock::ZLock() {
pthread_mutex_init(&_lock, NULL);
}

+inline ZLock::~ZLock() {
+ pthread_mutex_destroy(&_lock);
+}
+
inline void ZLock::lock() {
pthread_mutex_lock(&_lock);
}
@@ -42,12 +49,49 @@
pthread_mutex_unlock(&_lock);
}

-inline ZLocker::ZLocker(ZLock* lock) :
+inline ZReentrantLock::ZReentrantLock() :
+ _lock(),
+ _owner(NULL),
+ _count(0) {}
+
+inline void ZReentrantLock::lock() {
+ Thread* const thread = Thread::current();
+ Thread* const owner = Atomic::load(&_owner);
+
+ if (owner != thread) {
+ _lock.lock();
+ Atomic::store(thread, &_owner);
+ }
+
+ _count++;
+}
+
+inline void ZReentrantLock::unlock() {
+ assert(is_owned(), "Invalid owner");
+ assert(_count > 0, "Invalid count");
+
+ _count--;
+
+ if (_count == 0) {
+ Atomic::store((Thread*)NULL, &_owner);
+ _lock.unlock();
+ }
+}
+
+inline bool ZReentrantLock::is_owned() const {
+ Thread* const thread = Thread::current();
+ Thread* const owner = Atomic::load(&_owner);
+ return owner == thread;
+}
+
+template <typename T>
+inline ZLocker<T>::ZLocker(T* lock) :
_lock(lock) {
_lock->lock();
}

-inline ZLocker::~ZLocker() {
+template <typename T>
+inline ZLocker<T>::~ZLocker() {
_lock->unlock();
}
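Note why the unsynchronized Atomic::load of _owner in lock() and is_owned() is
safe: _owner can only compare equal to Thread::current() if the current thread
stored its own pointer there, so a stale value can never yield a false positive;
any other value simply sends the caller down the blocking _lock.lock() path, and
_count is only ever touched by the owner. The same scheme in self-contained
standard C++, as an illustrative sketch rather than HotSpot code:

#include <atomic>
#include <cassert>
#include <cstdint>
#include <mutex>
#include <thread>

class ReentrantLock {
private:
  std::mutex                   _lock;
  std::atomic<std::thread::id> _owner;
  uint64_t                     _count;

public:
  ReentrantLock() : _owner(std::thread::id()), _count(0) {}

  void lock() {
    const std::thread::id self = std::this_thread::get_id();
    // Only this thread can have stored its own id, so a relaxed load suffices.
    if (_owner.load(std::memory_order_relaxed) != self) {
      _lock.lock();
      _owner.store(self, std::memory_order_relaxed);
    }
    _count++; // only the owner reads or writes _count
  }

  void unlock() {
    assert(_owner.load(std::memory_order_relaxed) == std::this_thread::get_id());
    if (--_count == 0) {
      // Clear the owner before releasing the mutex, mirroring the patch.
      _owner.store(std::thread::id(), std::memory_order_relaxed);
      _lock.unlock();
    }
  }
};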
--- a/src/hotspot/share/gc/z/zMarkStackAllocator.cpp Thu Nov 22 09:14:31 2018 +0100
+++ b/src/hotspot/share/gc/z/zMarkStackAllocator.cpp Fri Nov 09 14:08:01 2018 +0100
@@ -82,7 +82,7 @@
}

uintptr_t ZMarkStackSpace::expand_and_alloc_space(size_t size) {
- ZLocker locker(&_expand_lock);
+ ZLocker<ZLock> locker(&_expand_lock);
// Retry allocation before expanding
uintptr_t addr = alloc_space(size);
--- a/src/hotspot/share/gc/z/zPageAllocator.cpp Thu Nov 22 09:14:31 2018 +0100
+++ b/src/hotspot/share/gc/z/zPageAllocator.cpp Fri Nov 09 14:08:01 2018 +0100
@@ -260,7 +260,7 @@
// Free virtual memory
{
- ZLocker locker(&_lock);
+ ZLocker<ZLock> locker(&_lock);
_virtual.free(page->virtual_memory());
}
@@ -268,7 +268,7 @@
}

void ZPageAllocator::flush_detached_pages(ZList<ZPage>* list) {
- ZLocker locker(&_lock);
+ ZLocker<ZLock> locker(&_lock);
list->transfer(&_detached);
}
@@ -376,7 +376,7 @@
// thread have returned from sem_wait(). To avoid this race we are
// forcing the waiting thread to acquire/release the lock held by the
// posting thread. https://sourceware.org/bugzilla/show_bug.cgi?id=12674
- ZLocker locker(&_lock);
+ ZLocker<ZLock> locker(&_lock);
}
}
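The hunk above touches the documented workaround for glibc bug 12674: sem_post()
may still access the semaphore after sem_wait() has returned in the waiter, so
the waiter briefly acquires the lock the posting thread holds while posting,
which guarantees the poster is finished before anything is torn down. The shape
of the idiom, with illustrative names (_semaphore is not from this patch):

// Poster side (sketch): posts while holding _lock.
void post() {
  ZLocker<ZLock> locker(&_lock);
  sem_post(&_semaphore); // the waiter may wake before sem_post() returns
}

// Waiter side (sketch): re-taking _lock guarantees post() has completed,
// making it safe to destroy the semaphore afterwards.
void wait() {
  sem_wait(&_semaphore);
  ZLocker<ZLock> locker(&_lock);
}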
@@ -384,7 +384,7 @@
}

ZPage* ZPageAllocator::alloc_page_nonblocking(uint8_t type, size_t size, ZAllocationFlags flags) {
- ZLocker locker(&_lock);
+ ZLocker<ZLock> locker(&_lock);
return alloc_page_common(type, size, flags);
}
@@ -477,7 +477,7 @@
}

void ZPageAllocator::free_page(ZPage* page, bool reclaimed) {
- ZLocker locker(&_lock);
+ ZLocker<ZLock> locker(&_lock);
// Update used statistics
decrease_used(page->size(), reclaimed);
@@ -495,7 +495,7 @@
}

void ZPageAllocator::check_out_of_memory() {
- ZLocker locker(&_lock);
+ ZLocker<ZLock> locker(&_lock);
// Fail allocation requests that were enqueued before the
// last GC cycle started, otherwise start a new GC cycle.