8209850: Allow NamedThreads to use GlobalCounter critical sections
Summary: Add NamedThreads iterator and make GlobalCounter use it.
Reviewed-by: eosterlund, rehn
--- a/src/hotspot/share/gc/shared/oopStorage.cpp Thu Aug 23 12:57:40 2018 -0700
+++ b/src/hotspot/share/gc/shared/oopStorage.cpp Thu Aug 23 18:14:53 2018 -0400
@@ -43,7 +43,6 @@
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#include "utilities/ostream.hpp"
-#include "utilities/spinYield.hpp"
OopStorage::AllocationListEntry::AllocationListEntry() : _prev(NULL), _next(NULL) {}
@@ -495,48 +494,6 @@
return true;
}
-OopStorage::ProtectActive::ProtectActive() : _enter(0), _exit() {}
-
-// Begin read-side critical section.
-uint OopStorage::ProtectActive::read_enter() {
- return Atomic::add(2u, &_enter);
-}
-
-// End read-side critical section.
-void OopStorage::ProtectActive::read_exit(uint enter_value) {
- Atomic::add(2u, &_exit[enter_value & 1]);
-}
-
-// Wait until all readers that entered the critical section before
-// synchronization have exited that critical section.
-void OopStorage::ProtectActive::write_synchronize() {
- SpinYield spinner;
- // Determine old and new exit counters, based on bit0 of the
- // on-entry _enter counter.
- uint value = OrderAccess::load_acquire(&_enter);
- volatile uint* new_ptr = &_exit[(value + 1) & 1];
- // Atomically change the in-use exit counter to the new counter, by
- // adding 1 to the _enter counter (flipping bit0 between 0 and 1)
- // and initializing the new exit counter to that enter value. Note:
- // The new exit counter is not being used by read operations until
- // this change succeeds.
- uint old;
- do {
- old = value;
- *new_ptr = ++value;
- value = Atomic::cmpxchg(value, &_enter, old);
- } while (old != value);
- // Readers that entered the critical section before we changed the
- // selected exit counter will use the old exit counter. Readers
- // entering after the change will use the new exit counter. Wait
- // for all the critical sections started before the change to
- // complete, e.g. for the value of old_ptr to catch up with old.
- volatile uint* old_ptr = &_exit[old & 1];
- while (old != OrderAccess::load_acquire(old_ptr)) {
- spinner.wait();
- }
-}
-
// Make new_array the _active_array. Increments new_array's refcount
// to account for the new reference. The assignment is atomic wrto
// obtain_active_array; once this function returns, it is safe for the
@@ -548,7 +505,10 @@
// Install new_array, ensuring its initialization is complete first.
OrderAccess::release_store(&_active_array, new_array);
// Wait for any readers that could read the old array from _active_array.
- _protect_active.write_synchronize();
+ // Can't use GlobalCounter here, because this is called from allocate(),
+ // which may be called in the scope of a GlobalCounter critical section
+ // when inserting a StringTable entry.
+ _protect_active.synchronize();
// All obtain critical sections that could see the old array have
// completed, having incremented the refcount of the old array. The
// caller can now safely relinquish the old array.
@@ -560,10 +520,9 @@
// _active_array. The caller must relinquish the array when done
// using it.
OopStorage::ActiveArray* OopStorage::obtain_active_array() const {
- uint enter_value = _protect_active.read_enter();
+ SingleWriterSynchronizer::CriticalSection cs(&_protect_active);
ActiveArray* result = OrderAccess::load_acquire(&_active_array);
result->increment_refcount();
- _protect_active.read_exit(enter_value);
return result;
}
--- a/src/hotspot/share/gc/shared/oopStorage.hpp Thu Aug 23 12:57:40 2018 -0700
+++ b/src/hotspot/share/gc/shared/oopStorage.hpp Thu Aug 23 18:14:53 2018 -0400
@@ -29,6 +29,7 @@
#include "oops/oop.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
+#include "utilities/singleWriterSynchronizer.hpp"
class Mutex;
class outputStream;
@@ -203,19 +204,6 @@
void unlink(const Block& block);
};
- // RCU-inspired protection of access to _active_array.
- class ProtectActive {
- volatile uint _enter;
- volatile uint _exit[2];
-
- public:
- ProtectActive();
-
- uint read_enter();
- void read_exit(uint enter_value);
- void write_synchronize();
- };
-
private:
const char* _name;
ActiveArray* _active_array;
@@ -229,7 +217,7 @@
volatile size_t _allocation_count;
// Protection for _active_array.
- mutable ProtectActive _protect_active;
+ mutable SingleWriterSynchronizer _protect_active;
// mutable because this gets set even for const iteration.
mutable bool _concurrent_iteration_active;
--- a/src/hotspot/share/runtime/mutexLocker.cpp Thu Aug 23 12:57:40 2018 -0700
+++ b/src/hotspot/share/runtime/mutexLocker.cpp Thu Aug 23 18:14:53 2018 -0400
@@ -76,6 +76,7 @@
Monitor* Safepoint_lock = NULL;
Monitor* SerializePage_lock = NULL;
Monitor* Threads_lock = NULL;
+Mutex* NamedThreadsList_lock = NULL;
Monitor* CGC_lock = NULL;
Monitor* STS_lock = NULL;
Monitor* FullGCCount_lock = NULL;
@@ -256,6 +257,7 @@
def(Safepoint_lock , PaddedMonitor, safepoint, true, Monitor::_safepoint_check_sometimes); // locks SnippetCache_lock/Threads_lock
def(Threads_lock , PaddedMonitor, barrier, true, Monitor::_safepoint_check_sometimes);
+ def(NamedThreadsList_lock , PaddedMutex, leaf, true, Monitor::_safepoint_check_never);
def(VMOperationQueue_lock , PaddedMonitor, nonleaf, true, Monitor::_safepoint_check_sometimes); // VM_thread allowed to block on these
def(VMOperationRequest_lock , PaddedMonitor, nonleaf, true, Monitor::_safepoint_check_sometimes);
--- a/src/hotspot/share/runtime/mutexLocker.hpp Thu Aug 23 12:57:40 2018 -0700
+++ b/src/hotspot/share/runtime/mutexLocker.hpp Thu Aug 23 18:14:53 2018 -0400
@@ -72,6 +72,7 @@
extern Monitor* Safepoint_lock; // a lock used by the safepoint abstraction
extern Monitor* Threads_lock; // a lock on the Threads table of active Java threads
// (also used by Safepoints too to block threads creation/destruction)
+extern Mutex* NamedThreadsList_lock; // a lock on the NamedThreads list
extern Monitor* CGC_lock; // used for coordination between
// fore- & background GC threads.
extern Monitor* STS_lock; // used for joining/leaving SuspendibleThreadSet.
--- a/src/hotspot/share/runtime/thread.cpp Thu Aug 23 12:57:40 2018 -0700
+++ b/src/hotspot/share/runtime/thread.cpp Thu Aug 23 18:14:53 2018 -0400
@@ -114,6 +114,7 @@
#include "utilities/events.hpp"
#include "utilities/macros.hpp"
#include "utilities/preserveException.hpp"
+#include "utilities/singleWriterSynchronizer.hpp"
#include "utilities/vmError.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmciCompiler.hpp"
@@ -1206,15 +1207,61 @@
THREAD);
}
+// List of all NamedThreads and safe iteration over that list.
+
+class NamedThread::List {
+public:
+ NamedThread* volatile _head;
+ SingleWriterSynchronizer _protect;
+
+ List() : _head(NULL), _protect() {}
+};
+
+NamedThread::List NamedThread::_the_list;
+
+NamedThread::Iterator::Iterator() :
+ _protect_enter(_the_list._protect.enter()),
+ _current(OrderAccess::load_acquire(&_the_list._head))
+{}
+
+NamedThread::Iterator::~Iterator() {
+ _the_list._protect.exit(_protect_enter);
+}
+
+void NamedThread::Iterator::step() {
+ assert(!end(), "precondition");
+ _current = OrderAccess::load_acquire(&_current->_next_named_thread);
+}
+
// NamedThread -- non-JavaThread subclasses with multiple
// uniquely named instances should derive from this.
-NamedThread::NamedThread() : Thread() {
- _name = NULL;
- _processed_thread = NULL;
- _gc_id = GCId::undefined();
+NamedThread::NamedThread() :
+ Thread(),
+ _name(NULL),
+ _processed_thread(NULL),
+ _gc_id(GCId::undefined()),
+ _next_named_thread(NULL)
+{
+ // Add this thread to _the_list.
+ MutexLockerEx lock(NamedThreadsList_lock, Mutex::_no_safepoint_check_flag);
+ _next_named_thread = _the_list._head;
+ OrderAccess::release_store(&_the_list._head, this);
}
NamedThread::~NamedThread() {
+ // Remove this thread from _the_list.
+ {
+ MutexLockerEx lock(NamedThreadsList_lock, Mutex::_no_safepoint_check_flag);
+ NamedThread* volatile* p = &_the_list._head;
+ for (NamedThread* t = *p; t != NULL; p = &t->_next_named_thread, t = *p) {
+ if (t == this) {
+ *p = this->_next_named_thread;
+ // Wait for any in-progress iterators.
+ _the_list._protect.synchronize();
+ break;
+ }
+ }
+ }
if (_name != NULL) {
FREE_C_HEAP_ARRAY(char, _name);
_name = NULL;
--- a/src/hotspot/share/runtime/thread.hpp Thu Aug 23 12:57:40 2018 -0700
+++ b/src/hotspot/share/runtime/thread.hpp Thu Aug 23 18:14:53 2018 -0400
@@ -103,6 +103,7 @@
// - JavaThread
// - various subclasses eg CompilerThread, ServiceThread
// - WatcherThread
+// - JfrSamplerThread
class Thread: public ThreadShadow {
friend class VMStructs;
@@ -776,6 +777,10 @@
// log JavaThread being processed by oops_do
JavaThread* _processed_thread;
uint _gc_id; // The current GC id when a thread takes part in GC
+ NamedThread* volatile _next_named_thread;
+
+ class List;
+ static List _the_list;
public:
NamedThread();
@@ -791,6 +796,31 @@
void set_gc_id(uint gc_id) { _gc_id = gc_id; }
uint gc_id() { return _gc_id; }
+
+ class Iterator;
+};
+
+// Provides iteration over the list of NamedThreads. Because list
+// management occurs in the NamedThread constructor and destructor,
+// entries in the list may not be fully constructed instances of a
+// derived class. Threads created after an iterator is constructed
+// will not be visited by the iterator. The scope of an iterator is a
+// critical section; there must be no safepoint checks in that scope.
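+//
+// A typical loop, as a sketch (visit is a hypothetical function; see
+// GlobalCounter::write_synchronize for a real use in this change):
+//
+//   for (NamedThread::Iterator iter; !iter.end(); iter.step()) {
+//     visit(iter.current());
+//   }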
+class NamedThread::Iterator : public StackObj {
+ uint _protect_enter;
+ NamedThread* _current;
+
+ // Noncopyable.
+ Iterator(const Iterator&);
+ Iterator& operator=(const Iterator&);
+
+public:
+ Iterator();
+ ~Iterator();
+
+ bool end() const { return _current == NULL; }
+ NamedThread* current() const { return _current; }
+ void step();
};
// Worker threads are named and have an id of an assigned work.
--- a/src/hotspot/share/utilities/globalCounter.cpp Thu Aug 23 12:57:40 2018 -0700
+++ b/src/hotspot/share/utilities/globalCounter.cpp Thu Aug 23 18:14:53 2018 -0400
@@ -71,5 +71,7 @@
for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) {
ctc.do_thread(thread);
}
- ctc.do_thread(VMThread::vm_thread());
+ for (NamedThread::Iterator nti; !nti.end(); nti.step()) {
+ ctc.do_thread(nti.current());
+ }
}
--- a/src/hotspot/share/utilities/globalCounter.inline.hpp Thu Aug 23 12:57:40 2018 -0700
+++ b/src/hotspot/share/utilities/globalCounter.inline.hpp Thu Aug 23 18:14:53 2018 -0400
@@ -31,16 +31,16 @@
inline void GlobalCounter::critical_section_begin(Thread *thread) {
assert(thread == Thread::current(), "must be current thread");
- assert(thread->is_VM_thread() || thread->is_Java_thread(), "must be VMThread or JavaThread");
- assert((*thread->get_rcu_counter() & COUNTER_ACTIVE) == 0x0, "nestled critical sections, not supported yet");
+ assert(thread->is_Named_thread() || thread->is_Java_thread(), "must be NamedThread or JavaThread");
+ assert((*thread->get_rcu_counter() & COUNTER_ACTIVE) == 0x0, "nested critical sections, not supported yet");
uintx gbl_cnt = OrderAccess::load_acquire(&_global_counter._counter);
OrderAccess::release_store_fence(thread->get_rcu_counter(), gbl_cnt | COUNTER_ACTIVE);
}
inline void GlobalCounter::critical_section_end(Thread *thread) {
assert(thread == Thread::current(), "must be current thread");
- assert(thread->is_VM_thread() || thread->is_Java_thread(), "must be VMThread or JavaThread");
- assert((*thread->get_rcu_counter() & COUNTER_ACTIVE) == COUNTER_ACTIVE, "must be in ctitical section");
+ assert(thread->is_Named_thread() || thread->is_Java_thread(), "must be NamedThread or JavaThread");
+ assert((*thread->get_rcu_counter() & COUNTER_ACTIVE) == COUNTER_ACTIVE, "must be in critical section");
// Mainly for debugging we set it to 'now'.
uintx gbl_cnt = OrderAccess::load_acquire(&_global_counter._counter);
OrderAccess::release_store(thread->get_rcu_counter(), gbl_cnt);
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/utilities/singleWriterSynchronizer.cpp Thu Aug 23 18:14:53 2018 -0400
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "runtime/atomic.hpp"
+#include "runtime/orderAccess.hpp"
+#include "runtime/os.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/singleWriterSynchronizer.hpp"
+#include "utilities/macros.hpp"
+
+SingleWriterSynchronizer::SingleWriterSynchronizer() :
+ _enter(0),
+ _exit(),
+ // The initial value of 1 for _waiting_for puts it on the inactive
+ // track, so no thread exiting a critical section will match it.
+ _waiting_for(1),
+ _wakeup()
+ DEBUG_ONLY(COMMA _writers(0))
+{}
+
+// Wait until all threads that entered a critical section before
+// synchronization have exited that critical section.
+void SingleWriterSynchronizer::synchronize() {
+ // Side-effect in assert balanced by debug-only dec at end.
+ assert(Atomic::add(1u, &_writers) == 1u, "multiple writers");
+ // We don't know anything about the muxing between this invocation
+ // and invocations in other threads. We must start with the latest
+ // _enter polarity, else we could clobber the wrong _exit value on
+ // the first iteration. So fence to ensure everything here follows
+ // whatever muxing was used.
+ OrderAccess::fence();
+ uint value = _enter;
+ // (1) Determine the old and new exit counters, based on the
+ // polarity (bit0 value) of the on-entry enter counter.
+ volatile uint* new_ptr = &_exit[(value + 1) & 1];
+ // (2) Change the in-use exit counter to the new counter, by adding
+ // 1 to the enter counter (flipping the polarity), meanwhile
+ // "simultaneously" initializing the new exit counter to that enter
+ // value. Note: The new exit counter is not being used by read
+ // operations until this change of _enter succeeds.
+ uint old;
+ do {
+ old = value;
+ *new_ptr = ++value;
+ value = Atomic::cmpxchg(value, &_enter, old);
+ } while (old != value);
+ // Critical sections entered before we changed the polarity will use
+ // the old exit counter. Critical sections entered after the change
+ // will use the new exit counter.
+ volatile uint* old_ptr = &_exit[old & 1];
+ assert(old_ptr != new_ptr, "invariant");
+ // (3) Inform threads in in-progress critical sections that there is
+ // a pending synchronize waiting. The thread that completes the
+ // request (_exit value == old) will signal the _wakeup semaphore to
+ // allow us to proceed.
+ _waiting_for = old;
+ // Write of _waiting_for must precede read of _exit and associated
+ // conditional semaphore wait. If they were re-ordered then a
+ // critical section exit could miss the wakeup request, failing to
+ // signal us while we're waiting.
+ OrderAccess::fence();
+ // (4) Wait for all the critical sections started before the change
+ // to complete, i.e. for the value of old_ptr to catch up with old.
+ // Loop because there could be pending wakeups unrelated to this
+ // synchronize request.
+ while (old != OrderAccess::load_acquire(old_ptr)) {
+ _wakeup.wait();
+ }
+ // (5) Drain any pending wakeups. A critical section exit may have
+ // completed our request and seen our _waiting_for before we checked
+ // for completion. There are also possible (though rare) spurious
+ // wakeup signals in the timing gap between changing the _enter
+ // polarity and setting _waiting_for. Enough of any of those could
+ // lead to semaphore overflow. This doesn't guarantee no unrelated
+ // wakeups for the next wait, but prevents unbounded accumulation.
+ while (_wakeup.trywait()) {}
+ DEBUG_ONLY(Atomic::dec(&_writers);)
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/utilities/singleWriterSynchronizer.hpp Thu Aug 23 18:14:53 2018 -0400
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_UTILITIES_SINGLEWRITERSYNCHRONIZER_HPP
+#define SHARE_UTILITIES_SINGLEWRITERSYNCHRONIZER_HPP
+
+#include "memory/allocation.hpp"
+#include "runtime/atomic.hpp"
+#include "runtime/semaphore.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/macros.hpp"
+
+// Synchronization primitive inspired by RCU.
+//
+// Any number of threads may enter critical sections associated with a
+// synchronizer object. One (at a time) other thread may wait for the
+// completion of all critical sections for the synchronizer object
+// that were extant when the wait was initiated. Usage is that there
+// is some state that can be accessed either before or after some
+// change. An accessing thread performs the access within a critical
+// section. A writer thread performs the state change, and then waits
+// for critical sections to complete, thereby ensuring there are no
+// threads in a critical section that might have seen the old state.
+//
+// Generally, GlobalCounter should be used instead of this class, as
+// GlobalCounter has measurably better performance and doesn't have
+// the single writer at a time restriction. Use this only in
+// situations where GlobalCounter won't work for some reason, such as
+// nesting. But note that nesting often indicates other problems, and
+// may risk deadlock.
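+//
+// A minimal usage sketch (Data, _data, _sync, and process are
+// hypothetical names for illustration, not part of this change):
+//
+//   Data* volatile _data;
+//   SingleWriterSynchronizer _sync;
+//
+//   void read() {
+//     // Readers access the state inside a critical section; entering
+//     // never blocks, but the scope must not safepoint.
+//     SingleWriterSynchronizer::CriticalSection cs(&_sync);
+//     Data* data = OrderAccess::load_acquire(&_data);
+//     process(data);
+//   }
+//
+//   void update(Data* new_data) {
+//     // The single writer installs the new state, then waits until no
+//     // critical section can still be using the old state.
+//     Data* old_data = _data;
+//     OrderAccess::release_store(&_data, new_data);
+//     _sync.synchronize();
+//     delete old_data;
+//   }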
+class SingleWriterSynchronizer {
+ volatile uint _enter;
+ volatile uint _exit[2];
+ volatile uint _waiting_for;
+ Semaphore _wakeup;
+
+ DEBUG_ONLY(volatile uint _writers;)
+
+ // Noncopyable.
+ SingleWriterSynchronizer(const SingleWriterSynchronizer&);
+ SingleWriterSynchronizer& operator=(const SingleWriterSynchronizer&);
+
+public:
+ SingleWriterSynchronizer();
+
+ // Enter a critical section for this synchronizer. Entering a
+ // critical section never blocks. While in a critical section, a
+ // thread should avoid blocking, or even take a long time. In
+ // particular, a thread must never safepoint while in a critical
+ // section.
+ // Precondition: The current thread must not already be in a
+ // critical section for this synchronizer.
+ inline uint enter();
+
+ // Exit a critical section for this synchronizer.
+ // Precondition: enter_value must be the result of the corresponding
+ // enter() for the critical section.
+ inline void exit(uint enter_value);
+
+ // Wait until all threads currently in a critical section for this
+ // synchronizer have exited their critical section. Threads that
+ // enter a critical section after the synchronization has started
+ // are not considered in the wait.
+ // Precondition: No other thread may be synchronizing on this
+ // synchronizer.
+ void synchronize();
+
+ // RAII class for managing enter/exit pairs.
+ class CriticalSection;
+};
+
+inline uint SingleWriterSynchronizer::enter() {
+ return Atomic::add(2u, &_enter);
+}
+
+inline void SingleWriterSynchronizer::exit(uint enter_value) {
+ uint exit_value = Atomic::add(2u, &_exit[enter_value & 1]);
+ // If this exit completes a synchronize request, wakeup possibly
+ // waiting synchronizer. Read of _waiting_for must follow the _exit
+ // update.
+ if (exit_value == _waiting_for) {
+ _wakeup.signal();
+ }
+}
+
+class SingleWriterSynchronizer::CriticalSection : public StackObj {
+ SingleWriterSynchronizer* _synchronizer;
+ uint _enter_value;
+
+public:
+ // Enter synchronizer's critical section.
+ explicit CriticalSection(SingleWriterSynchronizer* synchronizer) :
+ _synchronizer(synchronizer),
+ _enter_value(synchronizer->enter())
+ {}
+
+ // Exit synchronizer's critical section.
+ ~CriticalSection() {
+ _synchronizer->exit(_enter_value);
+ }
+};
+
+#endif // SHARE_UTILITIES_SINGLEWRITERSYNCHRONIZER_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/gtest/utilities/test_singleWriterSynchronizer.cpp Thu Aug 23 18:14:53 2018 -0400
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
+#include "runtime/orderAccess.hpp"
+#include "runtime/os.hpp"
+#include "runtime/thread.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/globalCounter.inline.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/ostream.hpp"
+#include "utilities/singleWriterSynchronizer.hpp"
+#include "threadHelper.inline.hpp"
+#include "unittest.hpp"
+
+class SingleWriterSynchronizerTestReader : public JavaTestThread {
+ SingleWriterSynchronizer* _synchronizer;
+ volatile uintx* _synchronized_value;
+ volatile int* _continue_running;
+
+ static const uint reader_iterations = 10;
+
+public:
+ SingleWriterSynchronizerTestReader(Semaphore* post,
+ SingleWriterSynchronizer* synchronizer,
+ volatile uintx* synchronized_value,
+ volatile int* continue_running) :
+ JavaTestThread(post),
+ _synchronizer(synchronizer),
+ _synchronized_value(synchronized_value),
+ _continue_running(continue_running)
+ {}
+
+ virtual void main_run() {
+ uintx iterations = 0;
+ while (OrderAccess::load_acquire(_continue_running) != 0) {
+ ++iterations;
+ SingleWriterSynchronizer::CriticalSection cs(_synchronizer);
+ uintx value = OrderAccess::load_acquire(_synchronized_value);
+ for (uint i = 0; i < reader_iterations; ++i) {
+ uintx new_value = OrderAccess::load_acquire(_synchronized_value);
+ // A reader can see either the value it first read after
+ // entering the critical section, or that value + 1. No other
+ // values are possible.
+ if (value != new_value) {
+ ASSERT_EQ((value + 1), new_value);
+ }
+ }
+ }
+ tty->print_cr("reader iterations: " UINTX_FORMAT, iterations);
+ }
+};
+
+class SingleWriterSynchronizerTestWriter : public JavaTestThread {
+ SingleWriterSynchronizer* _synchronizer;
+ volatile uintx* _synchronized_value;
+ volatile int* _continue_running;
+
+public:
+ SingleWriterSynchronizerTestWriter(Semaphore* post,
+ SingleWriterSynchronizer* synchronizer,
+ volatile uintx* synchronized_value,
+ volatile int* continue_running) :
+ JavaTestThread(post),
+ _synchronizer(synchronizer),
+ _synchronized_value(synchronized_value),
+ _continue_running(continue_running)
+ {}
+
+ virtual void main_run() {
+ while (OrderAccess::load_acquire(_continue_running) != 0) {
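+ // Each iteration publishes a new value, then waits until no reader
+ // can still be observing the previous one.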
+ ++*_synchronized_value;
+ _synchronizer->synchronize();
+ }
+ tty->print_cr("writer iterations: " UINTX_FORMAT, *_synchronized_value);
+ }
+};
+
+const uint nreaders = 5;
+const uint milliseconds_to_run = 3000;
+
+TEST_VM(TestSingleWriterSynchronizer, stress) {
+ Semaphore post;
+ SingleWriterSynchronizer synchronizer;
+ volatile uintx synchronized_value = 0;
+ volatile int continue_running = 1;
+
+ JavaTestThread* readers[nreaders] = {};
+ for (uint i = 0; i < nreaders; ++i) {
+ readers[i] = new SingleWriterSynchronizerTestReader(&post,
+ &synchronizer,
+ &synchronized_value,
+ &continue_running);
+ readers[i]->doit();
+ }
+
+ JavaTestThread* writer =
+ new SingleWriterSynchronizerTestWriter(&post,
+ &synchronizer,
+ &synchronized_value,
+ &continue_running);
+
+ writer->doit();
+
+ tty->print_cr("Stressing synchronizer for %u ms", milliseconds_to_run);
+ {
+ ThreadInVMfromNative invm(JavaThread::current());
+ os::sleep(Thread::current(), milliseconds_to_run, true);
+ }
+ continue_running = 0;
+ for (uint i = 0; i < nreaders + 1; ++i) {
+ post.wait();
+ }
+}