8174231: Factor out and share PlatformEvent and Parker code for POSIX systems
Reviewed-by: stuefe, rehn, dcubed
--- a/hotspot/src/os/aix/vm/os_aix.cpp Mon May 29 20:48:10 2017 +0200
+++ b/hotspot/src/os/aix/vm/os_aix.cpp Tue May 30 17:14:52 2017 -0400
@@ -595,7 +595,7 @@
// signal support
debug_only(static bool signal_sets_initialized = false);
-static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;
+static sigset_t unblocked_sigs, vm_sigs;
bool os::Aix::is_sig_ignored(int sig) {
struct sigaction oact;
@@ -626,7 +626,6 @@
// In reality, though, unblocking these signals is really a nop, since
// these signals are not blocked by default.
sigemptyset(&unblocked_sigs);
- sigemptyset(&allowdebug_blocked_sigs);
sigaddset(&unblocked_sigs, SIGILL);
sigaddset(&unblocked_sigs, SIGSEGV);
sigaddset(&unblocked_sigs, SIGBUS);
@@ -637,15 +636,12 @@
if (!ReduceSignalUsage) {
if (!os::Aix::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
- sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
}
if (!os::Aix::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
- sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
}
if (!os::Aix::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
- sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
}
}
// Fill in signals that are blocked by all but the VM thread.
@@ -669,12 +665,6 @@
return &vm_sigs;
}
-// These are signals that are blocked during cond_wait to allow debugger in
-sigset_t* os::Aix::allowdebug_blocked_signals() {
- assert(signal_sets_initialized, "Not initialized");
- return &allowdebug_blocked_sigs;
-}
-
void os::Aix::hotspot_sigmask(Thread* thread) {
//Save caller's signal mask before setting VM signal mask
@@ -3482,11 +3472,15 @@
Aix::_main_thread = pthread_self();
initial_time_count = os::elapsed_counter();
+
+ os::Posix::init();
}
// This is called _after_ the global arguments have been parsed.
jint os::init_2(void) {
+ os::Posix::init_2();
+
if (os::Aix::on_pase()) {
trcVerbose("Running on PASE.");
} else {
@@ -4369,347 +4363,6 @@
return s;
}
-// Refer to the comments in os_solaris.cpp park-unpark.
-
-// utility to compute the abstime argument to timedwait:
-// millis is the relative timeout time
-// abstime will be the absolute timeout time
-// TODO: replace compute_abstime() with unpackTime()
-
-static struct timespec* compute_abstime(timespec* abstime, jlong millis) {
- if (millis < 0) millis = 0;
- struct timeval now;
- int status = gettimeofday(&now, NULL);
- assert(status == 0, "gettimeofday");
- jlong seconds = millis / 1000;
- millis %= 1000;
- if (seconds > 50000000) { // see man cond_timedwait(3T)
- seconds = 50000000;
- }
- abstime->tv_sec = now.tv_sec + seconds;
- long usec = now.tv_usec + millis * 1000;
- if (usec >= 1000000) {
- abstime->tv_sec += 1;
- usec -= 1000000;
- }
- abstime->tv_nsec = usec * 1000;
- return abstime;
-}
-
-// Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
-// Conceptually TryPark() should be equivalent to park(0).
-
-int os::PlatformEvent::TryPark() {
- for (;;) {
- const int v = _Event;
- guarantee ((v == 0) || (v == 1), "invariant");
- if (Atomic::cmpxchg (0, &_Event, v) == v) return v;
- }
-}
-
-void os::PlatformEvent::park() { // AKA "down()"
- // Invariant: Only the thread associated with the Event/PlatformEvent
- // may call park().
- // TODO: assert that _Assoc != NULL or _Assoc == Self
- int v;
- for (;;) {
- v = _Event;
- if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;
- }
- guarantee (v >= 0, "invariant");
- if (v == 0) {
- // Do this the hard way by blocking ...
- int status = pthread_mutex_lock(_mutex);
- assert_status(status == 0, status, "mutex_lock");
- guarantee (_nParked == 0, "invariant");
- ++ _nParked;
- while (_Event < 0) {
- status = pthread_cond_wait(_cond, _mutex);
- assert_status(status == 0 || status == ETIMEDOUT, status, "cond_timedwait");
- }
- -- _nParked;
-
- // In theory we could move the ST of 0 into _Event past the unlock(),
- // but then we'd need a MEMBAR after the ST.
- _Event = 0;
- status = pthread_mutex_unlock(_mutex);
- assert_status(status == 0, status, "mutex_unlock");
- }
- guarantee (_Event >= 0, "invariant");
-}
-
-int os::PlatformEvent::park(jlong millis) {
- guarantee (_nParked == 0, "invariant");
-
- int v;
- for (;;) {
- v = _Event;
- if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;
- }
- guarantee (v >= 0, "invariant");
- if (v != 0) return OS_OK;
-
- // We do this the hard way, by blocking the thread.
- // Consider enforcing a minimum timeout value.
- struct timespec abst;
- compute_abstime(&abst, millis);
-
- int ret = OS_TIMEOUT;
- int status = pthread_mutex_lock(_mutex);
- assert_status(status == 0, status, "mutex_lock");
- guarantee (_nParked == 0, "invariant");
- ++_nParked;
-
- // Object.wait(timo) will return because of
- // (a) notification
- // (b) timeout
- // (c) thread.interrupt
- //
- // Thread.interrupt and object.notify{All} both call Event::set.
- // That is, we treat thread.interrupt as a special case of notification.
- // We ignore spurious OS wakeups unless FilterSpuriousWakeups is false.
- // We assume all ETIME returns are valid.
- //
- // TODO: properly differentiate simultaneous notify+interrupt.
- // In that case, we should propagate the notify to another waiter.
-
- while (_Event < 0) {
- status = pthread_cond_timedwait(_cond, _mutex, &abst);
- assert_status(status == 0 || status == ETIMEDOUT,
- status, "cond_timedwait");
- if (!FilterSpuriousWakeups) break; // previous semantics
- if (status == ETIMEDOUT) break;
- // We consume and ignore EINTR and spurious wakeups.
- }
- --_nParked;
- if (_Event >= 0) {
- ret = OS_OK;
- }
- _Event = 0;
- status = pthread_mutex_unlock(_mutex);
- assert_status(status == 0, status, "mutex_unlock");
- assert (_nParked == 0, "invariant");
- return ret;
-}
-
-void os::PlatformEvent::unpark() {
- int v, AnyWaiters;
- for (;;) {
- v = _Event;
- if (v > 0) {
- // The LD of _Event could have reordered or be satisfied
- // by a read-aside from this processor's write buffer.
- // To avoid problems execute a barrier and then
- // ratify the value.
- OrderAccess::fence();
- if (_Event == v) return;
- continue;
- }
- if (Atomic::cmpxchg (v+1, &_Event, v) == v) break;
- }
- if (v < 0) {
- // Wait for the thread associated with the event to vacate
- int status = pthread_mutex_lock(_mutex);
- assert_status(status == 0, status, "mutex_lock");
- AnyWaiters = _nParked;
-
- if (AnyWaiters != 0) {
- // We intentional signal *after* dropping the lock
- // to avoid a common class of futile wakeups.
- status = pthread_cond_signal(_cond);
- assert_status(status == 0, status, "cond_signal");
- }
- // Mutex should be locked for pthread_cond_signal(_cond).
- status = pthread_mutex_unlock(_mutex);
- assert_status(status == 0, status, "mutex_unlock");
- }
-
- // Note that we signal() _after dropping the lock for "immortal" Events.
- // This is safe and avoids a common class of futile wakeups. In rare
- // circumstances this can cause a thread to return prematurely from
- // cond_{timed}wait() but the spurious wakeup is benign and the victim will
- // simply re-test the condition and re-park itself.
-}
-
-
-// JSR166
-// -------------------------------------------------------
-
-//
-// The solaris and linux implementations of park/unpark are fairly
-// conservative for now, but can be improved. They currently use a
-// mutex/condvar pair, plus a a count.
-// Park decrements count if > 0, else does a condvar wait. Unpark
-// sets count to 1 and signals condvar. Only one thread ever waits
-// on the condvar. Contention seen when trying to park implies that someone
-// is unparking you, so don't wait. And spurious returns are fine, so there
-// is no need to track notifications.
-//
-
-#define MAX_SECS 100000000
-//
-// This code is common to linux and solaris and will be moved to a
-// common place in dolphin.
-//
-// The passed in time value is either a relative time in nanoseconds
-// or an absolute time in milliseconds. Either way it has to be unpacked
-// into suitable seconds and nanoseconds components and stored in the
-// given timespec structure.
-// Given time is a 64-bit value and the time_t used in the timespec is only
-// a signed-32-bit value (except on 64-bit Linux) we have to watch for
-// overflow if times way in the future are given. Further on Solaris versions
-// prior to 10 there is a restriction (see cond_timedwait) that the specified
-// number of seconds, in abstime, is less than current_time + 100,000,000.
-// As it will be 28 years before "now + 100000000" will overflow we can
-// ignore overflow and just impose a hard-limit on seconds using the value
-// of "now + 100,000,000". This places a limit on the timeout of about 3.17
-// years from "now".
-//
-
-static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
- assert (time > 0, "convertTime");
-
- struct timeval now;
- int status = gettimeofday(&now, NULL);
- assert(status == 0, "gettimeofday");
-
- time_t max_secs = now.tv_sec + MAX_SECS;
-
- if (isAbsolute) {
- jlong secs = time / 1000;
- if (secs > max_secs) {
- absTime->tv_sec = max_secs;
- }
- else {
- absTime->tv_sec = secs;
- }
- absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
- }
- else {
- jlong secs = time / NANOSECS_PER_SEC;
- if (secs >= MAX_SECS) {
- absTime->tv_sec = max_secs;
- absTime->tv_nsec = 0;
- }
- else {
- absTime->tv_sec = now.tv_sec + secs;
- absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
- if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
- absTime->tv_nsec -= NANOSECS_PER_SEC;
- ++absTime->tv_sec; // note: this must be <= max_secs
- }
- }
- }
- assert(absTime->tv_sec >= 0, "tv_sec < 0");
- assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
- assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
- assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
-}
-
-void Parker::park(bool isAbsolute, jlong time) {
- // Optional fast-path check:
- // Return immediately if a permit is available.
- if (_counter > 0) {
- _counter = 0;
- OrderAccess::fence();
- return;
- }
-
- Thread* thread = Thread::current();
- assert(thread->is_Java_thread(), "Must be JavaThread");
- JavaThread *jt = (JavaThread *)thread;
-
- // Optional optimization -- avoid state transitions if there's an interrupt pending.
- // Check interrupt before trying to wait
- if (Thread::is_interrupted(thread, false)) {
- return;
- }
-
- // Next, demultiplex/decode time arguments
- timespec absTime;
- if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
- return;
- }
- if (time > 0) {
- unpackTime(&absTime, isAbsolute, time);
- }
-
- // Enter safepoint region
- // Beware of deadlocks such as 6317397.
- // The per-thread Parker:: mutex is a classic leaf-lock.
- // In particular a thread must never block on the Threads_lock while
- // holding the Parker:: mutex. If safepoints are pending both the
- // the ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
- ThreadBlockInVM tbivm(jt);
-
- // Don't wait if cannot get lock since interference arises from
- // unblocking. Also. check interrupt before trying wait
- if (Thread::is_interrupted(thread, false) || pthread_mutex_trylock(_mutex) != 0) {
- return;
- }
-
- int status;
- if (_counter > 0) { // no wait needed
- _counter = 0;
- status = pthread_mutex_unlock(_mutex);
- assert (status == 0, "invariant");
- OrderAccess::fence();
- return;
- }
-
-#ifdef ASSERT
- // Don't catch signals while blocked; let the running threads have the signals.
- // (This allows a debugger to break into the running thread.)
- sigset_t oldsigs;
- sigset_t* allowdebug_blocked = os::Aix::allowdebug_blocked_signals();
- pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
-#endif
-
- OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
- jt->set_suspend_equivalent();
- // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
-
- if (time == 0) {
- status = pthread_cond_wait (_cond, _mutex);
- } else {
- status = pthread_cond_timedwait (_cond, _mutex, &absTime);
- }
- assert_status(status == 0 || status == EINTR ||
- status == ETIME || status == ETIMEDOUT,
- status, "cond_timedwait");
-
-#ifdef ASSERT
- pthread_sigmask(SIG_SETMASK, &oldsigs, NULL);
-#endif
-
- _counter = 0;
- status = pthread_mutex_unlock(_mutex);
- assert_status(status == 0, status, "invariant");
- // If externally suspended while waiting, re-suspend
- if (jt->handle_special_suspend_equivalent_condition()) {
- jt->java_suspend_self();
- }
-
- OrderAccess::fence();
-}
-
-void Parker::unpark() {
- int s, status;
- status = pthread_mutex_lock(_mutex);
- assert (status == 0, "invariant");
- s = _counter;
- _counter = 1;
- if (s < 1) {
- status = pthread_mutex_unlock(_mutex);
- assert (status == 0, "invariant");
- status = pthread_cond_signal (_cond);
- assert (status == 0, "invariant");
- } else {
- pthread_mutex_unlock(_mutex);
- assert (status == 0, "invariant");
- }
-}
-
extern char** environ;
// Run the specified command in a separate process. Return its exit value,
--- a/hotspot/src/os/aix/vm/os_aix.hpp Mon May 29 20:48:10 2017 +0200
+++ b/hotspot/src/os/aix/vm/os_aix.hpp Tue May 30 17:14:52 2017 -0400
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -131,7 +131,6 @@
static sigset_t* unblocked_signals();
static sigset_t* vm_signals();
- static sigset_t* allowdebug_blocked_signals();
// For signal-chaining
static struct sigaction *get_chained_signal_action(int sig);
@@ -223,58 +222,4 @@
};
-
-class PlatformEvent : public CHeapObj<mtInternal> {
- private:
- double CachePad [4]; // increase odds that _mutex is sole occupant of cache line
- volatile int _Event;
- volatile int _nParked;
- pthread_mutex_t _mutex [1];
- pthread_cond_t _cond [1];
- double PostPad [2];
- Thread * _Assoc;
-
- public: // TODO-FIXME: make dtor private
- ~PlatformEvent() { guarantee (0, "invariant"); }
-
- public:
- PlatformEvent() {
- int status;
- status = pthread_cond_init (_cond, NULL);
- assert_status(status == 0, status, "cond_init");
- status = pthread_mutex_init (_mutex, NULL);
- assert_status(status == 0, status, "mutex_init");
- _Event = 0;
- _nParked = 0;
- _Assoc = NULL;
- }
-
- // Use caution with reset() and fired() -- they may require MEMBARs
- void reset() { _Event = 0; }
- int fired() { return _Event; }
- void park ();
- void unpark ();
- int TryPark ();
- int park (jlong millis);
- void SetAssociation (Thread * a) { _Assoc = a; }
-};
-
-class PlatformParker : public CHeapObj<mtInternal> {
- protected:
- pthread_mutex_t _mutex [1];
- pthread_cond_t _cond [1];
-
- public: // TODO-FIXME: make dtor private
- ~PlatformParker() { guarantee (0, "invariant"); }
-
- public:
- PlatformParker() {
- int status;
- status = pthread_cond_init (_cond, NULL);
- assert_status(status == 0, status, "cond_init");
- status = pthread_mutex_init (_mutex, NULL);
- assert_status(status == 0, status, "mutex_init");
- }
-};
-
#endif // OS_AIX_VM_OS_AIX_HPP
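
The Parker code removed from the AIX port above (and from the other POSIX ports below) implements the java.util.concurrent park/unpark protocol described in its JSR166 comments: a single permit (_counter) guarded by a mutex/condvar pair, where park() consumes the permit or blocks, and unpark() deposits at most one permit and signals. The following is a minimal illustrative sketch of that protocol only, not the shared HotSpot implementation introduced by this change; the class name SimpleParker is hypothetical, and the fast path, interrupt checks, timed waits and safepoint handling of the real code are omitted.

#include <pthread.h>

// Minimal sketch of the permit protocol: park() consumes the permit or
// blocks; unpark() stores at most one permit and signals a parked thread.
// Spurious returns from park() are acceptable, matching the comments above.
class SimpleParker {              // hypothetical name, illustration only
  pthread_mutex_t _mutex;
  pthread_cond_t  _cond;
  int             _counter;       // 0 = no permit, 1 = permit available

 public:
  SimpleParker() : _counter(0) {
    pthread_mutex_init(&_mutex, NULL);
    pthread_cond_init(&_cond, NULL);
  }

  void park() {
    pthread_mutex_lock(&_mutex);
    if (_counter == 0) {
      // A single wait suffices: callers tolerate spurious returns, so no
      // predicate loop or notification tracking is needed.
      pthread_cond_wait(&_cond, &_mutex);
    }
    _counter = 0;                 // consume the permit, if any
    pthread_mutex_unlock(&_mutex);
  }

  void unpark() {
    pthread_mutex_lock(&_mutex);
    int s = _counter;
    _counter = 1;                 // at most one permit is ever stored
    pthread_mutex_unlock(&_mutex);
    if (s < 1) {
      pthread_cond_signal(&_cond);  // signal after dropping the lock
    }
  }
};
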
--- a/hotspot/src/os/bsd/vm/os_bsd.cpp Mon May 29 20:48:10 2017 +0200
+++ b/hotspot/src/os/bsd/vm/os_bsd.cpp Tue May 30 17:14:52 2017 -0400
@@ -162,7 +162,6 @@
// utility functions
static int SR_initialize();
-static void unpackTime(timespec* absTime, bool isAbsolute, jlong time);
julong os::available_memory() {
return Bsd::available_memory();
@@ -533,7 +532,7 @@
// signal support
debug_only(static bool signal_sets_initialized = false);
-static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;
+static sigset_t unblocked_sigs, vm_sigs;
bool os::Bsd::is_sig_ignored(int sig) {
struct sigaction oact;
@@ -564,7 +563,6 @@
// In reality, though, unblocking these signals is really a nop, since
// these signals are not blocked by default.
sigemptyset(&unblocked_sigs);
- sigemptyset(&allowdebug_blocked_sigs);
sigaddset(&unblocked_sigs, SIGILL);
sigaddset(&unblocked_sigs, SIGSEGV);
sigaddset(&unblocked_sigs, SIGBUS);
@@ -574,15 +572,13 @@
if (!ReduceSignalUsage) {
if (!os::Bsd::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
- sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
+
}
if (!os::Bsd::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
- sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
}
if (!os::Bsd::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
- sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
}
}
// Fill in signals that are blocked by all but the VM thread.
@@ -608,12 +604,6 @@
return &vm_sigs;
}
-// These are signals that are blocked during cond_wait to allow debugger in
-sigset_t* os::Bsd::allowdebug_blocked_signals() {
- assert(signal_sets_initialized, "Not initialized");
- return &allowdebug_blocked_sigs;
-}
-
void os::Bsd::hotspot_sigmask(Thread* thread) {
//Save caller's signal mask before setting VM signal mask
@@ -3404,7 +3394,6 @@
// this is called _before_ the most of global arguments have been parsed
void os::init(void) {
char dummy; // used to get a guess on initial stack address
-// first_hrtime = gethrtime();
// With BsdThreads the JavaMain thread pid (primordial thread)
// is different than the pid of the java launcher thread.
@@ -3445,6 +3434,8 @@
// binding of all symbols now, thus binding when alignment is known-good.
_dyld_bind_fully_image_containing_address((const void *) &os::init);
#endif
+
+ os::Posix::init();
}
// To install functions for atexit system call
@@ -3456,6 +3447,9 @@
// this is called _after_ the global arguments have been parsed
jint os::init_2(void) {
+
+ os::Posix::init_2();
+
// Allocate a single page and mark it as readable for safepoint polling
address polling_page = (address) ::mmap(NULL, Bsd::page_size(), PROT_READ, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
guarantee(polling_page != MAP_FAILED, "os::init_2: failed to allocate polling page");
@@ -3523,7 +3517,7 @@
// the future if the appropriate cleanup code can be added to the
// VM_Exit VMOperation's doit method.
if (atexit(perfMemory_exit_helper) != 0) {
- warning("os::init2 atexit(perfMemory_exit_helper) failed");
+ warning("os::init_2 atexit(perfMemory_exit_helper) failed");
}
}
@@ -4028,365 +4022,6 @@
}
}
-
-// Refer to the comments in os_solaris.cpp park-unpark. The next two
-// comment paragraphs are worth repeating here:
-//
-// Assumption:
-// Only one parker can exist on an event, which is why we allocate
-// them per-thread. Multiple unparkers can coexist.
-//
-// _Event serves as a restricted-range semaphore.
-// -1 : thread is blocked, i.e. there is a waiter
-// 0 : neutral: thread is running or ready,
-// could have been signaled after a wait started
-// 1 : signaled - thread is running or ready
-//
-
-// utility to compute the abstime argument to timedwait:
-// millis is the relative timeout time
-// abstime will be the absolute timeout time
-// TODO: replace compute_abstime() with unpackTime()
-
-static struct timespec* compute_abstime(struct timespec* abstime,
- jlong millis) {
- if (millis < 0) millis = 0;
- struct timeval now;
- int status = gettimeofday(&now, NULL);
- assert(status == 0, "gettimeofday");
- jlong seconds = millis / 1000;
- millis %= 1000;
- if (seconds > 50000000) { // see man cond_timedwait(3T)
- seconds = 50000000;
- }
- abstime->tv_sec = now.tv_sec + seconds;
- long usec = now.tv_usec + millis * 1000;
- if (usec >= 1000000) {
- abstime->tv_sec += 1;
- usec -= 1000000;
- }
- abstime->tv_nsec = usec * 1000;
- return abstime;
-}
-
-void os::PlatformEvent::park() { // AKA "down()"
- // Transitions for _Event:
- // -1 => -1 : illegal
- // 1 => 0 : pass - return immediately
- // 0 => -1 : block; then set _Event to 0 before returning
-
- // Invariant: Only the thread associated with the Event/PlatformEvent
- // may call park().
- // TODO: assert that _Assoc != NULL or _Assoc == Self
- assert(_nParked == 0, "invariant");
-
- int v;
- for (;;) {
- v = _Event;
- if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
- }
- guarantee(v >= 0, "invariant");
- if (v == 0) {
- // Do this the hard way by blocking ...
- int status = pthread_mutex_lock(_mutex);
- assert_status(status == 0, status, "mutex_lock");
- guarantee(_nParked == 0, "invariant");
- ++_nParked;
- while (_Event < 0) {
- status = pthread_cond_wait(_cond, _mutex);
- // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
- // Treat this the same as if the wait was interrupted
- if (status == ETIMEDOUT) { status = EINTR; }
- assert_status(status == 0 || status == EINTR, status, "cond_wait");
- }
- --_nParked;
-
- _Event = 0;
- status = pthread_mutex_unlock(_mutex);
- assert_status(status == 0, status, "mutex_unlock");
- // Paranoia to ensure our locked and lock-free paths interact
- // correctly with each other.
- OrderAccess::fence();
- }
- guarantee(_Event >= 0, "invariant");
-}
-
-int os::PlatformEvent::park(jlong millis) {
- // Transitions for _Event:
- // -1 => -1 : illegal
- // 1 => 0 : pass - return immediately
- // 0 => -1 : block; then set _Event to 0 before returning
-
- guarantee(_nParked == 0, "invariant");
-
- int v;
- for (;;) {
- v = _Event;
- if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
- }
- guarantee(v >= 0, "invariant");
- if (v != 0) return OS_OK;
-
- // We do this the hard way, by blocking the thread.
- // Consider enforcing a minimum timeout value.
- struct timespec abst;
- compute_abstime(&abst, millis);
-
- int ret = OS_TIMEOUT;
- int status = pthread_mutex_lock(_mutex);
- assert_status(status == 0, status, "mutex_lock");
- guarantee(_nParked == 0, "invariant");
- ++_nParked;
-
- // Object.wait(timo) will return because of
- // (a) notification
- // (b) timeout
- // (c) thread.interrupt
- //
- // Thread.interrupt and object.notify{All} both call Event::set.
- // That is, we treat thread.interrupt as a special case of notification.
- // We ignore spurious OS wakeups unless FilterSpuriousWakeups is false.
- // We assume all ETIME returns are valid.
- //
- // TODO: properly differentiate simultaneous notify+interrupt.
- // In that case, we should propagate the notify to another waiter.
-
- while (_Event < 0) {
- status = pthread_cond_timedwait(_cond, _mutex, &abst);
- assert_status(status == 0 || status == EINTR ||
- status == ETIMEDOUT,
- status, "cond_timedwait");
- if (!FilterSpuriousWakeups) break; // previous semantics
- if (status == ETIMEDOUT) break;
- // We consume and ignore EINTR and spurious wakeups.
- }
- --_nParked;
- if (_Event >= 0) {
- ret = OS_OK;
- }
- _Event = 0;
- status = pthread_mutex_unlock(_mutex);
- assert_status(status == 0, status, "mutex_unlock");
- assert(_nParked == 0, "invariant");
- // Paranoia to ensure our locked and lock-free paths interact
- // correctly with each other.
- OrderAccess::fence();
- return ret;
-}
-
-void os::PlatformEvent::unpark() {
- // Transitions for _Event:
- // 0 => 1 : just return
- // 1 => 1 : just return
- // -1 => either 0 or 1; must signal target thread
- // That is, we can safely transition _Event from -1 to either
- // 0 or 1.
- // See also: "Semaphores in Plan 9" by Mullender & Cox
- //
- // Note: Forcing a transition from "-1" to "1" on an unpark() means
- // that it will take two back-to-back park() calls for the owning
- // thread to block. This has the benefit of forcing a spurious return
- // from the first park() call after an unpark() call which will help
- // shake out uses of park() and unpark() without condition variables.
-
- if (Atomic::xchg(1, &_Event) >= 0) return;
-
- // Wait for the thread associated with the event to vacate
- int status = pthread_mutex_lock(_mutex);
- assert_status(status == 0, status, "mutex_lock");
- int AnyWaiters = _nParked;
- assert(AnyWaiters == 0 || AnyWaiters == 1, "invariant");
- status = pthread_mutex_unlock(_mutex);
- assert_status(status == 0, status, "mutex_unlock");
- if (AnyWaiters != 0) {
- // Note that we signal() *after* dropping the lock for "immortal" Events.
- // This is safe and avoids a common class of futile wakeups. In rare
- // circumstances this can cause a thread to return prematurely from
- // cond_{timed}wait() but the spurious wakeup is benign and the victim
- // will simply re-test the condition and re-park itself.
- // This provides particular benefit if the underlying platform does not
- // provide wait morphing.
- status = pthread_cond_signal(_cond);
- assert_status(status == 0, status, "cond_signal");
- }
-}
-
-
-// JSR166
-// -------------------------------------------------------
-
-// The solaris and bsd implementations of park/unpark are fairly
-// conservative for now, but can be improved. They currently use a
-// mutex/condvar pair, plus a a count.
-// Park decrements count if > 0, else does a condvar wait. Unpark
-// sets count to 1 and signals condvar. Only one thread ever waits
-// on the condvar. Contention seen when trying to park implies that someone
-// is unparking you, so don't wait. And spurious returns are fine, so there
-// is no need to track notifications.
-
-#define MAX_SECS 100000000
-
-// This code is common to bsd and solaris and will be moved to a
-// common place in dolphin.
-//
-// The passed in time value is either a relative time in nanoseconds
-// or an absolute time in milliseconds. Either way it has to be unpacked
-// into suitable seconds and nanoseconds components and stored in the
-// given timespec structure.
-// Given time is a 64-bit value and the time_t used in the timespec is only
-// a signed-32-bit value (except on 64-bit Bsd) we have to watch for
-// overflow if times way in the future are given. Further on Solaris versions
-// prior to 10 there is a restriction (see cond_timedwait) that the specified
-// number of seconds, in abstime, is less than current_time + 100,000,000.
-// As it will be 28 years before "now + 100000000" will overflow we can
-// ignore overflow and just impose a hard-limit on seconds using the value
-// of "now + 100,000,000". This places a limit on the timeout of about 3.17
-// years from "now".
-
-static void unpackTime(struct timespec* absTime, bool isAbsolute, jlong time) {
- assert(time > 0, "convertTime");
-
- struct timeval now;
- int status = gettimeofday(&now, NULL);
- assert(status == 0, "gettimeofday");
-
- time_t max_secs = now.tv_sec + MAX_SECS;
-
- if (isAbsolute) {
- jlong secs = time / 1000;
- if (secs > max_secs) {
- absTime->tv_sec = max_secs;
- } else {
- absTime->tv_sec = secs;
- }
- absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
- } else {
- jlong secs = time / NANOSECS_PER_SEC;
- if (secs >= MAX_SECS) {
- absTime->tv_sec = max_secs;
- absTime->tv_nsec = 0;
- } else {
- absTime->tv_sec = now.tv_sec + secs;
- absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
- if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
- absTime->tv_nsec -= NANOSECS_PER_SEC;
- ++absTime->tv_sec; // note: this must be <= max_secs
- }
- }
- }
- assert(absTime->tv_sec >= 0, "tv_sec < 0");
- assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
- assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
- assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
-}
-
-void Parker::park(bool isAbsolute, jlong time) {
- // Ideally we'd do something useful while spinning, such
- // as calling unpackTime().
-
- // Optional fast-path check:
- // Return immediately if a permit is available.
- // We depend on Atomic::xchg() having full barrier semantics
- // since we are doing a lock-free update to _counter.
- if (Atomic::xchg(0, &_counter) > 0) return;
-
- Thread* thread = Thread::current();
- assert(thread->is_Java_thread(), "Must be JavaThread");
- JavaThread *jt = (JavaThread *)thread;
-
- // Optional optimization -- avoid state transitions if there's an interrupt pending.
- // Check interrupt before trying to wait
- if (Thread::is_interrupted(thread, false)) {
- return;
- }
-
- // Next, demultiplex/decode time arguments
- struct timespec absTime;
- if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
- return;
- }
- if (time > 0) {
- unpackTime(&absTime, isAbsolute, time);
- }
-
-
- // Enter safepoint region
- // Beware of deadlocks such as 6317397.
- // The per-thread Parker:: mutex is a classic leaf-lock.
- // In particular a thread must never block on the Threads_lock while
- // holding the Parker:: mutex. If safepoints are pending both the
- // the ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
- ThreadBlockInVM tbivm(jt);
-
- // Don't wait if cannot get lock since interference arises from
- // unblocking. Also. check interrupt before trying wait
- if (Thread::is_interrupted(thread, false) || pthread_mutex_trylock(_mutex) != 0) {
- return;
- }
-
- int status;
- if (_counter > 0) { // no wait needed
- _counter = 0;
- status = pthread_mutex_unlock(_mutex);
- assert_status(status == 0, status, "invariant");
- // Paranoia to ensure our locked and lock-free paths interact
- // correctly with each other and Java-level accesses.
- OrderAccess::fence();
- return;
- }
-
-#ifdef ASSERT
- // Don't catch signals while blocked; let the running threads have the signals.
- // (This allows a debugger to break into the running thread.)
- sigset_t oldsigs;
- sigset_t* allowdebug_blocked = os::Bsd::allowdebug_blocked_signals();
- pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
-#endif
-
- OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
- jt->set_suspend_equivalent();
- // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
-
- if (time == 0) {
- status = pthread_cond_wait(_cond, _mutex);
- } else {
- status = pthread_cond_timedwait(_cond, _mutex, &absTime);
- }
- assert_status(status == 0 || status == EINTR ||
- status == ETIMEDOUT,
- status, "cond_timedwait");
-
-#ifdef ASSERT
- pthread_sigmask(SIG_SETMASK, &oldsigs, NULL);
-#endif
-
- _counter = 0;
- status = pthread_mutex_unlock(_mutex);
- assert_status(status == 0, status, "invariant");
- // Paranoia to ensure our locked and lock-free paths interact
- // correctly with each other and Java-level accesses.
- OrderAccess::fence();
-
- // If externally suspended while waiting, re-suspend
- if (jt->handle_special_suspend_equivalent_condition()) {
- jt->java_suspend_self();
- }
-}
-
-void Parker::unpark() {
- int status = pthread_mutex_lock(_mutex);
- assert_status(status == 0, status, "invariant");
- const int s = _counter;
- _counter = 1;
- status = pthread_mutex_unlock(_mutex);
- assert_status(status == 0, status, "invariant");
- if (s < 1) {
- status = pthread_cond_signal(_cond);
- assert_status(status == 0, status, "invariant");
- }
-}
-
-
// Darwin has no "environ" in a dynamic library.
#ifdef __APPLE__
#include <crt_externs.h>
--- a/hotspot/src/os/bsd/vm/os_bsd.hpp Mon May 29 20:48:10 2017 +0200
+++ b/hotspot/src/os/bsd/vm/os_bsd.hpp Tue May 30 17:14:52 2017 -0400
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -114,7 +114,6 @@
static sigset_t* unblocked_signals();
static sigset_t* vm_signals();
- static sigset_t* allowdebug_blocked_signals();
// For signal-chaining
static struct sigaction *get_chained_signal_action(int sig);
@@ -168,57 +167,4 @@
static int get_node_by_cpu(int cpu_id);
};
-
-class PlatformEvent : public CHeapObj<mtInternal> {
- private:
- double CachePad[4]; // increase odds that _mutex is sole occupant of cache line
- volatile int _Event;
- volatile int _nParked;
- pthread_mutex_t _mutex[1];
- pthread_cond_t _cond[1];
- double PostPad[2];
- Thread * _Assoc;
-
- public: // TODO-FIXME: make dtor private
- ~PlatformEvent() { guarantee(0, "invariant"); }
-
- public:
- PlatformEvent() {
- int status;
- status = pthread_cond_init(_cond, NULL);
- assert_status(status == 0, status, "cond_init");
- status = pthread_mutex_init(_mutex, NULL);
- assert_status(status == 0, status, "mutex_init");
- _Event = 0;
- _nParked = 0;
- _Assoc = NULL;
- }
-
- // Use caution with reset() and fired() -- they may require MEMBARs
- void reset() { _Event = 0; }
- int fired() { return _Event; }
- void park();
- void unpark();
- int park(jlong millis);
- void SetAssociation(Thread * a) { _Assoc = a; }
-};
-
-class PlatformParker : public CHeapObj<mtInternal> {
- protected:
- pthread_mutex_t _mutex[1];
- pthread_cond_t _cond[1];
-
- public: // TODO-FIXME: make dtor private
- ~PlatformParker() { guarantee(0, "invariant"); }
-
- public:
- PlatformParker() {
- int status;
- status = pthread_cond_init(_cond, NULL);
- assert_status(status == 0, status, "cond_init");
- status = pthread_mutex_init(_mutex, NULL);
- assert_status(status == 0, status, "mutex_init");
- }
-};
-
#endif // OS_BSD_VM_OS_BSD_HPP
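
The PlatformEvent code removed from the BSD port above documents _Event as a restricted-range semaphore taking the values -1 (a waiter is blocked), 0 (neutral) and 1 (signaled), with a lock-free fast path and a mutex/condvar slow path. As a rough illustration of those transitions only, not of the shared code this change introduces, here is a sketch using C++11 atomics; the name EventSketch is hypothetical, and the timed park, assertions and memory-ordering fences of the real code are left out.

#include <pthread.h>
#include <atomic>

class EventSketch {               // hypothetical name, illustration only
  std::atomic<int> _event;        // -1: waiter parked, 0: neutral, 1: signaled
  int              _nParked;      // guarded by _mutex
  pthread_mutex_t  _mutex;
  pthread_cond_t   _cond;

 public:
  EventSketch() : _event(0), _nParked(0) {
    pthread_mutex_init(&_mutex, NULL);
    pthread_cond_init(&_cond, NULL);
  }

  void park() {                   // transitions: 1 => 0 (pass), 0 => -1 (block)
    if (_event.fetch_sub(1) == 1) return;   // consumed a pending unpark
    pthread_mutex_lock(&_mutex);
    ++_nParked;
    while (_event.load() < 0) {
      pthread_cond_wait(&_cond, &_mutex);
    }
    --_nParked;
    _event.store(0);              // reset to neutral before returning
    pthread_mutex_unlock(&_mutex);
  }

  void unpark() {                 // transitions: any state => 1
    if (_event.exchange(1) >= 0) return;    // no waiter, nothing to signal
    pthread_mutex_lock(&_mutex);
    int any_waiters = _nParked;
    pthread_mutex_unlock(&_mutex);
    if (any_waiters != 0) {
      pthread_cond_signal(&_cond);          // signal after dropping the lock
    }
  }
};
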
--- a/hotspot/src/os/linux/vm/os_linux.cpp Mon May 29 20:48:10 2017 +0200
+++ b/hotspot/src/os/linux/vm/os_linux.cpp Tue May 30 17:14:52 2017 -0400
@@ -145,7 +145,6 @@
uint32_t os::Linux::_os_version = 0;
const char * os::Linux::_glibc_version = NULL;
const char * os::Linux::_libpthread_version = NULL;
-pthread_condattr_t os::Linux::_condattr[1];
static jlong initial_time_count=0;
@@ -161,9 +160,6 @@
static int SR_signum = SIGUSR2;
sigset_t SR_sigset;
-// Declarations
-static void unpackTime(timespec* absTime, bool isAbsolute, jlong time);
-
// utility functions
static int SR_initialize();
@@ -386,7 +382,7 @@
// signal support
debug_only(static bool signal_sets_initialized = false);
-static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;
+static sigset_t unblocked_sigs, vm_sigs;
bool os::Linux::is_sig_ignored(int sig) {
struct sigaction oact;
@@ -417,7 +413,6 @@
// In reality, though, unblocking these signals is really a nop, since
// these signals are not blocked by default.
sigemptyset(&unblocked_sigs);
- sigemptyset(&allowdebug_blocked_sigs);
sigaddset(&unblocked_sigs, SIGILL);
sigaddset(&unblocked_sigs, SIGSEGV);
sigaddset(&unblocked_sigs, SIGBUS);
@@ -430,15 +425,12 @@
if (!ReduceSignalUsage) {
if (!os::Linux::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
- sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
}
if (!os::Linux::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
- sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
}
if (!os::Linux::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
- sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
}
}
// Fill in signals that are blocked by all but the VM thread.
@@ -464,12 +456,6 @@
return &vm_sigs;
}
-// These are signals that are blocked during cond_wait to allow debugger in
-sigset_t* os::Linux::allowdebug_blocked_signals() {
- assert(signal_sets_initialized, "Not initialized");
- return &allowdebug_blocked_sigs;
-}
-
void os::Linux::hotspot_sigmask(Thread* thread) {
//Save caller's signal mask before setting VM signal mask
@@ -4828,29 +4814,11 @@
Linux::clock_init();
initial_time_count = javaTimeNanos();
- // pthread_condattr initialization for monotonic clock
- int status;
- pthread_condattr_t* _condattr = os::Linux::condAttr();
- if ((status = pthread_condattr_init(_condattr)) != 0) {
- fatal("pthread_condattr_init: %s", os::strerror(status));
- }
- // Only set the clock if CLOCK_MONOTONIC is available
- if (os::supports_monotonic_clock()) {
- if ((status = pthread_condattr_setclock(_condattr, CLOCK_MONOTONIC)) != 0) {
- if (status == EINVAL) {
- warning("Unable to use monotonic clock with relative timed-waits" \
- " - changes to the time-of-day clock may have adverse affects");
- } else {
- fatal("pthread_condattr_setclock: %s", os::strerror(status));
- }
- }
- }
- // else it defaults to CLOCK_REALTIME
-
// retrieve entry point for pthread_setname_np
Linux::_pthread_setname_np =
(int(*)(pthread_t, const char*))dlsym(RTLD_DEFAULT, "pthread_setname_np");
+ os::Posix::init();
}
// To install functions for atexit system call
@@ -4862,6 +4830,9 @@
// this is called _after_ the global arguments have been parsed
jint os::init_2(void) {
+
+ os::Posix::init_2();
+
Linux::fast_thread_clock_init();
// Allocate a single page and mark it as readable for safepoint polling
@@ -5582,406 +5553,6 @@
}
}
-
-// Refer to the comments in os_solaris.cpp park-unpark. The next two
-// comment paragraphs are worth repeating here:
-//
-// Assumption:
-// Only one parker can exist on an event, which is why we allocate
-// them per-thread. Multiple unparkers can coexist.
-//
-// _Event serves as a restricted-range semaphore.
-// -1 : thread is blocked, i.e. there is a waiter
-// 0 : neutral: thread is running or ready,
-// could have been signaled after a wait started
-// 1 : signaled - thread is running or ready
-//
-
-// utility to compute the abstime argument to timedwait:
-// millis is the relative timeout time
-// abstime will be the absolute timeout time
-// TODO: replace compute_abstime() with unpackTime()
-
-static struct timespec* compute_abstime(timespec* abstime, jlong millis) {
- if (millis < 0) millis = 0;
-
- jlong seconds = millis / 1000;
- millis %= 1000;
- if (seconds > 50000000) { // see man cond_timedwait(3T)
- seconds = 50000000;
- }
-
- if (os::supports_monotonic_clock()) {
- struct timespec now;
- int status = os::Linux::clock_gettime(CLOCK_MONOTONIC, &now);
- assert_status(status == 0, status, "clock_gettime");
- abstime->tv_sec = now.tv_sec + seconds;
- long nanos = now.tv_nsec + millis * NANOSECS_PER_MILLISEC;
- if (nanos >= NANOSECS_PER_SEC) {
- abstime->tv_sec += 1;
- nanos -= NANOSECS_PER_SEC;
- }
- abstime->tv_nsec = nanos;
- } else {
- struct timeval now;
- int status = gettimeofday(&now, NULL);
- assert(status == 0, "gettimeofday");
- abstime->tv_sec = now.tv_sec + seconds;
- long usec = now.tv_usec + millis * 1000;
- if (usec >= 1000000) {
- abstime->tv_sec += 1;
- usec -= 1000000;
- }
- abstime->tv_nsec = usec * 1000;
- }
- return abstime;
-}
-
-void os::PlatformEvent::park() { // AKA "down()"
- // Transitions for _Event:
- // -1 => -1 : illegal
- // 1 => 0 : pass - return immediately
- // 0 => -1 : block; then set _Event to 0 before returning
-
- // Invariant: Only the thread associated with the Event/PlatformEvent
- // may call park().
- // TODO: assert that _Assoc != NULL or _Assoc == Self
- assert(_nParked == 0, "invariant");
-
- int v;
- for (;;) {
- v = _Event;
- if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
- }
- guarantee(v >= 0, "invariant");
- if (v == 0) {
- // Do this the hard way by blocking ...
- int status = pthread_mutex_lock(_mutex);
- assert_status(status == 0, status, "mutex_lock");
- guarantee(_nParked == 0, "invariant");
- ++_nParked;
- while (_Event < 0) {
- status = pthread_cond_wait(_cond, _mutex);
- // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
- // Treat this the same as if the wait was interrupted
- if (status == ETIME) { status = EINTR; }
- assert_status(status == 0 || status == EINTR, status, "cond_wait");
- }
- --_nParked;
-
- _Event = 0;
- status = pthread_mutex_unlock(_mutex);
- assert_status(status == 0, status, "mutex_unlock");
- // Paranoia to ensure our locked and lock-free paths interact
- // correctly with each other.
- OrderAccess::fence();
- }
- guarantee(_Event >= 0, "invariant");
-}
-
-int os::PlatformEvent::park(jlong millis) {
- // Transitions for _Event:
- // -1 => -1 : illegal
- // 1 => 0 : pass - return immediately
- // 0 => -1 : block; then set _Event to 0 before returning
-
- guarantee(_nParked == 0, "invariant");
-
- int v;
- for (;;) {
- v = _Event;
- if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
- }
- guarantee(v >= 0, "invariant");
- if (v != 0) return OS_OK;
-
- // We do this the hard way, by blocking the thread.
- // Consider enforcing a minimum timeout value.
- struct timespec abst;
- compute_abstime(&abst, millis);
-
- int ret = OS_TIMEOUT;
- int status = pthread_mutex_lock(_mutex);
- assert_status(status == 0, status, "mutex_lock");
- guarantee(_nParked == 0, "invariant");
- ++_nParked;
-
- // Object.wait(timo) will return because of
- // (a) notification
- // (b) timeout
- // (c) thread.interrupt
- //
- // Thread.interrupt and object.notify{All} both call Event::set.
- // That is, we treat thread.interrupt as a special case of notification.
- // We ignore spurious OS wakeups unless FilterSpuriousWakeups is false.
- // We assume all ETIME returns are valid.
- //
- // TODO: properly differentiate simultaneous notify+interrupt.
- // In that case, we should propagate the notify to another waiter.
-
- while (_Event < 0) {
- status = pthread_cond_timedwait(_cond, _mutex, &abst);
- assert_status(status == 0 || status == EINTR ||
- status == ETIME || status == ETIMEDOUT,
- status, "cond_timedwait");
- if (!FilterSpuriousWakeups) break; // previous semantics
- if (status == ETIME || status == ETIMEDOUT) break;
- // We consume and ignore EINTR and spurious wakeups.
- }
- --_nParked;
- if (_Event >= 0) {
- ret = OS_OK;
- }
- _Event = 0;
- status = pthread_mutex_unlock(_mutex);
- assert_status(status == 0, status, "mutex_unlock");
- assert(_nParked == 0, "invariant");
- // Paranoia to ensure our locked and lock-free paths interact
- // correctly with each other.
- OrderAccess::fence();
- return ret;
-}
-
-void os::PlatformEvent::unpark() {
- // Transitions for _Event:
- // 0 => 1 : just return
- // 1 => 1 : just return
- // -1 => either 0 or 1; must signal target thread
- // That is, we can safely transition _Event from -1 to either
- // 0 or 1.
- // See also: "Semaphores in Plan 9" by Mullender & Cox
- //
- // Note: Forcing a transition from "-1" to "1" on an unpark() means
- // that it will take two back-to-back park() calls for the owning
- // thread to block. This has the benefit of forcing a spurious return
- // from the first park() call after an unpark() call which will help
- // shake out uses of park() and unpark() without condition variables.
-
- if (Atomic::xchg(1, &_Event) >= 0) return;
-
- // Wait for the thread associated with the event to vacate
- int status = pthread_mutex_lock(_mutex);
- assert_status(status == 0, status, "mutex_lock");
- int AnyWaiters = _nParked;
- assert(AnyWaiters == 0 || AnyWaiters == 1, "invariant");
- status = pthread_mutex_unlock(_mutex);
- assert_status(status == 0, status, "mutex_unlock");
- if (AnyWaiters != 0) {
- // Note that we signal() *after* dropping the lock for "immortal" Events.
- // This is safe and avoids a common class of futile wakeups. In rare
- // circumstances this can cause a thread to return prematurely from
- // cond_{timed}wait() but the spurious wakeup is benign and the victim
- // will simply re-test the condition and re-park itself.
- // This provides particular benefit if the underlying platform does not
- // provide wait morphing.
- status = pthread_cond_signal(_cond);
- assert_status(status == 0, status, "cond_signal");
- }
-}
-
-
-// JSR166
-// -------------------------------------------------------
-
-// The solaris and linux implementations of park/unpark are fairly
-// conservative for now, but can be improved. They currently use a
-// mutex/condvar pair, plus a a count.
-// Park decrements count if > 0, else does a condvar wait. Unpark
-// sets count to 1 and signals condvar. Only one thread ever waits
-// on the condvar. Contention seen when trying to park implies that someone
-// is unparking you, so don't wait. And spurious returns are fine, so there
-// is no need to track notifications.
-
-// This code is common to linux and solaris and will be moved to a
-// common place in dolphin.
-//
-// The passed in time value is either a relative time in nanoseconds
-// or an absolute time in milliseconds. Either way it has to be unpacked
-// into suitable seconds and nanoseconds components and stored in the
-// given timespec structure.
-// Given time is a 64-bit value and the time_t used in the timespec is only
-// a signed-32-bit value (except on 64-bit Linux) we have to watch for
-// overflow if times way in the future are given. Further on Solaris versions
-// prior to 10 there is a restriction (see cond_timedwait) that the specified
-// number of seconds, in abstime, is less than current_time + 100,000,000.
-// As it will be 28 years before "now + 100000000" will overflow we can
-// ignore overflow and just impose a hard-limit on seconds using the value
-// of "now + 100,000,000". This places a limit on the timeout of about 3.17
-// years from "now".
-
-static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
- assert(time > 0, "convertTime");
- time_t max_secs = 0;
-
- if (!os::supports_monotonic_clock() || isAbsolute) {
- struct timeval now;
- int status = gettimeofday(&now, NULL);
- assert(status == 0, "gettimeofday");
-
- max_secs = now.tv_sec + MAX_SECS;
-
- if (isAbsolute) {
- jlong secs = time / 1000;
- if (secs > max_secs) {
- absTime->tv_sec = max_secs;
- } else {
- absTime->tv_sec = secs;
- }
- absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
- } else {
- jlong secs = time / NANOSECS_PER_SEC;
- if (secs >= MAX_SECS) {
- absTime->tv_sec = max_secs;
- absTime->tv_nsec = 0;
- } else {
- absTime->tv_sec = now.tv_sec + secs;
- absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
- if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
- absTime->tv_nsec -= NANOSECS_PER_SEC;
- ++absTime->tv_sec; // note: this must be <= max_secs
- }
- }
- }
- } else {
- // must be relative using monotonic clock
- struct timespec now;
- int status = os::Linux::clock_gettime(CLOCK_MONOTONIC, &now);
- assert_status(status == 0, status, "clock_gettime");
- max_secs = now.tv_sec + MAX_SECS;
- jlong secs = time / NANOSECS_PER_SEC;
- if (secs >= MAX_SECS) {
- absTime->tv_sec = max_secs;
- absTime->tv_nsec = 0;
- } else {
- absTime->tv_sec = now.tv_sec + secs;
- absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_nsec;
- if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
- absTime->tv_nsec -= NANOSECS_PER_SEC;
- ++absTime->tv_sec; // note: this must be <= max_secs
- }
- }
- }
- assert(absTime->tv_sec >= 0, "tv_sec < 0");
- assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
- assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
- assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
-}
-
-void Parker::park(bool isAbsolute, jlong time) {
- // Ideally we'd do something useful while spinning, such
- // as calling unpackTime().
-
- // Optional fast-path check:
- // Return immediately if a permit is available.
- // We depend on Atomic::xchg() having full barrier semantics
- // since we are doing a lock-free update to _counter.
- if (Atomic::xchg(0, &_counter) > 0) return;
-
- Thread* thread = Thread::current();
- assert(thread->is_Java_thread(), "Must be JavaThread");
- JavaThread *jt = (JavaThread *)thread;
-
- // Optional optimization -- avoid state transitions if there's an interrupt pending.
- // Check interrupt before trying to wait
- if (Thread::is_interrupted(thread, false)) {
- return;
- }
-
- // Next, demultiplex/decode time arguments
- timespec absTime;
- if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
- return;
- }
- if (time > 0) {
- unpackTime(&absTime, isAbsolute, time);
- }
-
-
- // Enter safepoint region
- // Beware of deadlocks such as 6317397.
- // The per-thread Parker:: mutex is a classic leaf-lock.
- // In particular a thread must never block on the Threads_lock while
- // holding the Parker:: mutex. If safepoints are pending both the
- // the ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
- ThreadBlockInVM tbivm(jt);
-
- // Don't wait if cannot get lock since interference arises from
- // unblocking. Also. check interrupt before trying wait
- if (Thread::is_interrupted(thread, false) || pthread_mutex_trylock(_mutex) != 0) {
- return;
- }
-
- int status;
- if (_counter > 0) { // no wait needed
- _counter = 0;
- status = pthread_mutex_unlock(_mutex);
- assert_status(status == 0, status, "invariant");
- // Paranoia to ensure our locked and lock-free paths interact
- // correctly with each other and Java-level accesses.
- OrderAccess::fence();
- return;
- }
-
-#ifdef ASSERT
- // Don't catch signals while blocked; let the running threads have the signals.
- // (This allows a debugger to break into the running thread.)
- sigset_t oldsigs;
- sigemptyset(&oldsigs);
- sigset_t* allowdebug_blocked = os::Linux::allowdebug_blocked_signals();
- pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
-#endif
-
- OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
- jt->set_suspend_equivalent();
- // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
-
- assert(_cur_index == -1, "invariant");
- if (time == 0) {
- _cur_index = REL_INDEX; // arbitrary choice when not timed
- status = pthread_cond_wait(&_cond[_cur_index], _mutex);
- } else {
- _cur_index = isAbsolute ? ABS_INDEX : REL_INDEX;
- status = pthread_cond_timedwait(&_cond[_cur_index], _mutex, &absTime);
- }
- _cur_index = -1;
- assert_status(status == 0 || status == EINTR ||
- status == ETIME || status == ETIMEDOUT,
- status, "cond_timedwait");
-
-#ifdef ASSERT
- pthread_sigmask(SIG_SETMASK, &oldsigs, NULL);
-#endif
-
- _counter = 0;
- status = pthread_mutex_unlock(_mutex);
- assert_status(status == 0, status, "invariant");
- // Paranoia to ensure our locked and lock-free paths interact
- // correctly with each other and Java-level accesses.
- OrderAccess::fence();
-
- // If externally suspended while waiting, re-suspend
- if (jt->handle_special_suspend_equivalent_condition()) {
- jt->java_suspend_self();
- }
-}
-
-void Parker::unpark() {
- int status = pthread_mutex_lock(_mutex);
- assert_status(status == 0, status, "invariant");
- const int s = _counter;
- _counter = 1;
- // must capture correct index before unlocking
- int index = _cur_index;
- status = pthread_mutex_unlock(_mutex);
- assert_status(status == 0, status, "invariant");
- if (s < 1 && index != -1) {
- // thread is definitely parked
- status = pthread_cond_signal(&_cond[index]);
- assert_status(status == 0, status, "invariant");
- }
-}
-
-
extern char** environ;
// Run the specified command in a separate process. Return its exit value,
--- a/hotspot/src/os/linux/vm/os_linux.hpp Mon May 29 20:48:10 2017 +0200
+++ b/hotspot/src/os/linux/vm/os_linux.hpp Tue May 30 17:14:52 2017 -0400
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -160,7 +160,6 @@
static sigset_t* unblocked_signals();
static sigset_t* vm_signals();
- static sigset_t* allowdebug_blocked_signals();
// For signal-chaining
static struct sigaction *get_chained_signal_action(int sig);
@@ -207,13 +206,6 @@
static bool os_version_is_known();
static uint32_t os_version();
- // pthread_cond clock suppport
- private:
- static pthread_condattr_t _condattr[1];
-
- public:
- static pthread_condattr_t* condAttr() { return _condattr; }
-
// Stack repair handling
// none present
@@ -302,65 +294,4 @@
}
};
-
-class PlatformEvent : public CHeapObj<mtInternal> {
- private:
- double CachePad[4]; // increase odds that _mutex is sole occupant of cache line
- volatile int _Event;
- volatile int _nParked;
- pthread_mutex_t _mutex[1];
- pthread_cond_t _cond[1];
- double PostPad[2];
- Thread * _Assoc;
-
- public: // TODO-FIXME: make dtor private
- ~PlatformEvent() { guarantee(0, "invariant"); }
-
- public:
- PlatformEvent() {
- int status;
- status = pthread_cond_init(_cond, os::Linux::condAttr());
- assert_status(status == 0, status, "cond_init");
- status = pthread_mutex_init(_mutex, NULL);
- assert_status(status == 0, status, "mutex_init");
- _Event = 0;
- _nParked = 0;
- _Assoc = NULL;
- }
-
- // Use caution with reset() and fired() -- they may require MEMBARs
- void reset() { _Event = 0; }
- int fired() { return _Event; }
- void park();
- void unpark();
- int park(jlong millis); // relative timed-wait only
- void SetAssociation(Thread * a) { _Assoc = a; }
-};
-
-class PlatformParker : public CHeapObj<mtInternal> {
- protected:
- enum {
- REL_INDEX = 0,
- ABS_INDEX = 1
- };
- int _cur_index; // which cond is in use: -1, 0, 1
- pthread_mutex_t _mutex[1];
- pthread_cond_t _cond[2]; // one for relative times and one for abs.
-
- public: // TODO-FIXME: make dtor private
- ~PlatformParker() { guarantee(0, "invariant"); }
-
- public:
- PlatformParker() {
- int status;
- status = pthread_cond_init(&_cond[REL_INDEX], os::Linux::condAttr());
- assert_status(status == 0, status, "cond_init rel");
- status = pthread_cond_init(&_cond[ABS_INDEX], NULL);
- assert_status(status == 0, status, "cond_init abs");
- status = pthread_mutex_init(_mutex, NULL);
- assert_status(status == 0, status, "mutex_init");
- _cur_index = -1; // mark as unused
- }
-};
-
#endif // OS_LINUX_VM_OS_LINUX_HPP
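
The Linux PlatformParker removed above keeps two condition variables because a pthread condvar's timeout clock is fixed when the condvar is initialized: relative waits are measured on CLOCK_MONOTONIC (immune to time-of-day adjustments), while absolute java.util.concurrent deadlines are wall-clock times and therefore use a condvar bound to the default CLOCK_REALTIME. The shared os::Posix::init() added below centralizes the condattr setup that os::init() used to do here. Below is a minimal sketch of that setup and of a relative timed wait, assuming pthread_condattr_setclock() and CLOCK_MONOTONIC are available; all names in the sketch are illustrative, not part of this change.

#include <pthread.h>
#include <time.h>

static pthread_mutex_t g_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  g_rel_cond;     // relative timeouts, monotonic clock
static pthread_cond_t  g_abs_cond;     // absolute deadlines, wall clock

static void init_conds() {
  pthread_condattr_t attr;
  pthread_condattr_init(&attr);
  pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
  pthread_cond_init(&g_rel_cond, &attr);   // timedwait measured on CLOCK_MONOTONIC
  pthread_cond_init(&g_abs_cond, NULL);    // timedwait measured on CLOCK_REALTIME
  pthread_condattr_destroy(&attr);
}

// Relative wait: the deadline must be built from the same clock that the
// condvar was initialized with, here CLOCK_MONOTONIC.
static int wait_millis(long millis) {
  struct timespec deadline;
  clock_gettime(CLOCK_MONOTONIC, &deadline);
  deadline.tv_sec  += millis / 1000;
  deadline.tv_nsec += (millis % 1000) * 1000000L;
  if (deadline.tv_nsec >= 1000000000L) {
    deadline.tv_sec  += 1;
    deadline.tv_nsec -= 1000000000L;
  }
  pthread_mutex_lock(&g_mutex);
  int status = pthread_cond_timedwait(&g_rel_cond, &g_mutex, &deadline);
  pthread_mutex_unlock(&g_mutex);
  return status;                           // 0 (woken/spurious) or ETIMEDOUT
}
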
--- a/hotspot/src/os/posix/vm/os_posix.cpp Mon May 29 20:48:10 2017 +0200
+++ b/hotspot/src/os/posix/vm/os_posix.cpp Tue May 30 17:14:52 2017 -0400
@@ -31,13 +31,14 @@
#include "utilities/macros.hpp"
#include "utilities/vmError.hpp"
-#include <signal.h>
-#include <unistd.h>
-#include <sys/resource.h>
-#include <sys/utsname.h>
+#include <dlfcn.h>
#include <pthread.h>
#include <semaphore.h>
#include <signal.h>
+#include <sys/resource.h>
+#include <sys/utsname.h>
+#include <time.h>
+#include <unistd.h>
// Todo: provide a os::get_max_process_id() or similar. Number of processes
// may have been configured, can be read more accurately from proc fs etc.
@@ -1394,3 +1395,557 @@
}
#endif // __APPLE__
+
+
+// Shared pthread_mutex/cond based PlatformEvent implementation.
+// Not currently usable by Solaris.
+
+#ifndef SOLARIS
+
+// Shared condattr object for use with relative timed-waits. Will be associated
+// with CLOCK_MONOTONIC if available to avoid issues with time-of-day changes,
+// but otherwise whatever default is used by the platform - generally the
+// time-of-day clock.
+static pthread_condattr_t _condAttr[1];
+
+// Shared mutexattr to explicitly set the type to PTHREAD_MUTEX_NORMAL as not
+// all systems (e.g. FreeBSD) map the default to "normal".
+static pthread_mutexattr_t _mutexAttr[1];
+
+// common basic initialization that is always supported
+static void pthread_init_common(void) {
+ int status;
+ if ((status = pthread_condattr_init(_condAttr)) != 0) {
+ fatal("pthread_condattr_init: %s", os::strerror(status));
+ }
+ if ((status = pthread_mutexattr_init(_mutexAttr)) != 0) {
+ fatal("pthread_mutexattr_init: %s", os::strerror(status));
+ }
+ if ((status = pthread_mutexattr_settype(_mutexAttr, PTHREAD_MUTEX_NORMAL)) != 0) {
+ fatal("pthread_mutexattr_settype: %s", os::strerror(status));
+ }
+}
+
+// Not all POSIX types and APIs are available on all notionally "posix"
+// platforms. If we have build-time support then we will check for actual
+// runtime support via dlopen/dlsym lookup. This allows for running on an
+// older OS version compared to the build platform. But if there is no
+// build time support then there cannot be any runtime support as we do not
+// know what the runtime types would be (for example clockid_t might be an
+// int or int64_t).
+//
+#ifdef SUPPORTS_CLOCK_MONOTONIC
+
+// This means we have clockid_t, clock_gettime et al and CLOCK_MONOTONIC
+
+static int (*_clock_gettime)(clockid_t, struct timespec *);
+static int (*_pthread_condattr_setclock)(pthread_condattr_t *, clockid_t);
+
+static bool _use_clock_monotonic_condattr;
+
+// Determine what POSIX APIs are present and do appropriate
+// configuration.
+void os::Posix::init(void) {
+
+ // NOTE: no logging available when this is called. Put logging
+ // statements in init_2().
+
+ // Copied from os::Linux::clock_init(). The duplication is temporary.
+
+ // 1. Check for CLOCK_MONOTONIC support.
+
+ void* handle = NULL;
+
+ // For linux we need librt, for other OS we can find
+ // this function in regular libc.
+#ifdef NEEDS_LIBRT
+ // We do dlopen's in this particular order due to bug in linux
+ // dynamic loader (see 6348968) leading to crash on exit.
+ handle = dlopen("librt.so.1", RTLD_LAZY);
+ if (handle == NULL) {
+ handle = dlopen("librt.so", RTLD_LAZY);
+ }
+#endif
+
+ if (handle == NULL) {
+ handle = RTLD_DEFAULT;
+ }
+
+ _clock_gettime = NULL;
+
+ int (*clock_getres_func)(clockid_t, struct timespec*) =
+ (int(*)(clockid_t, struct timespec*))dlsym(handle, "clock_getres");
+ int (*clock_gettime_func)(clockid_t, struct timespec*) =
+ (int(*)(clockid_t, struct timespec*))dlsym(handle, "clock_gettime");
+ if (clock_getres_func != NULL && clock_gettime_func != NULL) {
+ // We assume that if both clock_gettime and clock_getres support
+ // CLOCK_MONOTONIC then the OS provides a true high-resolution monotonic clock.
+ struct timespec res;
+ struct timespec tp;
+ if (clock_getres_func(CLOCK_MONOTONIC, &res) == 0 &&
+ clock_gettime_func(CLOCK_MONOTONIC, &tp) == 0) {
+ // Yes, monotonic clock is supported.
+ _clock_gettime = clock_gettime_func;
+ } else {
+#ifdef NEEDS_LIBRT
+ // Close librt if there is no monotonic clock.
+ if (handle != RTLD_DEFAULT) {
+ dlclose(handle);
+ }
+#endif
+ }
+ }
+
+ // 2. Check for pthread_condattr_setclock support.
+
+ _pthread_condattr_setclock = NULL;
+
+ // libpthread is already loaded.
+ int (*condattr_setclock_func)(pthread_condattr_t*, clockid_t) =
+ (int (*)(pthread_condattr_t*, clockid_t))dlsym(RTLD_DEFAULT,
+ "pthread_condattr_setclock");
+ if (condattr_setclock_func != NULL) {
+ _pthread_condattr_setclock = condattr_setclock_func;
+ }
+
+ // Now do general initialization.
+
+ pthread_init_common();
+
+ int status;
+ if (_pthread_condattr_setclock != NULL && _clock_gettime != NULL) {
+ if ((status = _pthread_condattr_setclock(_condAttr, CLOCK_MONOTONIC)) != 0) {
+ if (status == EINVAL) {
+ _use_clock_monotonic_condattr = false;
+ warning("Unable to use monotonic clock with relative timed-waits" \
+ " - changes to the time-of-day clock may have adverse affects");
+ } else {
+ fatal("pthread_condattr_setclock: %s", os::strerror(status));
+ }
+ } else {
+ _use_clock_monotonic_condattr = true;
+ }
+ } else {
+ _use_clock_monotonic_condattr = false;
+ }
+}
+
+void os::Posix::init_2(void) {
+ log_info(os)("Use of CLOCK_MONOTONIC is%s supported",
+ (_clock_gettime != NULL ? "" : " not"));
+ log_info(os)("Use of pthread_condattr_setclock is%s supported",
+ (_pthread_condattr_setclock != NULL ? "" : " not"));
+ log_info(os)("Relative timed-wait using pthread_cond_timedwait is associated with %s",
+ _use_clock_monotonic_condattr ? "CLOCK_MONOTONIC" : "the default clock");
+}
+
+#else // !SUPPORTS_CLOCK_MONOTONIC
+
+void os::Posix::init(void) {
+ pthread_init_common();
+}
+
+void os::Posix::init_2(void) {
+ log_info(os)("Use of CLOCK_MONOTONIC is not supported");
+ log_info(os)("Use of pthread_condattr_setclock is not supported");
+ log_info(os)("Relative timed-wait using pthread_cond_timedwait is associated with the default clock");
+}
+
+#endif // SUPPORTS_CLOCK_MONOTONIC
+
+os::PlatformEvent::PlatformEvent() {
+ int status = pthread_cond_init(_cond, _condAttr);
+ assert_status(status == 0, status, "cond_init");
+ status = pthread_mutex_init(_mutex, _mutexAttr);
+ assert_status(status == 0, status, "mutex_init");
+ _event = 0;
+ _nParked = 0;
+}
+
+// Utility to convert the given timeout to an absolute timespec
+// (based on the appropriate clock) to use with pthread_cond_timedwait.
+// The clock queried here must be the clock used to manage the
+// timeout of the condition variable.
+//
+// The passed in timeout value is either a relative time in nanoseconds
+// or an absolute time in milliseconds. A relative timeout will be
+// associated with CLOCK_MONOTONIC if available; otherwise, or if absolute,
+// the default time-of-day clock will be used.
+
+// Given that time is a 64-bit value and the time_t used in the timespec is
+// sometimes a signed 32-bit value, we have to watch for overflow if times
+// far in the future are given. Further, on Solaris versions
+// prior to 10 there is a restriction (see cond_timedwait) that the specified
+// number of seconds, in abstime, is less than current_time + 100000000.
+// As it will be over 20 years before "now + 100000000" overflows, we can
+// ignore overflow and just impose a hard limit on seconds using the value
+// of "now + 100000000". This places a limit on the timeout of about 3.17
+// years from "now".
+//
+#define MAX_SECS 100000000
+
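As a quick sanity check on the figures in the comment above (a standalone illustrative snippet, not part of this changeset):

    // 100,000,000 seconds is roughly 1157 days, i.e. about 3.17 years:
    static_assert(100000000 / 86400 == 1157, "about 1157 days");
    constexpr double approx_years = 100000000.0 / (365.25 * 86400.0); // ~3.17
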
+// Calculate a new absolute time that is "timeout" nanoseconds from "now".
+// "unit" indicates the unit of "now_part_sec" (may be nanos or micros depending
+// on which clock is being used).
+static void calc_rel_time(timespec* abstime, jlong timeout, jlong now_sec,
+ jlong now_part_sec, jlong unit) {
+ time_t max_secs = now_sec + MAX_SECS;
+
+ jlong seconds = timeout / NANOUNITS;
+ timeout %= NANOUNITS; // remaining nanos
+
+ if (seconds >= MAX_SECS) {
+ // More seconds than we can add, so pin to max_secs.
+ abstime->tv_sec = max_secs;
+ abstime->tv_nsec = 0;
+ } else {
+ abstime->tv_sec = now_sec + seconds;
+ long nanos = (now_part_sec * (NANOUNITS / unit)) + timeout;
+ if (nanos >= NANOUNITS) { // overflow
+ abstime->tv_sec += 1;
+ nanos -= NANOUNITS;
+ }
+ abstime->tv_nsec = nanos;
+ }
+}
+
+// Unpack the given deadline in milliseconds since the epoch into the given timespec.
+// The current time in seconds is also passed in to enforce an upper bound as discussed above.
+static void unpack_abs_time(timespec* abstime, jlong deadline, jlong now_sec) {
+ time_t max_secs = now_sec + MAX_SECS;
+
+ jlong seconds = deadline / MILLIUNITS;
+ jlong millis = deadline % MILLIUNITS;
+
+ if (seconds >= max_secs) {
+ // Absolute seconds exceeds allowed max, so pin to max_secs.
+ abstime->tv_sec = max_secs;
+ abstime->tv_nsec = 0;
+ } else {
+ abstime->tv_sec = seconds;
+ abstime->tv_nsec = millis * (NANOUNITS / MILLIUNITS);
+ }
+}
+
+static void to_abstime(timespec* abstime, jlong timeout, bool isAbsolute) {
+ DEBUG_ONLY(int max_secs = MAX_SECS;)
+
+ if (timeout < 0) {
+ timeout = 0;
+ }
+
+#ifdef SUPPORTS_CLOCK_MONOTONIC
+
+ if (_use_clock_monotonic_condattr && !isAbsolute) {
+ struct timespec now;
+ int status = _clock_gettime(CLOCK_MONOTONIC, &now);
+ assert_status(status == 0, status, "clock_gettime");
+ calc_rel_time(abstime, timeout, now.tv_sec, now.tv_nsec, NANOUNITS);
+ DEBUG_ONLY(max_secs += now.tv_sec;)
+ } else {
+
+#else
+
+ { // Match the block scope.
+
+#endif // SUPPORTS_CLOCK_MONOTONIC
+
+ // Time-of-day clock is all we can reliably use.
+ struct timeval now;
+ int status = gettimeofday(&now, NULL);
+ assert_status(status == 0, errno, "gettimeofday");
+ if (isAbsolute) {
+ unpack_abs_time(abstime, timeout, now.tv_sec);
+ } else {
+ calc_rel_time(abstime, timeout, now.tv_sec, now.tv_usec, MICROUNITS);
+ }
+ DEBUG_ONLY(max_secs += now.tv_sec;)
+ }
+
+ assert(abstime->tv_sec >= 0, "tv_sec < 0");
+ assert(abstime->tv_sec <= max_secs, "tv_sec > max_secs");
+ assert(abstime->tv_nsec >= 0, "tv_nsec < 0");
+ assert(abstime->tv_nsec < NANOUNITS, "tv_nsec >= NANOUNITS");
+}
+
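To illustrate how the helpers above compose, here is a hypothetical standalone sketch (not part of this changeset) of the conversion done for a 250 ms relative wait; it assumes the usual HotSpot constants NANOUNITS == 1000000000 and MILLIUNITS == 1000:

    // PlatformEvent::park(250) below passes 250 * (NANOUNITS / MILLIUNITS),
    // i.e. 250,000,000 ns, as a relative timeout.
    struct timespec abst;
    to_abstime(&abst, 250 * (NANOUNITS / MILLIUNITS), false /* relative */);
    // When CLOCK_MONOTONIC is in use, calc_rel_time() adds 0 whole seconds and
    // 250,000,000 ns to the current monotonic time, carrying into tv_sec if
    // tv_nsec would reach NANOUNITS. The resulting abst is suitable for
    // pthread_cond_timedwait() on a condvar initialized with _condAttr.
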
+// PlatformEvent
+//
+// Assumption:
+// Only one parker can exist on an event, which is why we allocate
+// them per-thread. Multiple unparkers can coexist.
+//
+// _event serves as a restricted-range semaphore.
+// -1 : thread is blocked, i.e. there is a waiter
+// 0 : neutral: thread is running or ready,
+// could have been signaled after a wait started
+// 1 : signaled - thread is running or ready
+//
+// Having three states allows for some detection of bad usage - see
+// comments on unpark().
+
+void os::PlatformEvent::park() { // AKA "down()"
+ // Transitions for _event:
+ // -1 => -1 : illegal
+ // 1 => 0 : pass - return immediately
+ // 0 => -1 : block; then set _event to 0 before returning
+
+ // Invariant: Only the thread associated with the PlatformEvent
+ // may call park().
+ assert(_nParked == 0, "invariant");
+
+ int v;
+
+ // atomically decrement _event
+ for (;;) {
+ v = _event;
+ if (Atomic::cmpxchg(v - 1, &_event, v) == v) break;
+ }
+ guarantee(v >= 0, "invariant");
+
+ if (v == 0) { // Do this the hard way by blocking ...
+ int status = pthread_mutex_lock(_mutex);
+ assert_status(status == 0, status, "mutex_lock");
+ guarantee(_nParked == 0, "invariant");
+ ++_nParked;
+ while (_event < 0) {
+ // OS-level "spurious wakeups" are ignored
+ status = pthread_cond_wait(_cond, _mutex);
+ assert_status(status == 0, status, "cond_wait");
+ }
+ --_nParked;
+
+ _event = 0;
+ status = pthread_mutex_unlock(_mutex);
+ assert_status(status == 0, status, "mutex_unlock");
+ // Paranoia to ensure our locked and lock-free paths interact
+ // correctly with each other.
+ OrderAccess::fence();
+ }
+ guarantee(_event >= 0, "invariant");
+}
+
+int os::PlatformEvent::park(jlong millis) {
+ // Transitions for _event:
+ // -1 => -1 : illegal
+ // 1 => 0 : pass - return immediately
+ // 0 => -1 : block; then set _event to 0 before returning
+
+ // Invariant: Only the thread associated with the Event/PlatformEvent
+ // may call park().
+ assert(_nParked == 0, "invariant");
+
+ int v;
+ // atomically decrement _event
+ for (;;) {
+ v = _event;
+ if (Atomic::cmpxchg(v - 1, &_event, v) == v) break;
+ }
+ guarantee(v >= 0, "invariant");
+
+ if (v == 0) { // Do this the hard way by blocking ...
+ struct timespec abst;
+ to_abstime(&abst, millis * (NANOUNITS / MILLIUNITS), false);
+
+ int ret = OS_TIMEOUT;
+ int status = pthread_mutex_lock(_mutex);
+ assert_status(status == 0, status, "mutex_lock");
+ guarantee(_nParked == 0, "invariant");
+ ++_nParked;
+
+ while (_event < 0) {
+ status = pthread_cond_timedwait(_cond, _mutex, &abst);
+ assert_status(status == 0 || status == ETIMEDOUT,
+ status, "cond_timedwait");
+ // OS-level "spurious wakeups" are ignored unless the archaic
+ // FilterSpuriousWakeups is set false. That flag should be obsoleted.
+ if (!FilterSpuriousWakeups) break;
+ if (status == ETIMEDOUT) break;
+ }
+ --_nParked;
+
+ if (_event >= 0) {
+ ret = OS_OK;
+ }
+
+ _event = 0;
+ status = pthread_mutex_unlock(_mutex);
+ assert_status(status == 0, status, "mutex_unlock");
+ // Paranoia to ensure our locked and lock-free paths interact
+ // correctly with each other.
+ OrderAccess::fence();
+ return ret;
+ }
+ return OS_OK;
+}
+
+void os::PlatformEvent::unpark() {
+ // Transitions for _event:
+ // 0 => 1 : just return
+ // 1 => 1 : just return
+ // -1 => either 0 or 1; must signal target thread
+ // That is, we can safely transition _event from -1 to either
+ // 0 or 1.
+ // See also: "Semaphores in Plan 9" by Mullender & Cox
+ //
+ // Note: Forcing a transition from "-1" to "1" on an unpark() means
+ // that it will take two back-to-back park() calls for the owning
+ // thread to block. This has the benefit of forcing a spurious return
+ // from the first park() call after an unpark() call which will help
+ // shake out uses of park() and unpark() without checking state conditions
+ // properly. This spurious return doesn't manifest itself in any user code
+ // but only in the correctly written condition checking loops of ObjectMonitor,
+ // Mutex/Monitor, Thread::muxAcquire and os::sleep.
+
+ if (Atomic::xchg(1, &_event) >= 0) return;
+
+ int status = pthread_mutex_lock(_mutex);
+ assert_status(status == 0, status, "mutex_lock");
+ int anyWaiters = _nParked;
+ assert(anyWaiters == 0 || anyWaiters == 1, "invariant");
+ status = pthread_mutex_unlock(_mutex);
+ assert_status(status == 0, status, "mutex_unlock");
+
+ // Note that we signal() *after* dropping the lock for "immortal" Events.
+ // This is safe and avoids a common class of futile wakeups. In rare
+ // circumstances this can cause a thread to return prematurely from
+ // cond_{timed}wait() but the spurious wakeup is benign and the victim
+ // will simply re-test the condition and re-park itself.
+ // This provides particular benefit if the underlying platform does not
+ // provide wait morphing.
+
+ if (anyWaiters != 0) {
+ status = pthread_cond_signal(_cond);
+ assert_status(status == 0, status, "cond_signal");
+ }
+}
+
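The "correctly written condition checking loop" referred to in the comment above follows the pattern sketched here (a hypothetical illustration, not part of this changeset; "ready" and "event" are placeholder names for the guarded condition and the per-thread PlatformEvent):

    // Waiter: tolerate the spurious return park() may make after a prior
    // unpark() by re-testing the real condition after every wakeup.
    while (!ready) {
      event->park();
    }

    // Signaller: publish the condition first (with suitable memory ordering),
    // then wake the waiter.
    ready = true;
    event->unpark();
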
+// JSR166 support
+
+os::PlatformParker::PlatformParker() {
+ int status;
+ status = pthread_cond_init(&_cond[REL_INDEX], _condAttr);
+ assert_status(status == 0, status, "cond_init rel");
+ status = pthread_cond_init(&_cond[ABS_INDEX], NULL);
+ assert_status(status == 0, status, "cond_init abs");
+ status = pthread_mutex_init(_mutex, _mutexAttr);
+ assert_status(status == 0, status, "mutex_init");
+ _cur_index = -1; // mark as unused
+}
+
+// Parker::park decrements count if > 0, else does a condvar wait. Unpark
+// sets count to 1 and signals condvar. Only one thread ever waits
+// on the condvar. Contention seen when trying to park implies that someone
+// is unparking you, so don't wait. And spurious returns are fine, so there
+// is no need to track notifications.
+
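Put differently, Parker behaves like a one-permit semaphore. A hypothetical illustration (not part of this changeset), assuming a Parker p whose _counter is initially 0 and with park() invoked from the associated JavaThread:

    // An unpark() that arrives before park() is not lost:
    p.unpark();        // _counter becomes 1; nobody is waiting yet
    p.park(false, 0);  // the Atomic::xchg fast path consumes the permit and
                       // returns immediately without blocking
    // With no pending permit, an untimed park() blocks on the relative condvar,
    // and a timed park() waits until the deadline computed by to_abstime().
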
+void Parker::park(bool isAbsolute, jlong time) {
+
+ // Optional fast-path check:
+ // Return immediately if a permit is available.
+ // We depend on Atomic::xchg() having full barrier semantics
+ // since we are doing a lock-free update to _counter.
+ if (Atomic::xchg(0, &_counter) > 0) return;
+
+ Thread* thread = Thread::current();
+ assert(thread->is_Java_thread(), "Must be JavaThread");
+ JavaThread *jt = (JavaThread *)thread;
+
+ // Optional optimization -- avoid state transitions if there's
+ // an interrupt pending.
+ if (Thread::is_interrupted(thread, false)) {
+ return;
+ }
+
+ // Next, demultiplex/decode time arguments
+ struct timespec absTime;
+ if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
+ return;
+ }
+ if (time > 0) {
+ to_abstime(&absTime, time, isAbsolute);
+ }
+
+ // Enter safepoint region
+ // Beware of deadlocks such as 6317397.
+ // The per-thread Parker:: mutex is a classic leaf-lock.
+ // In particular a thread must never block on the Threads_lock while
+ // holding the Parker:: mutex. If safepoints are pending, both the
+ // ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
+ ThreadBlockInVM tbivm(jt);
+
+ // Don't wait if we cannot get the lock, since interference arises from
+ // unparking. Also re-check the interrupt state before trying to wait.
+ if (Thread::is_interrupted(thread, false) ||
+ pthread_mutex_trylock(_mutex) != 0) {
+ return;
+ }
+
+ int status;
+ if (_counter > 0) { // no wait needed
+ _counter = 0;
+ status = pthread_mutex_unlock(_mutex);
+ assert_status(status == 0, status, "invariant");
+ // Paranoia to ensure our locked and lock-free paths interact
+ // correctly with each other and Java-level accesses.
+ OrderAccess::fence();
+ return;
+ }
+
+ OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
+ jt->set_suspend_equivalent();
+ // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
+
+ assert(_cur_index == -1, "invariant");
+ if (time == 0) {
+ _cur_index = REL_INDEX; // arbitrary choice when not timed
+ status = pthread_cond_wait(&_cond[_cur_index], _mutex);
+ assert_status(status == 0, status, "cond_wait");
+ } else {
+ _cur_index = isAbsolute ? ABS_INDEX : REL_INDEX;
+ status = pthread_cond_timedwait(&_cond[_cur_index], _mutex, &absTime);
+ assert_status(status == 0 || status == ETIMEDOUT,
+ status, "cond_timedwait");
+ }
+ _cur_index = -1;
+
+ _counter = 0;
+ status = pthread_mutex_unlock(_mutex);
+ assert_status(status == 0, status, "invariant");
+ // Paranoia to ensure our locked and lock-free paths interact
+ // correctly with each other and Java-level accesses.
+ OrderAccess::fence();
+
+ // If externally suspended while waiting, re-suspend
+ if (jt->handle_special_suspend_equivalent_condition()) {
+ jt->java_suspend_self();
+ }
+}
+
+void Parker::unpark() {
+ int status = pthread_mutex_lock(_mutex);
+ assert_status(status == 0, status, "invariant");
+ const int s = _counter;
+ _counter = 1;
+ // must capture correct index before unlocking
+ int index = _cur_index;
+ status = pthread_mutex_unlock(_mutex);
+ assert_status(status == 0, status, "invariant");
+
+ // Note that we signal() *after* dropping the lock for "immortal" Events.
+ // This is safe and avoids a common class of futile wakeups. In rare
+ // circumstances this can cause a thread to return prematurely from
+ // cond_{timed}wait() but the spurious wakeup is benign and the victim
+ // will simply re-test the condition and re-park itself.
+ // This provides particular benefit if the underlying platform does not
+ // provide wait morphing.
+
+ if (s < 1 && index != -1) {
+ // thread is definitely parked
+ status = pthread_cond_signal(&_cond[index]);
+ assert_status(status == 0, status, "invariant");
+ }
+}
+
+
+#endif // !SOLARIS
--- a/hotspot/src/os/posix/vm/os_posix.hpp Mon May 29 20:48:10 2017 +0200
+++ b/hotspot/src/os/posix/vm/os_posix.hpp Tue May 30 17:14:52 2017 -0400
@@ -53,6 +53,9 @@
static size_t _vm_internal_thread_min_stack_allowed;
public:
+ static void init(void); // early initialization - no logging available
+ static void init_2(void); // later initialization - logging available
+
// Return default stack size for the specified thread type
static size_t default_stack_size(os::ThreadType thr_type);
// Check and sets minimum stack sizes
@@ -102,7 +105,6 @@
// On error, it will return NULL and set errno. The content of 'outbuf' is undefined.
// On truncation error ('outbuf' too small), it will return NULL and set errno to ENAMETOOLONG.
static char* realpath(const char* filename, char* outbuf, size_t outbuflen);
-
};
/*
@@ -125,4 +127,67 @@
sigjmp_buf _jmpbuf;
};
+#ifndef SOLARIS
+
+/*
+ * This is the platform-specific implementation underpinning
+ * the ParkEvent class, which itself underpins Java-level monitor
+ * operations. See park.hpp for details.
+ * These event objects are type-stable and immortal - we never delete them.
+ * Events are associated with a thread for the lifetime of the thread.
+ */
+class PlatformEvent : public CHeapObj<mtInternal> {
+ private:
+ double cachePad[4]; // Increase odds that _mutex is sole occupant of cache line
+ volatile int _event; // Event count/permit: -1, 0 or 1
+ volatile int _nParked; // Indicates if associated thread is blocked: 0 or 1
+ pthread_mutex_t _mutex[1]; // Native mutex for locking
+ pthread_cond_t _cond[1]; // Native condition variable for blocking
+ double postPad[2];
+
+ protected: // TODO-FIXME: make dtor private
+ ~PlatformEvent() { guarantee(false, "invariant"); } // immortal so can't delete
+
+ public:
+ PlatformEvent();
+ void park();
+ int park(jlong millis);
+ void unpark();
+
+ // Use caution with reset() and fired() -- they may require MEMBARs
+ void reset() { _event = 0; }
+ int fired() { return _event; }
+};
+
+// JSR166 support
+// PlatformParker provides the platform-dependent base class for the
+// Parker class. It basically provides the internal data structures:
+// - mutex and condvars
+// which are then used directly by the Parker methods defined in the OS
+// specific implementation files.
+// There is significant overlap between the functionality supported in the
+// combination of Parker+PlatformParker and PlatformEvent (above). If Parker
+// were more like ObjectMonitor we could use PlatformEvent in both (with some
+// API updates of course). But Parker methods use fastpaths that break that
+// level of encapsulation - so combining the two remains a future project.
+
+class PlatformParker : public CHeapObj<mtInternal> {
+ protected:
+ enum {
+ REL_INDEX = 0,
+ ABS_INDEX = 1
+ };
+ int _cur_index; // which cond is in use: -1, 0, 1
+ pthread_mutex_t _mutex[1];
+ pthread_cond_t _cond[2]; // one for relative times and one for absolute
+
+ public: // TODO-FIXME: make dtor private
+ ~PlatformParker() { guarantee(false, "invariant"); }
+
+ public:
+ PlatformParker();
+};
+
+#endif // !SOLARIS
+
#endif // OS_POSIX_VM_OS_POSIX_HPP
--- a/hotspot/src/os/solaris/vm/os_solaris.cpp Mon May 29 20:48:10 2017 +0200
+++ b/hotspot/src/os/solaris/vm/os_solaris.cpp Tue May 30 17:14:52 2017 -0400
@@ -1014,7 +1014,7 @@
}
debug_only(static bool signal_sets_initialized = false);
-static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;
+static sigset_t unblocked_sigs, vm_sigs;
bool os::Solaris::is_sig_ignored(int sig) {
struct sigaction oact;
@@ -1045,7 +1045,6 @@
// In reality, though, unblocking these signals is really a nop, since
// these signals are not blocked by default.
sigemptyset(&unblocked_sigs);
- sigemptyset(&allowdebug_blocked_sigs);
sigaddset(&unblocked_sigs, SIGILL);
sigaddset(&unblocked_sigs, SIGSEGV);
sigaddset(&unblocked_sigs, SIGBUS);
@@ -1055,15 +1054,12 @@
if (!ReduceSignalUsage) {
if (!os::Solaris::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
- sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
}
if (!os::Solaris::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
- sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
}
if (!os::Solaris::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
- sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
}
}
// Fill in signals that are blocked by all but the VM thread.
@@ -1091,13 +1087,6 @@
return &vm_sigs;
}
-// These are signals that are blocked during cond_wait to allow debugger in
-sigset_t* os::Solaris::allowdebug_blocked_signals() {
- assert(signal_sets_initialized, "Not initialized");
- return &allowdebug_blocked_sigs;
-}
-
-
void _handle_uncaught_cxx_exception() {
VMError::report_and_die("An uncaught C++ exception");
}
@@ -5352,14 +5341,6 @@
return;
}
-#ifdef ASSERT
- // Don't catch signals while blocked; let the running threads have the signals.
- // (This allows a debugger to break into the running thread.)
- sigset_t oldsigs;
- sigset_t* allowdebug_blocked = os::Solaris::allowdebug_blocked_signals();
- pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
-#endif
-
OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
jt->set_suspend_equivalent();
// cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
@@ -5383,9 +5364,6 @@
status == ETIME || status == ETIMEDOUT,
status, "cond_timedwait");
-#ifdef ASSERT
- pthread_sigmask(SIG_SETMASK, &oldsigs, NULL);
-#endif
_counter = 0;
status = os::Solaris::mutex_unlock(_mutex);
assert_status(status == 0, status, "mutex_unlock");
--- a/hotspot/src/os/solaris/vm/os_solaris.hpp Mon May 29 20:48:10 2017 +0200
+++ b/hotspot/src/os/solaris/vm/os_solaris.hpp Tue May 30 17:14:52 2017 -0400
@@ -259,7 +259,6 @@
static sigset_t* unblocked_signals();
static sigset_t* vm_signals();
- static sigset_t* allowdebug_blocked_signals();
// %%% Following should be promoted to os.hpp:
// Trace number of created threads