author       pliden
date         Fri, 11 Apr 2014 11:00:12 +0200
changeset    24093  095cc0a63ed9
parent       22881  b16d7faa638d
child        24351  61b33cc6d3cf
permissions  -rw-r--r--
/*
 * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#include "memory/resourceArea.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "services/threadService.hpp"
#include "trace/tracing.hpp"
#include "trace/traceMacros.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/macros.hpp"
#include "utilities/preserveException.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif

#if defined(__GNUC__) && !defined(IA64) && !defined(PPC64)
// Need to inhibit inlining for older versions of GCC to avoid build-time failures
#define ATTR __attribute__((noinline))
#else
#define ATTR
#endif


#ifdef DTRACE_ENABLED

// Only bother with this argument setup if dtrace is available
// TODO-FIXME: probes should not fire when caller is _blocked.  assert() accordingly.


#define DTRACE_MONITOR_PROBE_COMMON(obj, thread)                           \
  char* bytes = NULL;                                                      \
  int len = 0;                                                             \
  jlong jtid = SharedRuntime::get_java_tid(thread);                        \
  Symbol* klassname = ((oop)obj)->klass()->name();                         \
  if (klassname != NULL) {                                                 \
    bytes = (char*)klassname->bytes();                                     \
    len = klassname->utf8_length();                                        \
  }

#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_WAIT(jtid,                                           \
                           (monitor), bytes, len, (millis));               \
    }                                                                      \
  }

#define HOTSPOT_MONITOR_contended__enter HOTSPOT_MONITOR_CONTENDED_ENTER
#define HOTSPOT_MONITOR_contended__entered HOTSPOT_MONITOR_CONTENDED_ENTERED
#define HOTSPOT_MONITOR_contended__exit HOTSPOT_MONITOR_CONTENDED_EXIT
#define HOTSPOT_MONITOR_notify HOTSPOT_MONITOR_NOTIFY
#define HOTSPOT_MONITOR_notifyAll HOTSPOT_MONITOR_NOTIFYALL

#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)                  \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_##probe(jtid,                                        \
                              (uintptr_t)(monitor), bytes, len);           \
    }                                                                      \
  }

#else // ndef DTRACE_ENABLED

#define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon)    {;}
#define DTRACE_MONITOR_PROBE(probe, obj, thread, mon)          {;}

#endif // ndef DTRACE_ENABLED

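// Editor's note -- illustrative usage only (not part of the original source):
// the probe macros above are invoked from the contended slow paths below, e.g.
//
//   DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
//
// which, when DTRACE_ENABLED is defined and DTraceMonitorProbes is set, expands to
// HOTSPOT_MONITOR_CONTENDED_ENTER(jtid, (uintptr_t)(monitor), bytes, len) with the
// klass-name bytes gathered by DTRACE_MONITOR_PROBE_COMMON; otherwise it is a no-op.
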
// Tunables ...
// The knob* variables are effectively final.  Once set they should
// never be modified hence.  Consider using __read_mostly with GCC.

int ObjectMonitor::Knob_Verbose   = 0 ;
int ObjectMonitor::Knob_SpinLimit = 5000 ;     // derived by an external tool -
static int Knob_LogSpins          = 0 ;        // enable jvmstat tally for spins
static int Knob_HandOff           = 0 ;
static int Knob_ReportSettings    = 0 ;

static int Knob_SpinBase          = 0 ;        // Floor AKA SpinMin
static int Knob_SpinBackOff       = 0 ;        // spin-loop backoff
static int Knob_CASPenalty        = -1 ;       // Penalty for failed CAS
static int Knob_OXPenalty         = -1 ;       // Penalty for observed _owner change
static int Knob_SpinSetSucc       = 1 ;        // spinners set the _succ field
static int Knob_SpinEarly         = 1 ;
static int Knob_SuccEnabled       = 1 ;        // futile wake throttling
static int Knob_SuccRestrict      = 0 ;        // Limit successors + spinners to at-most-one
static int Knob_MaxSpinners       = -1 ;       // Should be a function of # CPUs
static int Knob_Bonus             = 100 ;      // spin success bonus
static int Knob_BonusB            = 100 ;      // spin success bonus
static int Knob_Penalty           = 200 ;      // spin failure penalty
static int Knob_Poverty           = 1000 ;
static int Knob_SpinAfterFutile   = 1 ;        // Spin after returning from park()
static int Knob_FixedSpin         = 0 ;
static int Knob_OState            = 3 ;        // Spinner checks thread state of _owner
static int Knob_UsePause          = 1 ;
static int Knob_ExitPolicy        = 0 ;
static int Knob_PreSpin           = 10 ;       // 20-100 likely better
static int Knob_ResetEvent        = 0 ;
static int BackOffMask            = 0 ;

static int Knob_FastHSSEC         = 0 ;
static int Knob_MoveNotifyee      = 2 ;        // notify() - disposition of notifyee
static int Knob_QMode             = 0 ;        // EntryList-cxq policy - queue discipline
static volatile int InitDone      = 0 ;

#define TrySpin TrySpin_VaryDuration

// -----------------------------------------------------------------------------
// Theory of operations -- Monitors lists, thread residency, etc:
//
// * A thread acquires ownership of a monitor by successfully
//   CAS()ing the _owner field from null to non-null.
//
// * Invariant: A thread appears on at most one monitor list --
//   cxq, EntryList or WaitSet -- at any one time.
//
// * Contending threads "push" themselves onto the cxq with CAS
//   and then spin/park.
//
// * After a contending thread eventually acquires the lock it must
//   dequeue itself from either the EntryList or the cxq.
//
// * The exiting thread identifies and unparks an "heir presumptive"
//   tentative successor thread on the EntryList.  Critically, the
//   exiting thread doesn't unlink the successor thread from the EntryList.
//   After having been unparked, the wakee will recontend for ownership of
//   the monitor.  The successor (wakee) will either acquire the lock or
//   re-park itself.
//
//   Succession is provided for by a policy of competitive handoff.
//   The exiting thread does _not_ grant or pass ownership to the
//   successor thread.  (This is also referred to as "handoff succession".)
//   Instead the exiting thread releases ownership and possibly wakes
//   a successor, so the successor can (re)compete for ownership of the lock.
//   If the EntryList is empty but the cxq is populated the exiting
//   thread will drain the cxq into the EntryList.  It does so by
//   detaching the cxq (installing null with CAS) and folding
//   the threads from the cxq into the EntryList.  The EntryList is
//   doubly linked, while the cxq is singly linked because of the
//   CAS-based "push" used to enqueue recently arrived threads (RATs).
//
// * Concurrency invariants:
//
//   -- only the monitor owner may access or mutate the EntryList.
//      The mutex property of the monitor itself protects the EntryList
//      from concurrent interference.
//   -- Only the monitor owner may detach the cxq.
//
// * The monitor entry list operations avoid locks, but strictly speaking
//   they're not lock-free.  Enter is lock-free, exit is not.
//   See http://j2se.east/~dice/PERSIST/040825-LockFreeQueues.html
//
// * The cxq can have multiple concurrent "pushers" but only one concurrent
//   detaching thread.  This mechanism is immune from the ABA corruption.
//   More precisely, the CAS-based "push" onto cxq is ABA-oblivious.
//
// * Taken together, the cxq and the EntryList constitute or form a
//   single logical queue of threads stalled trying to acquire the lock.
//   We use two distinct lists to improve the odds of a constant-time
//   dequeue operation after acquisition (in the ::enter() epilogue) and
//   to reduce heat on the list ends.  (c.f. Michael Scott's "2Q" algorithm).
//   A key desideratum is to minimize queue & monitor metadata manipulation
//   that occurs while holding the monitor lock -- that is, we want to
//   minimize monitor lock holds times.  Note that even a small amount of
//   fixed spinning will greatly reduce the # of enqueue-dequeue operations
//   on EntryList|cxq.  That is, spinning relieves contention on the "inner"
//   locks and monitor metadata.
//
//   Cxq points to the set of Recently Arrived Threads attempting entry.
//   Because we push threads onto _cxq with CAS, the RATs must take the form of
//   a singly-linked LIFO.  We drain _cxq into EntryList at unlock-time when
//   the unlocking thread notices that EntryList is null but _cxq is != null.
//
//   The EntryList is ordered by the prevailing queue discipline and
//   can be organized in any convenient fashion, such as a doubly-linked list or
//   a circular doubly-linked list.  Critically, we want insert and delete operations
//   to operate in constant-time.  If we need a priority queue then something akin
//   to Solaris' sleepq would work nicely.  Viz.,
//   http://agg.eng/ws/on10_nightly/source/usr/src/uts/common/os/sleepq.c.
//   Queue discipline is enforced at ::exit() time, when the unlocking thread
//   drains the cxq into the EntryList, and orders or reorders the threads on the
//   EntryList accordingly.
//
//   Barring "lock barging", this mechanism provides fair cyclic ordering,
//   somewhat similar to an elevator-scan.
//
// * The monitor synchronization subsystem avoids the use of native
//   synchronization primitives except for the narrow platform-specific
//   park-unpark abstraction.  See the comments in os_solaris.cpp regarding
//   the semantics of park-unpark.  Put another way, this monitor implementation
//   depends only on atomic operations and park-unpark.  The monitor subsystem
//   manages all RUNNING->BLOCKED and BLOCKED->READY transitions while the
//   underlying OS manages the READY<->RUN transitions.
//
// * Waiting threads reside on the WaitSet list -- wait() puts
//   the caller onto the WaitSet.
//
// * notify() or notifyAll() simply transfers threads from the WaitSet to
//   either the EntryList or cxq.  Subsequent exit() operations will
//   unpark the notifyee.  Unparking a notifyee in notify() is inefficient -
//   it's likely the notifyee would simply impale itself on the lock held
//   by the notifier.
//
// * An interesting alternative is to encode cxq as (List,LockByte) where
//   the LockByte is 0 iff the monitor is owned.  _owner is simply an auxiliary
//   variable, like _recursions, in the scheme.  The threads or Events that form
//   the list would have to be aligned in 256-byte addresses.  A thread would
//   try to acquire the lock or enqueue itself with CAS, but exiting threads
//   could use a 1-0 protocol and simply STB to set the LockByte to 0.
//   Note that it is *not* word-tearing, but it does presume that full-word
//   CAS operations are coherent when intermixed with STB operations.  That's true
//   on most common processors.
//
// * See also http://blogs.sun.com/dave


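// Editor's note -- illustrative sketch only (not part of the original source):
// the lock-free "push" onto _cxq and the owner-only drain described above reduce
// to two idioms that recur throughout this file:
//
//   // Contended enter: prepend Self's ObjectWaiter node to the singly-linked _cxq.
//   for (;;) {
//     node._next = nxt = _cxq ;
//     if (Atomic::cmpxchg_ptr (&node, &_cxq, nxt) == nxt) break ;  // lost the race - retry
//   }
//
//   // Exit (monitor owner only): detach the whole chain, tantamount to w = swap(&_cxq, NULL).
//   ObjectWaiter * w = _cxq ;
//   for (;;) {
//     ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
//     if (u == w) break ;
//     w = u ;
//   }
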
// -----------------------------------------------------------------------------
// Enter support

bool ObjectMonitor::try_enter(Thread* THREAD) {
  if (THREAD != _owner) {
    if (THREAD->is_lock_owned ((address)_owner)) {
       assert(_recursions == 0, "internal state error");
       _owner = THREAD ;
       _recursions = 1 ;
       OwnerIsThread = 1 ;
       return true;
    }
    if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
      return false;
    }
    return true;
  } else {
    _recursions++;
    return true;
  }
}

void ATTR ObjectMonitor::enter(TRAPS) {
  // The following code is ordered to check the most common cases first
  // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
  Thread * const Self = THREAD ;
  void * cur ;

  cur = Atomic::cmpxchg_ptr (Self, &_owner, NULL) ;
  if (cur == NULL) {
     // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
     assert (_recursions == 0   , "invariant") ;
     assert (_owner      == Self, "invariant") ;
     // CONSIDER: set or assert OwnerIsThread == 1
     return ;
  }

  if (cur == Self) {
     // TODO-FIXME: check for integer overflow!  BUGID 6557169.
     _recursions ++ ;
     return ;
  }

  if (Self->is_lock_owned ((address)cur)) {
    assert (_recursions == 0, "internal state error");
    _recursions = 1 ;
    // Commute owner from a thread-specific on-stack BasicLockObject address to
    // a full-fledged "Thread *".
    _owner = Self ;
    OwnerIsThread = 1 ;
    return ;
  }

  // We've encountered genuine contention.
  assert (Self->_Stalled == 0, "invariant") ;
  Self->_Stalled = intptr_t(this) ;

  // Try one round of spinning *before* enqueueing Self
  // and before going through the awkward and expensive state
  // transitions.  The following spin is strictly optional ...
  // Note that if we acquire the monitor from an initial spin
  // we forgo posting JVMTI events and firing DTRACE probes.
  if (Knob_SpinEarly && TrySpin (Self) > 0) {
     assert (_owner == Self      , "invariant") ;
     assert (_recursions == 0    , "invariant") ;
     assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
     Self->_Stalled = 0 ;
     return ;
  }

  assert (_owner != Self          , "invariant") ;
  assert (_succ  != Self          , "invariant") ;
  assert (Self->is_Java_thread()  , "invariant") ;
  JavaThread * jt = (JavaThread *) Self ;
  assert (!SafepointSynchronize::is_at_safepoint(), "invariant") ;
  assert (jt->thread_state() != _thread_blocked   , "invariant") ;
  assert (this->object() != NULL  , "invariant") ;
  assert (_count >= 0, "invariant") ;

  // Prevent deflation at STW-time.  See deflate_idle_monitors() and is_busy().
  // Ensure the object-monitor relationship remains stable while there's contention.
  Atomic::inc_ptr(&_count);

  EventJavaMonitorEnter event;

  { // Change java thread status to indicate blocked on monitor enter.
    JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);

    DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
    if (JvmtiExport::should_post_monitor_contended_enter()) {
      JvmtiExport::post_monitor_contended_enter(jt, this);

      // The current thread does not yet own the monitor and does not
      // yet appear on any queues that would get it made the successor.
      // This means that the JVMTI_EVENT_MONITOR_CONTENDED_ENTER event
      // handler cannot accidentally consume an unpark() meant for the
      // ParkEvent associated with this ObjectMonitor.
    }

    OSThreadContendState osts(Self->osthread());
    ThreadBlockInVM tbivm(jt);

    Self->set_current_pending_monitor(this);

    // TODO-FIXME: change the following for(;;) loop to straight-line code.
    for (;;) {
      jt->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition()
      // or java_suspend_self()

      EnterI (THREAD) ;

      if (!ExitSuspendEquivalent(jt)) break ;

      //
      // We have acquired the contended monitor, but while we were
      // waiting another thread suspended us. We don't want to enter
      // the monitor while suspended because that would surprise the
      // thread that suspended us.
      //
      _recursions = 0 ;
      _succ = NULL ;
      exit (false, Self) ;

      jt->java_suspend_self();
    }
    Self->set_current_pending_monitor(NULL);
  }

  Atomic::dec_ptr(&_count);
  assert (_count >= 0, "invariant") ;
  Self->_Stalled = 0 ;

  // Must either set _recursions = 0 or ASSERT _recursions == 0.
  assert (_recursions == 0     , "invariant") ;
  assert (_owner == Self       , "invariant") ;
  assert (_succ  != Self       , "invariant") ;
  assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;

  // The thread -- now the owner -- is back in vm mode.
  // Report the glorious news via TI, DTrace and jvmstat.
  // The probe effect is non-trivial.  All the reportage occurs
  // while we hold the monitor, increasing the length of the critical
  // section.  Amdahl's parallel speedup law comes vividly into play.
  //
  // Another option might be to aggregate the events (thread local or
  // per-monitor aggregation) and defer reporting until a more opportune
  // time -- such as next time some thread encounters contention but has
  // yet to acquire the lock.  While that thread is spinning we could
  // increment JVMStat counters, etc.

  DTRACE_MONITOR_PROBE(contended__entered, this, object(), jt);
  if (JvmtiExport::should_post_monitor_contended_entered()) {
    JvmtiExport::post_monitor_contended_entered(jt, this);

    // The current thread already owns the monitor and is not going to
    // call park() for the remainder of the monitor enter protocol. So
    // it doesn't matter if the JVMTI_EVENT_MONITOR_CONTENDED_ENTERED
    // event handler consumed an unpark() issued by the thread that
    // just exited the monitor.
  }

  if (event.should_commit()) {
    event.set_klass(((oop)this->object())->klass());
    event.set_previousOwner((TYPE_JAVALANGTHREAD)_previous_owner_tid);
    event.set_address((TYPE_ADDRESS)(uintptr_t)(this->object_addr()));
    event.commit();
  }

  if (ObjectMonitor::_sync_ContendedLockAttempts != NULL) {
     ObjectMonitor::_sync_ContendedLockAttempts->inc() ;
  }
}

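// Editor's note -- illustrative sketch only (not part of the original source):
// recursive (nested) locking by the owning thread never reaches the contended path;
// it is handled by the "cur == Self" branch near the top of enter() and by the
// _recursions check in exit():
//
//   mon->enter(THREAD);        // CAS succeeds: _owner = THREAD, _recursions == 0
//   mon->enter(THREAD);        // cur == Self:  _recursions becomes 1
//   mon->exit(true, THREAD);   // _recursions != 0: decrement back to 0 and return
//   mon->exit(true, THREAD);   // _recursions == 0: drop the lock (_owner = NULL)
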
// Caveat: TryLock() is not necessarily serializing if it returns failure.
// Callers must compensate as needed.

int ObjectMonitor::TryLock (Thread * Self) {
   for (;;) {
      void * own = _owner ;
      if (own != NULL) return 0 ;
      if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
         // Either guarantee _recursions == 0 or set _recursions = 0.
         assert (_recursions == 0, "invariant") ;
         assert (_owner == Self, "invariant") ;
         // CONSIDER: set or assert that OwnerIsThread == 1
         return 1 ;
      }
      // The lock had been free momentarily, but we lost the race to the lock.
      // Interference -- the CAS failed.
      // We can either return -1 or retry.
      // Retry doesn't make as much sense because the lock was just acquired.
      if (true) return -1 ;
   }
}

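// Editor's note -- summary of TryLock()'s return convention, inferred from the body
// above (not part of the original source):
//    1 : the CAS succeeded and the caller now owns the monitor
//    0 : _owner was already non-NULL, so no acquisition was attempted
//   -1 : _owner appeared NULL but the CAS lost the race to another thread
// Callers in this file only test "TryLock(Self) > 0", so 0 and -1 are both treated
// as failure; per the caveat above, neither failure path is necessarily serializing.
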
void ATTR ObjectMonitor::EnterI (TRAPS) {
    Thread * Self = THREAD ;
    assert (Self->is_Java_thread(), "invariant") ;
    assert (((JavaThread *) Self)->thread_state() == _thread_blocked   , "invariant") ;

    // Try the lock - TATAS
    if (TryLock (Self) > 0) {
        assert (_succ != Self              , "invariant") ;
        assert (_owner == Self             , "invariant") ;
        assert (_Responsible != Self       , "invariant") ;
        return ;
    }

    DeferredInitialize () ;

    // We try one round of spinning *before* enqueueing Self.
    //
    // If the _owner is ready but OFFPROC we could use a YieldTo()
    // operation to donate the remainder of this thread's quantum
    // to the owner.  This has subtle but beneficial affinity
    // effects.

    if (TrySpin (Self) > 0) {
        assert (_owner == Self        , "invariant") ;
        assert (_succ != Self         , "invariant") ;
        assert (_Responsible != Self  , "invariant") ;
        return ;
    }

    // The Spin failed -- Enqueue and park the thread ...
    assert (_succ  != Self            , "invariant") ;
    assert (_owner != Self            , "invariant") ;
    assert (_Responsible != Self      , "invariant") ;

    // Enqueue "Self" on ObjectMonitor's _cxq.
    //
    // Node acts as a proxy for Self.
    // As an aside, if we were ever to rewrite the synchronization code mostly
    // in Java, WaitNodes, ObjectMonitors, and Events would become 1st-class
    // Java objects.  This would avoid awkward lifecycle and liveness issues,
    // as well as eliminate a subset of ABA issues.
    // TODO: eliminate ObjectWaiter and enqueue either Threads or Events.
    //

    ObjectWaiter node(Self) ;
    Self->_ParkEvent->reset() ;
    node._prev   = (ObjectWaiter *) 0xBAD ;
    node.TState  = ObjectWaiter::TS_CXQ ;

    // Push "Self" onto the front of the _cxq.
    // Once on cxq/EntryList, Self stays on-queue until it acquires the lock.
    // Note that spinning tends to reduce the rate at which threads
    // enqueue and dequeue on EntryList|cxq.
    ObjectWaiter * nxt ;
    for (;;) {
        node._next = nxt = _cxq ;
        if (Atomic::cmpxchg_ptr (&node, &_cxq, nxt) == nxt) break ;

        // Interference - the CAS failed because _cxq changed.  Just retry.
        // As an optional optimization we retry the lock.
        if (TryLock (Self) > 0) {
            assert (_succ != Self         , "invariant") ;
            assert (_owner == Self        , "invariant") ;
            assert (_Responsible != Self  , "invariant") ;
            return ;
        }
    }

    // Check for cxq|EntryList edge transition to non-null.  This indicates
    // the onset of contention.  While contention persists exiting threads
    // will use a ST:MEMBAR:LD 1-1 exit protocol.  When contention abates exit
    // operations revert to the faster 1-0 mode.  This enter operation may interleave
    // (race) a concurrent 1-0 exit operation, resulting in stranding, so we
    // arrange for one of the contending threads to use a timed park() operation
    // to detect and recover from the race.  (Stranding is a form of progress failure
    // where the monitor is unlocked but all the contending threads remain parked).
    // That is, at least one of the contended threads will periodically poll _owner.
    // One of the contending threads will become the designated "Responsible" thread.
    // The Responsible thread uses a timed park instead of a normal indefinite park
    // operation -- it periodically wakes and checks for and recovers from potential
    // strandings admitted by 1-0 exit operations.  We need at most one Responsible
    // thread per-monitor at any given moment.  Only threads on cxq|EntryList may
    // be responsible for a monitor.
    //
    // Currently, one of the contended threads takes on the added role of "Responsible".
    // A viable alternative would be to use a dedicated "stranding checker" thread
    // that periodically iterated over all the threads (or active monitors) and unparked
    // successors where there was risk of stranding.  This would help eliminate the
    // timer scalability issues we see on some platforms as we'd only have one thread
    // -- the checker -- parked on a timer.

    if ((SyncFlags & 16) == 0 && nxt == NULL && _EntryList == NULL) {
        // Try to assume the role of responsible thread for the monitor.
        // CONSIDER:  ST vs CAS vs { if (Responsible==null) Responsible=Self }
        Atomic::cmpxchg_ptr (Self, &_Responsible, NULL) ;
    }

    // The lock may have been released while this thread was occupied queueing
    // itself onto _cxq.  To close the race and avoid "stranding" and
    // progress-liveness failure we must resample-retry _owner before parking.
    // Note the Dekker/Lamport duality: ST cxq; MEMBAR; LD Owner.
    // In this case the ST-MEMBAR is accomplished with CAS().
    //
    // TODO: Defer all thread state transitions until park-time.
    // Since state transitions are heavy and inefficient we'd like
    // to defer the state transitions until absolutely necessary,
    // and in doing so avoid some transitions ...

    TEVENT (Inflated enter - Contention) ;
    int nWakeups = 0 ;
    int RecheckInterval = 1 ;

    for (;;) {

        if (TryLock (Self) > 0) break ;
        assert (_owner != Self, "invariant") ;

        if ((SyncFlags & 2) && _Responsible == NULL) {
           Atomic::cmpxchg_ptr (Self, &_Responsible, NULL) ;
        }

        // park self
        if (_Responsible == Self || (SyncFlags & 1)) {
            TEVENT (Inflated enter - park TIMED) ;
            Self->_ParkEvent->park ((jlong) RecheckInterval) ;
            // Increase the RecheckInterval, but clamp the value.
            RecheckInterval *= 8 ;
            if (RecheckInterval > 1000) RecheckInterval = 1000 ;
        } else {
            TEVENT (Inflated enter - park UNTIMED) ;
            Self->_ParkEvent->park() ;
        }

        if (TryLock(Self) > 0) break ;

        // The lock is still contested.
        // Keep a tally of the # of futile wakeups.
        // Note that the counter is not protected by a lock or updated by atomics.
        // That is by design - we trade "lossy" counters which are exposed to
        // races during updates for a lower probe effect.
        TEVENT (Inflated enter - Futile wakeup) ;
        if (ObjectMonitor::_sync_FutileWakeups != NULL) {
           ObjectMonitor::_sync_FutileWakeups->inc() ;
        }
        ++ nWakeups ;

        // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
        // We can defer clearing _succ until after the spin completes
        // TrySpin() must tolerate being called with _succ == Self.
        // Try yet another round of adaptive spinning.
        if ((Knob_SpinAfterFutile & 1) && TrySpin (Self) > 0) break ;

        // We can find that we were unpark()ed and redesignated _succ while
        // we were spinning.  That's harmless.  If we iterate and call park(),
        // park() will consume the event and return immediately and we'll
        // just spin again.  This pattern can repeat, leaving _succ to simply
        // spin on a CPU.  Enable Knob_ResetEvent to clear pending unparks().
        // Alternately, we can sample fired() here, and if set, forgo spinning
        // in the next iteration.

        if ((Knob_ResetEvent & 1) && Self->_ParkEvent->fired()) {
           Self->_ParkEvent->reset() ;
           OrderAccess::fence() ;
        }
        if (_succ == Self) _succ = NULL ;

        // Invariant: after clearing _succ a thread *must* retry _owner before parking.
        OrderAccess::fence() ;
    }

    // Egress :
    // Self has acquired the lock -- Unlink Self from the cxq or EntryList.
    // Normally we'll find Self on the EntryList .
    // From the perspective of the lock owner (this thread), the
    // EntryList is stable and cxq is prepend-only.
    // The head of cxq is volatile but the interior is stable.
    // In addition, Self.TState is stable.

    assert (_owner == Self      , "invariant") ;
    assert (object() != NULL    , "invariant") ;
    // I'd like to write:
    //   guarantee (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
    // but as we're at a safepoint that's not safe.

    UnlinkAfterAcquire (Self, &node) ;
    if (_succ == Self) _succ = NULL ;

    assert (_succ != Self, "invariant") ;
    if (_Responsible == Self) {
        _Responsible = NULL ;
        OrderAccess::fence(); // Dekker pivot-point

        // We may leave threads on cxq|EntryList without a designated
        // "Responsible" thread.  This is benign.  When this thread subsequently
        // exits the monitor it can "see" such preexisting "old" threads --
        // threads that arrived on the cxq|EntryList before the fence, above --
        // by LDing cxq|EntryList.  Newly arrived threads -- that is, threads
        // that arrive on cxq after the ST:MEMBAR, above -- will set Responsible
        // non-null and elect a new "Responsible" timer thread.
        //
        // This thread executes:
        //    ST Responsible=null; MEMBAR    (in enter epilogue - here)
        //    LD cxq|EntryList               (in subsequent exit)
        //
        // Entering threads in the slow/contended path execute:
        //    ST cxq=nonnull; MEMBAR; LD Responsible (in enter prolog)
        //    The (ST cxq; MEMBAR) is accomplished with CAS().
        //
        // The MEMBAR, above, prevents the LD of cxq|EntryList in the subsequent
        // exit operation from floating above the ST Responsible=null.
    }

    // We've acquired ownership with CAS().
    // CAS is serializing -- it has MEMBAR/FENCE-equivalent semantics.
    // But since the CAS() this thread may have also stored into _succ,
    // EntryList, cxq or Responsible.  These meta-data updates must be
    // visible __before this thread subsequently drops the lock.
    // Consider what could occur if we didn't enforce this constraint --
    // STs to monitor meta-data and user-data could reorder with (become
    // visible after) the ST in exit that drops ownership of the lock.
    // Some other thread could then acquire the lock, but observe inconsistent
    // or old monitor meta-data and heap data.  That violates the JMM.
    // To that end, the 1-0 exit() operation must have at least STST|LDST
    // "release" barrier semantics.  Specifically, there must be at least a
    // STST|LDST barrier in exit() before the ST of null into _owner that drops
    // the lock.   The barrier ensures that changes to monitor meta-data and data
    // protected by the lock will be visible before we release the lock, and
    // therefore before some other thread (CPU) has a chance to acquire the lock.
    // See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
    //
    // Critically, any prior STs to _succ or EntryList must be visible before
    // the ST of null into _owner in the *subsequent* (following) corresponding
    // monitorexit.  Recall too, that in 1-0 mode monitorexit does not necessarily
    // execute a serializing instruction.

    if (SyncFlags & 8) {
       OrderAccess::fence() ;
    }
    return ;
}

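// Editor's note -- illustrative sketch only (not part of the original source):
// the Dekker/Lamport duality mentioned in EnterI() pairs the contended-enter path
// above with the 1-0 exit path later in this file:
//
//   enter (contending thread): ST _cxq (via CAS) ; MEMBAR ; LD _owner          (TryLock retry)
//   exit  (owning thread):     ST _owner = NULL  ; MEMBAR ; LD _cxq|_EntryList
//
// Each side publishes its store before loading the other side's variable, so at least
// one of them observes the conflict and retries or wakes a successor, which is what
// closes the stranding window that the "Responsible" thread's timed park also covers.
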
// ReenterI() is a specialized inline form of the latter half of the
// contended slow-path from EnterI().  We use ReenterI() only for
// monitor reentry in wait().
//
// In the future we should reconcile EnterI() and ReenterI(), adding
// Knob_Reset and Knob_SpinAfterFutile support and restructuring the
// loop accordingly.

void ATTR ObjectMonitor::ReenterI (Thread * Self, ObjectWaiter * SelfNode) {
    assert (Self != NULL                , "invariant") ;
    assert (SelfNode != NULL            , "invariant") ;
    assert (SelfNode->_thread == Self   , "invariant") ;
    assert (_waiters > 0                , "invariant") ;
    assert (((oop)(object()))->mark() == markOopDesc::encode(this) , "invariant") ;
    assert (((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant") ;
    JavaThread * jt = (JavaThread *) Self ;

    int nWakeups = 0 ;
    for (;;) {
        ObjectWaiter::TStates v = SelfNode->TState ;
        guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ;
        assert    (_owner != Self, "invariant") ;

        if (TryLock (Self) > 0) break ;
        if (TrySpin (Self) > 0) break ;

        TEVENT (Wait Reentry - parking) ;

        // State transition wrappers around park() ...
        // ReenterI() wisely defers state transitions until
        // it's clear we must park the thread.
        {
           OSThreadContendState osts(Self->osthread());
           ThreadBlockInVM tbivm(jt);

           // cleared by handle_special_suspend_equivalent_condition()
           // or java_suspend_self()
           jt->set_suspend_equivalent();
           if (SyncFlags & 1) {
              Self->_ParkEvent->park ((jlong)1000) ;
           } else {
              Self->_ParkEvent->park () ;
           }

           // were we externally suspended while we were waiting?
           for (;;) {
              if (!ExitSuspendEquivalent (jt)) break ;
              if (_succ == Self) { _succ = NULL; OrderAccess::fence(); }
              jt->java_suspend_self();
              jt->set_suspend_equivalent();
           }
        }

        // Try again, but just so we distinguish between futile wakeups and
        // successful wakeups.  The following test isn't algorithmically
        // necessary, but it helps us maintain sensible statistics.
        if (TryLock(Self) > 0) break ;

        // The lock is still contested.
        // Keep a tally of the # of futile wakeups.
        // Note that the counter is not protected by a lock or updated by atomics.
        // That is by design - we trade "lossy" counters which are exposed to
        // races during updates for a lower probe effect.
        TEVENT (Wait Reentry - futile wakeup) ;
        ++ nWakeups ;

        // Assuming this is not a spurious wakeup we'll normally
        // find that _succ == Self.
        if (_succ == Self) _succ = NULL ;

        // Invariant: after clearing _succ a contending thread
        // *must* retry _owner before parking.
        OrderAccess::fence() ;

        if (ObjectMonitor::_sync_FutileWakeups != NULL) {
           ObjectMonitor::_sync_FutileWakeups->inc() ;
        }
    }

    // Self has acquired the lock -- Unlink Self from the cxq or EntryList .
    // Normally we'll find Self on the EntryList.
    // Unlinking from the EntryList is constant-time and atomic-free.
    // From the perspective of the lock owner (this thread), the
    // EntryList is stable and cxq is prepend-only.
    // The head of cxq is volatile but the interior is stable.
    // In addition, Self.TState is stable.

    assert (_owner == Self, "invariant") ;
    assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
    UnlinkAfterAcquire (Self, SelfNode) ;
    if (_succ == Self) _succ = NULL ;
    assert (_succ != Self, "invariant") ;
    SelfNode->TState = ObjectWaiter::TS_RUN ;
    OrderAccess::fence() ;      // see comments at the end of EnterI()
}

// By convention we unlink a contending thread from EntryList|cxq immediately
// after the thread acquires the lock in ::enter().  Equally, we could defer
// unlinking the thread until ::exit()-time.

void ObjectMonitor::UnlinkAfterAcquire (Thread * Self, ObjectWaiter * SelfNode)
{
    assert (_owner == Self, "invariant") ;
    assert (SelfNode->_thread == Self, "invariant") ;

    if (SelfNode->TState == ObjectWaiter::TS_ENTER) {
        // Normal case: remove Self from the DLL EntryList .
        // This is a constant-time operation.
        ObjectWaiter * nxt = SelfNode->_next ;
        ObjectWaiter * prv = SelfNode->_prev ;
        if (nxt != NULL) nxt->_prev = prv ;
        if (prv != NULL) prv->_next = nxt ;
        if (SelfNode == _EntryList ) _EntryList = nxt ;
        assert (nxt == NULL || nxt->TState == ObjectWaiter::TS_ENTER, "invariant") ;
        assert (prv == NULL || prv->TState == ObjectWaiter::TS_ENTER, "invariant") ;
        TEVENT (Unlink from EntryList) ;
    } else {
        guarantee (SelfNode->TState == ObjectWaiter::TS_CXQ, "invariant") ;
        // Inopportune interleaving -- Self is still on the cxq.
        // This usually means the enqueue of self raced an exiting thread.
        // Normally we'll find Self near the front of the cxq, so
        // dequeueing is typically fast.  If need be we can accelerate
        // this with some MCS/CHL-like bidirectional list hints and advisory
        // back-links so dequeueing from the interior will normally operate
        // in constant-time.
        // Dequeue Self from either the head (with CAS) or from the interior
        // with a linear-time scan and normal non-atomic memory operations.
        // CONSIDER: if Self is on the cxq then simply drain cxq into EntryList
        // and then unlink Self from EntryList.  We have to drain eventually,
        // so it might as well be now.

        ObjectWaiter * v = _cxq ;
        assert (v != NULL, "invariant") ;
        if (v != SelfNode || Atomic::cmpxchg_ptr (SelfNode->_next, &_cxq, v) != v) {
            // The CAS above can fail from interference IFF a "RAT" arrived.
            // In that case Self must be in the interior and can no longer be
            // at the head of cxq.
            if (v == SelfNode) {
                assert (_cxq != v, "invariant") ;
                v = _cxq ;          // CAS above failed - start scan at head of list
            }
            ObjectWaiter * p ;
            ObjectWaiter * q = NULL ;
            for (p = v ; p != NULL && p != SelfNode; p = p->_next) {
                q = p ;
                assert (p->TState == ObjectWaiter::TS_CXQ, "invariant") ;
            }
            assert (v != SelfNode,  "invariant") ;
            assert (p == SelfNode,  "Node not found on cxq") ;
            assert (p != _cxq,      "invariant") ;
            assert (q != NULL,      "invariant") ;
            assert (q->_next == p,  "invariant") ;
            q->_next = p->_next ;
        }
        TEVENT (Unlink from cxq) ;
    }

    // Diagnostic hygiene ...
    SelfNode->_prev  = (ObjectWaiter *) 0xBAD ;
    SelfNode->_next  = (ObjectWaiter *) 0xBAD ;
    SelfNode->TState = ObjectWaiter::TS_RUN ;
}

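// Editor's note (not part of the original source): UnlinkAfterAcquire() above thus has
// two cases -- a constant-time doubly-linked-list unlink when the node has already been
// migrated to the EntryList (TS_ENTER), and a CAS on the cxq head plus, if that fails, a
// linear scan of the singly-linked cxq (TS_CXQ).  The linear case is expected to be short
// because Self is normally pushed at, and therefore found near, the head of the cxq.
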
// -----------------------------------------------------------------------------
// Exit support
//
// exit()
// ~~~~~~
// Note that the collector can't reclaim the objectMonitor or deflate
// the object out from underneath the thread calling ::exit() as the
// thread calling ::exit() never transitions to a stable state.
// This inhibits GC, which in turn inhibits asynchronous (and
// inopportune) reclamation of "this".
//
// We'd like to assert that: (THREAD->thread_state() != _thread_blocked) ;
// There's one exception to the claim above, however.  EnterI() can call
// exit() to drop a lock if the acquirer has been externally suspended.
// In that case exit() is called with _thread_state as _thread_blocked,
// but the monitor's _count field is > 0, which inhibits reclamation.
//
// 1-0 exit
// ~~~~~~~~
// ::exit() uses a canonical 1-1 idiom with a MEMBAR although some of
// the fast-path operators have been optimized so the common ::exit()
// operation is 1-0.  See i486.ad fast_unlock(), for instance.
// The code emitted by fast_unlock() elides the usual MEMBAR.  This
// greatly improves latency -- MEMBAR and CAS having considerable local
// latency on modern processors -- but at the cost of "stranding".  Absent the
// MEMBAR, a thread in fast_unlock() can race a thread in the slow
// ::enter() path, resulting in the entering thread being stranded
// and a progress-liveness failure.  Stranding is extremely rare.
// We use timers (timed park operations) & periodic polling to detect
// and recover from stranding.  Potentially stranded threads periodically
// wake up and poll the lock.  See the usage of the _Responsible variable.
//
// The CAS() in enter provides for safety and exclusion, while the CAS or
// MEMBAR in exit provides for progress and avoids stranding.  1-0 locking
// eliminates the CAS/MEMBAR from the exit path, but it admits stranding.
// We detect and recover from stranding with timers.
//
// If a thread transiently strands it'll park until (a) another
// thread acquires the lock and then drops the lock, at which time the
// exiting thread will notice and unpark the stranded thread, or, (b)
// the timer expires.  If the lock is high traffic then the stranding latency
// will be low due to (a).  If the lock is low traffic then the odds of
// stranding are lower, although the worst-case stranding latency
// is longer.  Critically, we don't want to put excessive load in the
// platform's timer subsystem.  We want to minimize both the timer injection
// rate (timers created/sec) as well as the number of timers active at
// any one time.  (more precisely, we want to minimize timer-seconds, which is
// the integral of the # of active timers at any instant over time).
// Both impinge on OS scalability.  Given that, at most one thread parked on
// a monitor will use a timer.

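// Editor's note -- illustrative sketch only (not part of the original source):
// stripped of policy knobs and succession, the fast 1-0 exit described above is
// essentially the idiom used at the top of the retry loop in exit() below:
//
//   OrderAccess::release_store_ptr (&_owner, NULL) ;  // drop the lock (release semantics)
//   OrderAccess::storeload() ;                        // make the drop visible before ...
//   if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
//      return ;                                       // ... checking whether anyone needs waking
//   }
//   // otherwise re-CAS _owner and pick a successor from EntryList/cxq (ExitEpilog).
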
void ATTR ObjectMonitor::exit(bool not_suspended, TRAPS) { |
6975 | 912 |
Thread * Self = THREAD ; |
913 |
if (THREAD != _owner) { |
|
914 |
if (THREAD->is_lock_owned((address) _owner)) { |
|
915 |
// Transmute _owner from a BasicLock pointer to a Thread address. |
|
916 |
// We don't need to hold _mutex for this transition. |
|
917 |
// Non-null to Non-null is safe as long as all readers can |
|
918 |
// tolerate either flavor. |
|
919 |
assert (_recursions == 0, "invariant") ; |
|
920 |
_owner = THREAD ; |
|
921 |
_recursions = 0 ; |
|
922 |
OwnerIsThread = 1 ; |
|
923 |
} else { |
|
924 |
// NOTE: we need to handle unbalanced monitor enter/exit |
|
925 |
// in native code by throwing an exception. |
|
926 |
// TODO: Throw an IllegalMonitorStateException ? |
|
927 |
TEVENT (Exit - Throw IMSX) ; |
|
928 |
assert(false, "Non-balanced monitor enter/exit!"); |
|
929 |
if (false) { |
|
930 |
THROW(vmSymbols::java_lang_IllegalMonitorStateException()); |
|
931 |
} |
|
932 |
return; |
|
933 |
} |
|
934 |
} |
|
935 |
||
936 |
if (_recursions != 0) { |
|
937 |
_recursions--; // this is simple recursive enter |
|
938 |
TEVENT (Inflated exit - recursive) ; |
|
939 |
return ; |
|
940 |
} |
|
941 |
||
942 |
// Invariant: after setting Responsible=null an thread must execute |
|
943 |
// a MEMBAR or other serializing instruction before fetching EntryList|cxq. |
|
944 |
if ((SyncFlags & 4) == 0) { |
|
945 |
_Responsible = NULL ; |
|
946 |
} |
|
947 |
||
18025 | 948 |
#if INCLUDE_TRACE |
949 |
// get the owner's thread id for the MonitorEnter event |
|
950 |
// if it is enabled and the thread isn't suspended |
|
951 |
if (not_suspended && Tracing::is_event_enabled(TraceJavaMonitorEnterEvent)) { |
|
952 |
_previous_owner_tid = SharedRuntime::get_java_tid(Self); |
|
953 |
} |
|
954 |
#endif |
|
955 |
||
6975 | 956 |
for (;;) { |
957 |
assert (THREAD == _owner, "invariant") ; |
|
958 |
||
959 |
||
960 |
if (Knob_ExitPolicy == 0) { |
|
961 |
// release semantics: prior loads and stores from within the critical section |
|
962 |
// must not float (reorder) past the following store that drops the lock. |
|
963 |
// On SPARC that requires MEMBAR #loadstore|#storestore. |
|
964 |
// But of course in TSO #loadstore|#storestore is not required. |
|
965 |
// I'd like to write one of the following: |
|
966 |
// A. OrderAccess::release() ; _owner = NULL |
|
967 |
// B. OrderAccess::loadstore(); OrderAccess::storestore(); _owner = NULL; |
|
968 |
// Unfortunately OrderAccess::release() and OrderAccess::loadstore() both |
|
969 |
// store into a _dummy variable. That store is not needed, but can result |
|
970 |
// in massive wasteful coherency traffic on classic SMP systems. |
|
971 |
// Instead, I use release_store(), which is implemented as just a simple |
|
972 |
// ST on x64, x86 and SPARC. |
|
973 |
OrderAccess::release_store_ptr (&_owner, NULL) ; // drop the lock |
|
974 |
OrderAccess::storeload() ; // See if we need to wake a successor |
|
975 |
if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) { |
|
976 |
TEVENT (Inflated exit - simple egress) ; |
|
977 |
return ; |
|
978 |
} |
|
979 |
TEVENT (Inflated exit - complex egress) ; |
|
980 |
||
981 |
// Normally the exiting thread is responsible for ensuring succession, |
|
982 |
// but if other successors are ready or other entering threads are spinning |
|
983 |
// then this thread can simply store NULL into _owner and exit without |
|
984 |
// waking a successor. The existence of spinners or ready successors |
|
985 |
// guarantees proper succession (liveness). Responsibility passes to the |
|
986 |
// ready or running successors. The exiting thread delegates the duty. |
|
987 |
// More precisely, if a successor already exists this thread is absolved |
|
988 |
// of the responsibility of waking (unparking) one. |
|
989 |
// |
|
990 |
// The _succ variable is critical to reducing futile wakeup frequency. |
|
991 |
// _succ identifies the "heir presumptive" thread that has been made |
|
992 |
// ready (unparked) but that has not yet run. We need only one such |
|
993 |
// successor thread to guarantee progress. |
|
994 |
// See http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf |
|
995 |
// section 3.3 "Futile Wakeup Throttling" for details. |
|
996 |
// |
|
997 |
// Note that spinners in Enter() also set _succ non-null. |
|
998 |
// In the current implementation spinners opportunistically set |
|
999 |
// _succ so that exiting threads might avoid waking a successor. |
|
1000 |
// Another less appealing alternative would be for the exiting thread |
|
1001 |
// to drop the lock and then spin briefly to see if a spinner managed |
|
1002 |
// to acquire the lock. If so, the exiting thread could exit |
|
1003 |
// immediately without waking a successor, otherwise the exiting |
|
1004 |
// thread would need to dequeue and wake a successor. |
|
1005 |
// (Note that we'd need to make the post-drop spin short, but no |
|
1006 |
// shorter than the worst-case round-trip cache-line migration time. |
|
1007 |
// The dropped lock needs to become visible to the spinner, and then |
|
1008 |
// the acquisition of the lock by the spinner must become visible to |
|
1009 |
// the exiting thread). |
|
1010 |
// |
|
1011 |
||
1012 |
// It appears that an heir-presumptive (successor) must be made ready. |
|
1013 |
// Only the current lock owner can manipulate the EntryList or |
|
1014 |
// drain _cxq, so we need to reacquire the lock. If we fail |
|
1015 |
// to reacquire the lock the responsibility for ensuring succession |
|
1016 |
// falls to the new owner. |
|
1017 |
// |
|
1018 |
if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) { |
|
1019 |
return ; |
|
1020 |
} |
|
1021 |
TEVENT (Exit - Reacquired) ; |
|
1022 |
} else { |
|
1023 |
if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) { |
|
1024 |
OrderAccess::release_store_ptr (&_owner, NULL) ; // drop the lock |
|
1025 |
OrderAccess::storeload() ; |
|
1026 |
// Ratify the previously observed values. |
|
1027 |
if (_cxq == NULL || _succ != NULL) { |
|
1028 |
TEVENT (Inflated exit - simple egress) ; |
|
1029 |
return ; |
|
1030 |
} |
|
1031 |
||
1032 |
// inopportune interleaving -- the exiting thread (this thread) |
|
1033 |
// in the fast-exit path raced an entering thread in the slow-enter |
|
1034 |
// path. |
|
1035 |
// We have two choices: |
|
1036 |
// A. Try to reacquire the lock. |
|
1037 |
// If the CAS() fails return immediately, otherwise |
|
1038 |
// we either restart/rerun the exit operation, or simply |
|
1039 |
// fall-through into the code below which wakes a successor. |
|
1040 |
// B. If the elements forming the EntryList|cxq are TSM |
|
1041 |
// we could simply unpark() the lead thread and return |
|
1042 |
// without having set _succ. |
|
1043 |
if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) { |
|
1044 |
TEVENT (Inflated exit - reacquired succeeded) ; |
|
1045 |
return ; |
|
1046 |
} |
|
1047 |
TEVENT (Inflated exit - reacquired failed) ; |
|
1048 |
} else { |
|
1049 |
TEVENT (Inflated exit - complex egress) ; |
|
1050 |
} |
|
1051 |
} |
|
1052 |
||
1053 |
guarantee (_owner == THREAD, "invariant") ; |

   ObjectWaiter * w = NULL ;
   int QMode = Knob_QMode ;

   if (QMode == 2 && _cxq != NULL) {
       // QMode == 2 : cxq has precedence over EntryList.
       // Try to directly wake a successor from the cxq.
       // If successful, the successor will need to unlink itself from cxq.
       w = _cxq ;
       assert (w != NULL, "invariant") ;
       assert (w->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
       ExitEpilog (Self, w) ;
       return ;
   }

   if (QMode == 3 && _cxq != NULL) {
       // Aggressively drain cxq into EntryList at the first opportunity.
       // This policy ensures that recently-run threads live at the head of EntryList.
       // Drain _cxq into EntryList - bulk transfer.
       // First, detach _cxq.
       // The following loop is tantamount to: w = swap (&cxq, NULL)
       w = _cxq ;
       for (;;) {
          assert (w != NULL, "Invariant") ;
          ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
          if (u == w) break ;
          w = u ;
       }
       assert (w != NULL, "invariant") ;

       ObjectWaiter * q = NULL ;
       ObjectWaiter * p ;
       for (p = w ; p != NULL ; p = p->_next) {
           guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
           p->TState = ObjectWaiter::TS_ENTER ;
           p->_prev = q ;
           q = p ;
       }

       // Append the RATs to the EntryList
       // TODO: organize EntryList as a CDLL so we can locate the tail in constant-time.
       ObjectWaiter * Tail ;
       for (Tail = _EntryList ; Tail != NULL && Tail->_next != NULL ; Tail = Tail->_next) ;
       if (Tail == NULL) {
           _EntryList = w ;
       } else {
           Tail->_next = w ;
           w->_prev = Tail ;
       }

       // Fall thru into code that tries to wake a successor from EntryList
   }

   if (QMode == 4 && _cxq != NULL) {
       // Aggressively drain cxq into EntryList at the first opportunity.
       // This policy ensures that recently-run threads live at the head of EntryList.

       // Drain _cxq into EntryList - bulk transfer.
       // First, detach _cxq.
       // The following loop is tantamount to: w = swap (&cxq, NULL)
       w = _cxq ;
       for (;;) {
          assert (w != NULL, "Invariant") ;
          ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
          if (u == w) break ;
          w = u ;
       }
       assert (w != NULL, "invariant") ;

       ObjectWaiter * q = NULL ;
       ObjectWaiter * p ;
       for (p = w ; p != NULL ; p = p->_next) {
           guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
           p->TState = ObjectWaiter::TS_ENTER ;
           p->_prev = q ;
           q = p ;
       }

       // Prepend the RATs to the EntryList
       if (_EntryList != NULL) {
           q->_next = _EntryList ;
           _EntryList->_prev = q ;
       }
       _EntryList = w ;

       // Fall thru into code that tries to wake a successor from EntryList
   }

   w = _EntryList ;
   if (w != NULL) {
       // I'd like to write: guarantee (w->_thread != Self).
       // But in practice an exiting thread may find itself on the EntryList.
       // Let's say thread T1 calls O.wait().  wait() enqueues T1 on O's waitset and
       // then calls exit().  exit() releases the lock by setting O._owner to NULL.
       // Let's say T1 then stalls.  T2 acquires O and calls O.notify().  The
       // notify() operation moves T1 from O's waitset to O's EntryList.  T2 then
       // releases the lock "O".  T1 resumes immediately after the ST of null into
       // _owner, above.  T1 notices that the EntryList is populated, so it
       // reacquires the lock and then finds itself on the EntryList.
       // Given all that, we have to tolerate the circumstance where "w" is
       // associated with Self.
       assert (w->TState == ObjectWaiter::TS_ENTER, "invariant") ;
       ExitEpilog (Self, w) ;
       return ;
   }

   // If we find that both _cxq and EntryList are null then just
   // re-run the exit protocol from the top.
   w = _cxq ;
   if (w == NULL) continue ;

   // Drain _cxq into EntryList - bulk transfer.
   // First, detach _cxq.
   // The following loop is tantamount to: w = swap (&cxq, NULL)
   for (;;) {
      assert (w != NULL, "Invariant") ;
      ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
      if (u == w) break ;
      w = u ;
   }
   TEVENT (Inflated exit - drain cxq into EntryList) ;

   assert (w != NULL, "invariant") ;
   assert (_EntryList == NULL, "invariant") ;

   // Convert the LIFO SLL anchored by _cxq into a DLL.
   // The list reorganization step operates in O(LENGTH(w)) time.
   // It's critical that this step operate quickly as
   // "Self" still holds the outer-lock, restricting parallelism
   // and effectively lengthening the critical section.
   // Invariant: s chases t chases u.
   // TODO-FIXME: consider changing EntryList from a DLL to a CDLL so
   // we have faster access to the tail.

   if (QMode == 1) {
      // QMode == 1 : drain cxq to EntryList, reversing order
      // We also reverse the order of the list.
      ObjectWaiter * s = NULL ;
      ObjectWaiter * t = w ;
      ObjectWaiter * u = NULL ;
      while (t != NULL) {
          guarantee (t->TState == ObjectWaiter::TS_CXQ, "invariant") ;
          t->TState = ObjectWaiter::TS_ENTER ;
          u = t->_next ;
          t->_prev = u ;
          t->_next = s ;
          s = t;
          t = u ;
      }
      _EntryList = s ;
      assert (s != NULL, "invariant") ;
   } else {
      // QMode == 0 or QMode == 2
      _EntryList = w ;
      ObjectWaiter * q = NULL ;
      ObjectWaiter * p ;
      for (p = w ; p != NULL ; p = p->_next) {
          guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
          p->TState = ObjectWaiter::TS_ENTER ;
          p->_prev = q ;
          q = p ;
      }
   }

   // In 1-0 mode we need: ST EntryList; MEMBAR #storestore; ST _owner = NULL
   // The MEMBAR is satisfied by the release_store() operation in ExitEpilog().

   // See if we can abdicate to a spinner instead of waking a thread.
   // A primary goal of the implementation is to reduce the
   // context-switch rate.
   if (_succ != NULL) continue;

   w = _EntryList ;
   if (w != NULL) {
       guarantee (w->TState == ObjectWaiter::TS_ENTER, "invariant") ;
       ExitEpilog (Self, w) ;
       return ;
   }
 }
}
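
// Illustrative sketch (not used by the code above): each cmpxchg-based detach
// loop is tantamount to an atomic swap of _cxq with NULL.  Assuming
// Atomic::xchg_ptr has swap semantics on the target platform, the same effect
// could be expressed as:
//
//   ObjectWaiter * w = (ObjectWaiter *) Atomic::xchg_ptr (NULL, &_cxq) ;
//
// which simply restates the "w = swap (&cxq, NULL)" comments in executable form.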

// ExitSuspendEquivalent:
// A faster alternative to handle_special_suspend_equivalent_condition()
//
// handle_special_suspend_equivalent_condition() unconditionally
// acquires the SR_lock.  On some platforms uncontended MutexLocker()
// operations have high latency.  Note that in ::enter() we call HSSEC
// while holding the monitor, so we effectively lengthen the critical sections.
//
// There are a number of possible solutions:
//
// A.  To ameliorate the problem we might also defer state transitions
//     to as late as possible -- just prior to parking.
//     Given that, we'd call HSSEC after having returned from park(),
//     but before attempting to acquire the monitor.  This is only a
//     partial solution.  It avoids calling HSSEC while holding the
//     monitor (good), but it still increases successor reacquisition latency --
//     the interval between unparking a successor and the time the successor
//     resumes and retries the lock.  See ReenterI(), which defers state transitions.
//     If we use this technique we can also avoid the EnterI()-exit() loop
//     in ::enter() where we iteratively drop the lock and then attempt
//     to reacquire it after suspending.
//
// B.  In the future we might fold all the suspend bits into a
//     composite per-thread suspend flag and then update it with CAS().
//     Alternately, a Dekker-like mechanism with multiple variables
//     would suffice:
//       ST Self->_suspend_equivalent = false
//       MEMBAR
//       LD Self->_suspend_flags
//


bool ObjectMonitor::ExitSuspendEquivalent (JavaThread * jSelf) {
   int Mode = Knob_FastHSSEC ;
   if (Mode && !jSelf->is_external_suspend()) {
      assert (jSelf->is_suspend_equivalent(), "invariant") ;
      jSelf->clear_suspend_equivalent() ;
      if (2 == Mode) OrderAccess::storeload() ;
      if (!jSelf->is_external_suspend()) return false ;
      // We raced a suspension -- fall thru into the slow path
      TEVENT (ExitSuspendEquivalent - raced) ;
      jSelf->set_suspend_equivalent() ;
   }
   return jSelf->handle_special_suspend_equivalent_condition() ;
}


void ObjectMonitor::ExitEpilog (Thread * Self, ObjectWaiter * Wakee) {
   assert (_owner == Self, "invariant") ;

   // Exit protocol:
   // 1. ST _succ = wakee
   // 2. membar #loadstore|#storestore;
   // 3. ST _owner = NULL
   // 4. unpark(wakee)

   _succ = Knob_SuccEnabled ? Wakee->_thread : NULL ;
   ParkEvent * Trigger = Wakee->_event ;

   // Hygiene -- once we've set _owner = NULL we can't safely dereference Wakee again.
   // The thread associated with Wakee may have grabbed the lock and "Wakee" may be
   // out-of-scope (non-extant).
   Wakee = NULL ;

   // Drop the lock
   OrderAccess::release_store_ptr (&_owner, NULL) ;
   OrderAccess::fence() ;                               // ST _owner vs LD in unpark()

   if (SafepointSynchronize::do_call_back()) {
      TEVENT (unpark before SAFEPOINT) ;
   }

   DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self);
   Trigger->unpark() ;

   // Maintain stats and report events to JVMTI
   if (ObjectMonitor::_sync_Parks != NULL) {
      ObjectMonitor::_sync_Parks->inc() ;
   }
}


// -----------------------------------------------------------------------------
// Class Loader deadlock handling.
//
// complete_exit exits a lock returning recursion count
// complete_exit/reenter operate as a wait without waiting
// complete_exit requires an inflated monitor
// The _owner field is not always the Thread addr even with an
// inflated monitor, e.g. the monitor can be inflated by a non-owning
// thread due to contention.
intptr_t ObjectMonitor::complete_exit(TRAPS) {
   Thread * const Self = THREAD;
   assert(Self->is_Java_thread(), "Must be Java thread!");
   JavaThread *jt = (JavaThread *)THREAD;

   DeferredInitialize();

   if (THREAD != _owner) {
      if (THREAD->is_lock_owned ((address)_owner)) {
         assert(_recursions == 0, "internal state error");
         _owner = THREAD ;   /* Convert from basiclock addr to Thread addr */
         _recursions = 0 ;
         OwnerIsThread = 1 ;
      }
   }

   guarantee(Self == _owner, "complete_exit not owner");
   intptr_t save = _recursions; // record the old recursion count
   _recursions = 0;             // set the recursion level to be 0
   exit (true, Self) ;          // exit the monitor
   guarantee (_owner != Self, "invariant");
   return save;
}

// reenter() enters a lock and sets recursion count
// complete_exit/reenter operate as a wait without waiting
void ObjectMonitor::reenter(intptr_t recursions, TRAPS) {
   Thread * const Self = THREAD;
   assert(Self->is_Java_thread(), "Must be Java thread!");
   JavaThread *jt = (JavaThread *)THREAD;

   guarantee(_owner != Self, "reenter already owner");
   enter (THREAD);       // enter the monitor
   guarantee (_recursions == 0, "reenter recursion");
   _recursions = recursions;
   return;
}
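
// Illustrative pairing (a sketch, not code used in this file): a caller that
// must temporarily relinquish an inflated monitor -- e.g. to break a class
// loader deadlock -- saves the recursion count with complete_exit() and later
// restores it with reenter():
//
//   intptr_t recur_count = mon->complete_exit(THREAD);   // drop the lock
//   ...                                                   // the lock is free here
//   mon->reenter(recur_count, THREAD);                    // reacquire, restore count
//
// The hypothetical "mon" above is an inflated ObjectMonitor; the actual callers
// of this pair live in ObjectSynchronizer.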


// -----------------------------------------------------------------------------
// A macro is used below because there may already be a pending
// exception which should not abort the execution of the routines
// which use this (which is why we don't put this into check_slow and
// call it with a CHECK argument).

#define CHECK_OWNER()                                                 \
  do {                                                                \
    if (THREAD != _owner) {                                           \
      if (THREAD->is_lock_owned((address) _owner)) {                  \
        _owner = THREAD ;  /* Convert from basiclock addr to Thread addr */ \
        _recursions = 0;                                              \
        OwnerIsThread = 1 ;                                           \
      } else {                                                        \
        TEVENT (Throw IMSX) ;                                         \
        THROW(vmSymbols::java_lang_IllegalMonitorStateException());   \
      }                                                               \
    }                                                                 \
  } while (false)
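
// CHECK_OWNER() is invoked on entry to wait(), notify() and notifyAll() below.
// If the current thread neither owns the monitor nor owns the associated
// BasicLock, the THROW above posts an IllegalMonitorStateException as the
// pending exception and returns from the invoking routine.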

// check_slow() is a misnomer.  It's called simply to throw an IMSX exception.
// TODO-FIXME: remove check_slow() -- it's likely dead.

void ObjectMonitor::check_slow(TRAPS) {
  TEVENT (check_slow - throw IMSX) ;
  assert(THREAD != _owner && !THREAD->is_lock_owned((address) _owner), "must not be owner");
  THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), "current thread not owner");
}

static int Adjust (volatile int * adr, int dx) {
  int v ;
  for (v = *adr ; Atomic::cmpxchg (v + dx, adr, v) != v; v = *adr) ;
  return v ;
}
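
// Adjust() is a CAS-based atomic add: the loop retries until it installs
// (v + dx) and then returns the value observed before the update.  In this
// file it is used as Adjust(&_Spinner, 1) / Adjust(&_Spinner, -1) to maintain
// the count of concurrently spinning threads without taking a lock.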

// helper method for posting a monitor wait event
void ObjectMonitor::post_monitor_wait_event(EventJavaMonitorWait* event,
                                            jlong notifier_tid,
                                            jlong timeout,
                                            bool timedout) {
  event->set_klass(((oop)this->object())->klass());
  event->set_timeout((TYPE_ULONG)timeout);
  event->set_address((TYPE_ADDRESS)(uintptr_t)(this->object_addr()));
  event->set_notifier((TYPE_OSTHREAD)notifier_tid);
  event->set_timedOut((TYPE_BOOLEAN)timedout);
  event->commit();
}

// -----------------------------------------------------------------------------
// Wait/Notify/NotifyAll
//
// Note: a subset of changes to ObjectMonitor::wait()
// will need to be replicated in complete_exit above
void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
   Thread * const Self = THREAD ;
   assert(Self->is_Java_thread(), "Must be Java thread!");
   JavaThread *jt = (JavaThread *)THREAD;

   DeferredInitialize () ;

   // Throw IMSX or IEX.
   CHECK_OWNER();

   EventJavaMonitorWait event;

   // check for a pending interrupt
   if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
     // post monitor waited event.  Note that this is past-tense, we are done waiting.
     if (JvmtiExport::should_post_monitor_waited()) {
        // Note: 'false' parameter is passed here because the
        // wait was not timed out due to thread interrupt.
        JvmtiExport::post_monitor_waited(jt, this, false);

        // In this short circuit of the monitor wait protocol, the
        // current thread never drops ownership of the monitor and
        // never gets added to the wait queue so the current thread
        // cannot be made the successor.  This means that the
        // JVMTI_EVENT_MONITOR_WAITED event handler cannot accidentally
        // consume an unpark() meant for the ParkEvent associated with
        // this ObjectMonitor.
     }
     if (event.should_commit()) {
       post_monitor_wait_event(&event, 0, millis, false);
     }
     TEVENT (Wait - Throw IEX) ;
     THROW(vmSymbols::java_lang_InterruptedException());
     return ;
   }

   TEVENT (Wait) ;

   assert (Self->_Stalled == 0, "invariant") ;
   Self->_Stalled = intptr_t(this) ;
   jt->set_current_waiting_monitor(this);

   // create a node to be put into the queue
   // Critically, after we reset() the event but prior to park(), we must check
   // for a pending interrupt.
   ObjectWaiter node(Self);
   node.TState = ObjectWaiter::TS_WAIT ;
   Self->_ParkEvent->reset() ;
   OrderAccess::fence();          // ST into Event; membar ; LD interrupted-flag

   // Enter the waiting queue, which is a circular doubly linked list in this case
   // but it could be a priority queue or any data structure.
   // _WaitSetLock protects the wait queue.  Normally the wait queue is accessed only
   // by the owner of the monitor *except* in the case where park()
   // returns because of a timeout or interrupt.  Contention is exceptionally rare
   // so we use a simple spin-lock instead of a heavier-weight blocking lock.

   Thread::SpinAcquire (&_WaitSetLock, "WaitSet - add") ;
   AddWaiter (&node) ;
   Thread::SpinRelease (&_WaitSetLock) ;

   if ((SyncFlags & 4) == 0) {
      _Responsible = NULL ;
   }
   intptr_t save = _recursions; // record the old recursion count
   _waiters++;                  // increment the number of waiters
   _recursions = 0;             // set the recursion level to be 0
   exit (true, Self) ;          // exit the monitor
   guarantee (_owner != Self, "invariant") ;

   // The thread is on the WaitSet list - now park() it.
   // On MP systems it's conceivable that a brief spin before we park
   // could be profitable.
   //
   // TODO-FIXME: change the following logic to a loop of the form
   //   while (!timeout && !interrupted && _notified == 0) park()

   int ret = OS_OK ;
   int WasNotified = 0 ;
   { // State transition wrappers
     OSThread* osthread = Self->osthread();
     OSThreadWaitState osts(osthread, true);
     {
       ThreadBlockInVM tbivm(jt);
       // Thread is in thread_blocked state and oop access is unsafe.
       jt->set_suspend_equivalent();

       if (interruptible && (Thread::is_interrupted(THREAD, false) || HAS_PENDING_EXCEPTION)) {
           // Intentionally empty
       } else
       if (node._notified == 0) {
         if (millis <= 0) {
            Self->_ParkEvent->park () ;
         } else {
            ret = Self->_ParkEvent->park (millis) ;
         }
       }

       // were we externally suspended while we were waiting?
       if (ExitSuspendEquivalent (jt)) {
          // TODO-FIXME: add -- if succ == Self then succ = null.
          jt->java_suspend_self();
       }

     } // Exit thread safepoint: transition _thread_blocked -> _thread_in_vm


     // Node may be on the WaitSet, the EntryList (or cxq), or in transition
     // from the WaitSet to the EntryList.
     // See if we need to remove Node from the WaitSet.
     // We use double-checked locking to avoid grabbing _WaitSetLock
     // if the thread is not on the wait queue.
     //
     // Note that we don't need a fence before the fetch of TState.
     // In the worst case we'll fetch an old, stale value of TS_WAIT previously
     // written by this thread.  (Perhaps the fetch might even be satisfied
     // by a look-aside into the processor's own store buffer, although given
     // the length of the code path between the prior ST and this load that's
     // highly unlikely).  If the following LD fetches a stale TS_WAIT value
     // then we'll acquire the lock and then re-fetch a fresh TState value.
     // That is, we fail toward safety.

     if (node.TState == ObjectWaiter::TS_WAIT) {
         Thread::SpinAcquire (&_WaitSetLock, "WaitSet - unlink") ;
         if (node.TState == ObjectWaiter::TS_WAIT) {
            DequeueSpecificWaiter (&node) ;       // unlink from WaitSet
            assert(node._notified == 0, "invariant");
            node.TState = ObjectWaiter::TS_RUN ;
         }
         Thread::SpinRelease (&_WaitSetLock) ;
     }

     // The thread is now either off-list (TS_RUN),
     // on the EntryList (TS_ENTER), or on the cxq (TS_CXQ).
     // The Node's TState variable is stable from the perspective of this thread.
     // No other threads will asynchronously modify TState.
     guarantee (node.TState != ObjectWaiter::TS_WAIT, "invariant") ;
     OrderAccess::loadload() ;
     if (_succ == Self) _succ = NULL ;
     WasNotified = node._notified ;

     // Reentry phase -- reacquire the monitor.
     // re-enter contended monitor after object.wait().
     // retain OBJECT_WAIT state until re-enter successfully completes
     // Thread state is thread_in_vm and oop access is again safe,
     // although the raw address of the object may have changed.
     // (Don't cache naked oops over safepoints, of course).

     // post monitor waited event. Note that this is past-tense, we are done waiting.
     if (JvmtiExport::should_post_monitor_waited()) {
       JvmtiExport::post_monitor_waited(jt, this, ret == OS_TIMEOUT);

       if (node._notified != 0 && _succ == Self) {
         // In this part of the monitor wait-notify-reenter protocol it
         // is possible (and normal) for another thread to do a fastpath
         // monitor enter-exit while this thread is still trying to get
         // to the reenter portion of the protocol.
         //
         // The ObjectMonitor was notified and the current thread is
         // the successor which also means that an unpark() has already
         // been done. The JVMTI_EVENT_MONITOR_WAITED event handler can
         // consume the unpark() that was done when the successor was
         // set because the same ParkEvent is shared between Java
         // monitors and JVM/TI RawMonitors (for now).
         //
         // We redo the unpark() to ensure forward progress, i.e., we
         // don't want all pending threads hanging (parked) with none
         // entering the unlocked monitor.
         node._event->unpark();
       }
     }

     if (event.should_commit()) {
       post_monitor_wait_event(&event, node._notifier_tid, millis, ret == OS_TIMEOUT);
     }

     OrderAccess::fence() ;

     assert (Self->_Stalled != 0, "invariant") ;
     Self->_Stalled = 0 ;

     assert (_owner != Self, "invariant") ;
     ObjectWaiter::TStates v = node.TState ;
     if (v == ObjectWaiter::TS_RUN) {
         enter (Self) ;
     } else {
         guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ;
         ReenterI (Self, &node) ;
         node.wait_reenter_end(this);
     }

     // Self has reacquired the lock.
     // Lifecycle - the node representing Self must not appear on any queues.
     // Node is about to go out-of-scope, but even if it were immortal we wouldn't
     // want residual elements associated with this thread left on any lists.
     guarantee (node.TState == ObjectWaiter::TS_RUN, "invariant") ;
     assert (_owner == Self, "invariant") ;
     assert (_succ != Self , "invariant") ;
   } // OSThreadWaitState()

   jt->set_current_waiting_monitor(NULL);

   guarantee (_recursions == 0, "invariant") ;
   _recursions = save;     // restore the old recursion count
   _waiters--;             // decrement the number of waiters

   // Verify a few postconditions
   assert (_owner == Self       , "invariant") ;
   assert (_succ  != Self       , "invariant") ;
   assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;

   if (SyncFlags & 32) {
      OrderAccess::fence() ;
   }

   // check if the notification happened
   if (!WasNotified) {
     // no, it could be timeout or Thread.interrupt() or both
     // check for interrupt event, otherwise it is timeout
     if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
       TEVENT (Wait - throw IEX from epilog) ;
       THROW(vmSymbols::java_lang_InterruptedException());
     }
   }

   // NOTE: A spurious wake up will be considered as a timeout.
   // Monitor notify has precedence over thread interrupt.
}


// Consider:
// If the lock is cool (cxq == null && succ == null) and we're on an MP system
// then instead of transferring a thread from the WaitSet to the EntryList
// we might just dequeue a thread from the WaitSet and directly unpark() it.

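// Where a notified waiter is placed is governed by Knob_MoveNotifyee ("Policy"
// in notify() and notifyAll() below).  In brief (the code below is authoritative):
//   Policy == 0 : prepend the waiter to the EntryList
//   Policy == 1 : append the waiter to the EntryList
//   Policy == 2 : prepend the waiter to the cxq (CAS onto the head)
//   Policy == 3 : append the waiter to the cxq
//   Policy >= 4 : mark the waiter TS_RUN and unpark() it immediately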
void ObjectMonitor::notify(TRAPS) {
  CHECK_OWNER();
  if (_WaitSet == NULL) {
     TEVENT (Empty-Notify) ;
     return ;
  }
  DTRACE_MONITOR_PROBE(notify, this, object(), THREAD);

  int Policy = Knob_MoveNotifyee ;

  Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notify") ;
  ObjectWaiter * iterator = DequeueWaiter() ;
  if (iterator != NULL) {
     TEVENT (Notify1 - Transfer) ;
     guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ;
     guarantee (iterator->_notified == 0, "invariant") ;
     if (Policy != 4) {
        iterator->TState = ObjectWaiter::TS_ENTER ;
     }
     iterator->_notified = 1 ;
     Thread * Self = THREAD;
     iterator->_notifier_tid = Self->osthread()->thread_id();

     ObjectWaiter * List = _EntryList ;
     if (List != NULL) {
        assert (List->_prev == NULL, "invariant") ;
        assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ;
        assert (List != iterator, "invariant") ;
     }

     if (Policy == 0) {       // prepend to EntryList
         if (List == NULL) {
             iterator->_next = iterator->_prev = NULL ;
             _EntryList = iterator ;
         } else {
             List->_prev = iterator ;
             iterator->_next = List ;
             iterator->_prev = NULL ;
             _EntryList = iterator ;
        }
     } else
     if (Policy == 1) {       // append to EntryList
         if (List == NULL) {
             iterator->_next = iterator->_prev = NULL ;
             _EntryList = iterator ;
         } else {
            // CONSIDER: finding the tail currently requires a linear-time walk of
            // the EntryList.  We can make tail access constant-time by converting to
            // a CDLL instead of using our current DLL.
            ObjectWaiter * Tail ;
            for (Tail = List ; Tail->_next != NULL ; Tail = Tail->_next) ;
            assert (Tail != NULL && Tail->_next == NULL, "invariant") ;
            Tail->_next = iterator ;
            iterator->_prev = Tail ;
            iterator->_next = NULL ;
        }
     } else
     if (Policy == 2) {       // prepend to cxq
         // prepend to cxq
         if (List == NULL) {
             iterator->_next = iterator->_prev = NULL ;
             _EntryList = iterator ;
         } else {
            iterator->TState = ObjectWaiter::TS_CXQ ;
            for (;;) {
                ObjectWaiter * Front = _cxq ;
                iterator->_next = Front ;
                if (Atomic::cmpxchg_ptr (iterator, &_cxq, Front) == Front) {
                    break ;
                }
            }
         }
     } else
     if (Policy == 3) {       // append to cxq
        iterator->TState = ObjectWaiter::TS_CXQ ;
        for (;;) {
            ObjectWaiter * Tail ;
            Tail = _cxq ;
            if (Tail == NULL) {
                iterator->_next = NULL ;
                if (Atomic::cmpxchg_ptr (iterator, &_cxq, NULL) == NULL) {
                   break ;
                }
            } else {
                while (Tail->_next != NULL) Tail = Tail->_next ;
                Tail->_next = iterator ;
                iterator->_prev = Tail ;
                iterator->_next = NULL ;
                break ;
            }
        }
     } else {
        ParkEvent * ev = iterator->_event ;
        iterator->TState = ObjectWaiter::TS_RUN ;
        OrderAccess::fence() ;
        ev->unpark() ;
     }

     if (Policy < 4) {
       iterator->wait_reenter_begin(this);
     }

     // _WaitSetLock protects the wait queue, not the EntryList.  We could
     // move the add-to-EntryList operation, above, outside the critical section
     // protected by _WaitSetLock.  In practice that's not useful.  With the
     // exception of wait() timeouts and interrupts the monitor owner
     // is the only thread that grabs _WaitSetLock.  There's almost no contention
     // on _WaitSetLock so it's not profitable to reduce the length of the
     // critical section.
  }

  Thread::SpinRelease (&_WaitSetLock) ;

  if (iterator != NULL && ObjectMonitor::_sync_Notifications != NULL) {
     ObjectMonitor::_sync_Notifications->inc() ;
  }
}


void ObjectMonitor::notifyAll(TRAPS) {
  CHECK_OWNER();
  ObjectWaiter* iterator;
  if (_WaitSet == NULL) {
      TEVENT (Empty-NotifyAll) ;
      return ;
  }
  DTRACE_MONITOR_PROBE(notifyAll, this, object(), THREAD);

  int Policy = Knob_MoveNotifyee ;
  int Tally = 0 ;
  Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notifyall") ;

  for (;;) {
     iterator = DequeueWaiter () ;
     if (iterator == NULL) break ;
     TEVENT (NotifyAll - Transfer1) ;
     ++Tally ;

     // Disposition - what might we do with iterator ?
     // a.  add it directly to the EntryList - either tail or head.
     // b.  push it onto the front of the _cxq.
     // For now we use (a).

     guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ;
     guarantee (iterator->_notified == 0, "invariant") ;
     iterator->_notified = 1 ;
     Thread * Self = THREAD;
     iterator->_notifier_tid = Self->osthread()->thread_id();
     if (Policy != 4) {
        iterator->TState = ObjectWaiter::TS_ENTER ;
     }

     ObjectWaiter * List = _EntryList ;
     if (List != NULL) {
        assert (List->_prev == NULL, "invariant") ;
        assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ;
        assert (List != iterator, "invariant") ;
     }

     if (Policy == 0) {       // prepend to EntryList
         if (List == NULL) {
             iterator->_next = iterator->_prev = NULL ;
             _EntryList = iterator ;
         } else {
             List->_prev = iterator ;
             iterator->_next = List ;
             iterator->_prev = NULL ;
             _EntryList = iterator ;
        }
     } else
     if (Policy == 1) {       // append to EntryList
         if (List == NULL) {
             iterator->_next = iterator->_prev = NULL ;
             _EntryList = iterator ;
         } else {
            // CONSIDER: finding the tail currently requires a linear-time walk of
            // the EntryList.  We can make tail access constant-time by converting to
            // a CDLL instead of using our current DLL.
            ObjectWaiter * Tail ;
            for (Tail = List ; Tail->_next != NULL ; Tail = Tail->_next) ;
            assert (Tail != NULL && Tail->_next == NULL, "invariant") ;
            Tail->_next = iterator ;
            iterator->_prev = Tail ;
            iterator->_next = NULL ;
        }
     } else
     if (Policy == 2) {       // prepend to cxq
         // prepend to cxq
         iterator->TState = ObjectWaiter::TS_CXQ ;
         for (;;) {
             ObjectWaiter * Front = _cxq ;
             iterator->_next = Front ;
             if (Atomic::cmpxchg_ptr (iterator, &_cxq, Front) == Front) {
                 break ;
             }
         }
     } else
     if (Policy == 3) {       // append to cxq
        iterator->TState = ObjectWaiter::TS_CXQ ;
        for (;;) {
            ObjectWaiter * Tail ;
            Tail = _cxq ;
            if (Tail == NULL) {
                iterator->_next = NULL ;
                if (Atomic::cmpxchg_ptr (iterator, &_cxq, NULL) == NULL) {
                   break ;
                }
            } else {
                while (Tail->_next != NULL) Tail = Tail->_next ;
                Tail->_next = iterator ;
                iterator->_prev = Tail ;
                iterator->_next = NULL ;
                break ;
            }
        }
     } else {
        ParkEvent * ev = iterator->_event ;
        iterator->TState = ObjectWaiter::TS_RUN ;
        OrderAccess::fence() ;
        ev->unpark() ;
     }

     if (Policy < 4) {
       iterator->wait_reenter_begin(this);
     }

     // _WaitSetLock protects the wait queue, not the EntryList.  We could
     // move the add-to-EntryList operation, above, outside the critical section
     // protected by _WaitSetLock.  In practice that's not useful.  With the
     // exception of wait() timeouts and interrupts the monitor owner
     // is the only thread that grabs _WaitSetLock.  There's almost no contention
     // on _WaitSetLock so it's not profitable to reduce the length of the
     // critical section.
  }

  Thread::SpinRelease (&_WaitSetLock) ;

  if (Tally != 0 && ObjectMonitor::_sync_Notifications != NULL) {
     ObjectMonitor::_sync_Notifications->inc(Tally) ;
  }
}

// -----------------------------------------------------------------------------
// Adaptive Spinning Support
//
// Adaptive spin-then-block - rational spinning
//
// Note that we spin "globally" on _owner with a classic SMP-polite TATAS
// algorithm.  On high order SMP systems it would be better to start with
// a brief global spin and then revert to spinning locally.  In the spirit of MCS/CLH,
// a contending thread could enqueue itself on the cxq and then spin locally
// on a thread-specific variable such as its ParkEvent._Event flag.
// That's left as an exercise for the reader.  Note that global spinning is
// not problematic on Niagara, as the L2$ serves the interconnect and has both
// low latency and massive bandwidth.
//
// Broadly, we can fix the spin frequency -- that is, the % of contended lock
// acquisition attempts where we opt to spin -- at 100% and vary the spin count
// (duration) or we can fix the count at approximately the duration of
// a context switch and vary the frequency.  Of course we could also
// vary both satisfying K == Frequency * Duration, where K is adaptive by monitor.
// See http://j2se.east/~dice/PERSIST/040824-AdaptiveSpinning.html.
//
// This implementation varies the duration "D", where D varies with
// the success rate of recent spin attempts.  (D is capped at approximately
// the length of a round-trip context switch).  The success rate for recent
// spin attempts is a good predictor of the success rate of future spin
// attempts.  The mechanism adapts automatically to varying critical
// section length (lock modality), system load and degree of parallelism.
// D is maintained per-monitor in _SpinDuration and is initialized
// optimistically.  Spin frequency is fixed at 100%.
//
// Note that _SpinDuration is volatile, but we update it without locks
// or atomics.  The code is designed so that _SpinDuration stays within
// a reasonable range even in the presence of races.  The arithmetic
// operations on _SpinDuration are closed over the domain of legal values,
// so at worst a race will install an older but still legal value.
// At the very worst this introduces some apparent non-determinism.
// We might spin when we shouldn't or vice-versa, but since the spin
// counts are relatively short, even in the worst case, the effect is harmless.
//
// Care must be taken that a low "D" value does not become an
// absorbing state.  Transient spinning failures -- when spinning
// is overall profitable -- should not cause the system to converge
// on low "D" values.  We want spinning to be stable and predictable
// and fairly responsive to change and at the same time we don't want
// it to oscillate, become metastable, be "too" non-deterministic,
// or converge on or enter undesirable stable absorbing states.
//
// We implement a feedback-based control system -- using past behavior
// to predict future behavior.  We face two issues: (a) if the
// input signal is random then the spin predictor won't provide optimal
// results, and (b) if the signal frequency is too high then the control
// system, which has some natural response lag, will "chase" the signal.
// (b) can arise from multimodal lock hold times.  Transient preemption
// can also result in apparent bimodal lock hold times.
// Although sub-optimal, neither condition is particularly harmful, as
// in the worst-case we'll spin when we shouldn't or vice-versa.
// The maximum spin duration is rather short so the failure modes aren't bad.
// To be conservative, I've tuned the gain in the system to bias toward
// _not spinning.  Relatedly, the system can sometimes enter a mode where it
// "rings" or oscillates between spinning and not spinning.  This happens
// when spinning is just on the cusp of profitability, however, so the
// situation is not dire.  The state is benign -- there's no need to add
// hysteresis control to damp the transition rate between spinning and
// not spinning.
//
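//
// The adjustment rule used by TrySpin_VaryDuration() below, in rough
// pseudo-form (the knobs are the Knob_* values defined elsewhere in this
// file; this restates the code, it is not an additional policy):
//
//   on a successful spin :  if (D < Knob_SpinLimit) D = max(D, Knob_Poverty) + Knob_Bonus
//   on a failed spin     :  D = max(0, D - Knob_Penalty)
//
// i.e., success nudges the duration up toward Knob_SpinLimit while failure
// decays it toward zero, with Knob_Poverty acting as a floor so that a single
// success can pull the duration back out of a near-zero (absorbing) state.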

intptr_t ObjectMonitor::SpinCallbackArgument = 0 ;
int (*ObjectMonitor::SpinCallbackFunction)(intptr_t, int) = NULL ;

// Spinning: Fixed frequency (100%), vary duration


int ObjectMonitor::TrySpin_VaryDuration (Thread * Self) {

    // Dumb, brutal spin.  Good for comparative measurements against adaptive spinning.
    int ctr = Knob_FixedSpin ;
    if (ctr != 0) {
       while (--ctr >= 0) {
            if (TryLock (Self) > 0) return 1 ;
            SpinPause () ;
       }
       return 0 ;
    }

    for (ctr = Knob_PreSpin + 1; --ctr >= 0 ; ) {
      if (TryLock(Self) > 0) {
        // Increase _SpinDuration ...
        // Note that we don't clamp SpinDuration precisely at SpinLimit.
        // Raising _SpinDuration to the poverty line is key.
        int x = _SpinDuration ;
        if (x < Knob_SpinLimit) {
           if (x < Knob_Poverty) x = Knob_Poverty ;
           _SpinDuration = x + Knob_BonusB ;
        }
        return 1 ;
      }
      SpinPause () ;
    }

    // Admission control - verify preconditions for spinning
    //
    // We always spin a little bit, just to prevent _SpinDuration == 0 from
    // becoming an absorbing state.  Put another way, we spin briefly to
    // sample, just in case the system load, parallelism, contention, or lock
    // modality changed.
    //
    // Consider the following alternative:
    // Periodically set _SpinDuration = _SpinLimit and try a long/full
    // spin attempt.  "Periodically" might mean after a tally of
    // the # of failed spin attempts (or iterations) reaches some threshold.
    // This takes us into the realm of 1-out-of-N spinning, where we
    // hold the duration constant but vary the frequency.

    ctr = _SpinDuration ;
    if (ctr < Knob_SpinBase) ctr = Knob_SpinBase ;
    if (ctr <= 0) return 0 ;

    if (Knob_SuccRestrict && _succ != NULL) return 0 ;
    if (Knob_OState && NotRunnable (Self, (Thread *) _owner)) {
       TEVENT (Spin abort - notrunnable [TOP]);
       return 0 ;
    }

    int MaxSpin = Knob_MaxSpinners ;
    if (MaxSpin >= 0) {
       if (_Spinner > MaxSpin) {
          TEVENT (Spin abort -- too many spinners) ;
          return 0 ;
       }
       // Slightly racy, but benign ...
       Adjust (&_Spinner, 1) ;
    }

    // We're good to spin ... spin ingress.
    // CONSIDER: use Prefetch::write() to avoid RTS->RTO upgrades
    // when preparing to LD...CAS _owner, etc and the CAS is likely
    // to succeed.
    int hits    = 0 ;
    int msk     = 0 ;
    int caspty  = Knob_CASPenalty ;
    int oxpty   = Knob_OXPenalty ;
    int sss     = Knob_SpinSetSucc ;
    if (sss && _succ == NULL ) _succ = Self ;
    Thread * prv = NULL ;

    // There are three ways to exit the following loop:
    // 1.  A successful spin where this thread has acquired the lock.
    // 2.  Spin failure with prejudice
    // 3.  Spin failure without prejudice

    while (--ctr >= 0) {

      // Periodic polling -- Check for pending GC
      // Threads may spin while they're unsafe.
      // We don't want spinning threads to delay the JVM from reaching
      // a stop-the-world safepoint or to steal cycles from GC.
      // If we detect a pending safepoint we abort in order that
      // (a) this thread, if unsafe, doesn't delay the safepoint, and (b)
      // this thread, if safe, doesn't steal cycles from GC.
      // This is in keeping with the "no loitering in runtime" rule.
      // We periodically check to see if there's a safepoint pending.
      if ((ctr & 0xFF) == 0) {
         if (SafepointSynchronize::do_call_back()) {
            TEVENT (Spin: safepoint) ;
            goto Abort ;           // abrupt spin egress
         }
         if (Knob_UsePause & 1) SpinPause () ;

         int (*scb)(intptr_t,int) = SpinCallbackFunction ;
         if (hits > 50 && scb != NULL) {
            int abend = (*scb)(SpinCallbackArgument, 0) ;
         }
      }

      if (Knob_UsePause & 2) SpinPause() ;

      // Exponential back-off ...  Stay off the bus to reduce coherency traffic.
      // This is useful on classic SMP systems, but is of less utility on
      // N1-style CMT platforms.
      //
      // Trade-off: lock acquisition latency vs coherency bandwidth.
      // Lock hold times are typically short.  A histogram
      // of successful spin attempts shows that we usually acquire
      // the lock early in the spin.  That suggests we want to
      // sample _owner frequently in the early phase of the spin,
      // but then back-off and sample less frequently as the spin
      // progresses.  The back-off makes a good citizen on big
      // SMP systems.  Oversampling _owner can consume excessive
      // coherency bandwidth.  Relatedly, if we _oversample _owner we
      // can inadvertently interfere with the ST m->owner=null
      // executed by the lock owner.
      if (ctr & msk) continue ;
      ++hits ;
      if ((hits & 0xF) == 0) {
        // The 0xF, above, corresponds to the exponent.
        // Consider: (msk+1)|msk
        msk = ((msk << 2)|3) & BackOffMask ;
      }

      // Probe _owner with TATAS
      // If this thread observes the monitor transition or flicker
      // from locked to unlocked to locked, then the odds that this
      // thread will acquire the lock in this spin attempt go down
      // considerably.  The same argument applies if the CAS fails
      // or if we observe _owner change from one non-null value to
      // another non-null value.  In such cases we might abort
      // the spin without prejudice or apply a "penalty" to the
      // spin count-down variable "ctr", reducing it by 100, say.

      Thread * ox = (Thread *) _owner ;
      if (ox == NULL) {
         ox = (Thread *) Atomic::cmpxchg_ptr (Self, &_owner, NULL) ;
         if (ox == NULL) {
            // The CAS succeeded -- this thread acquired ownership
            // Take care of some bookkeeping to exit spin state.
            if (sss && _succ == Self) {
               _succ = NULL ;
            }
            if (MaxSpin > 0) Adjust (&_Spinner, -1) ;

            // Increase _SpinDuration :
            // The spin was successful (profitable) so we tend toward
            // longer spin attempts in the future.
            // CONSIDER: factor "ctr" into the _SpinDuration adjustment.
            // If we acquired the lock early in the spin cycle it
            // makes sense to increase _SpinDuration proportionally.
            // Note that we don't clamp SpinDuration precisely at SpinLimit.
            int x = _SpinDuration ;
            if (x < Knob_SpinLimit) {
                if (x < Knob_Poverty) x = Knob_Poverty ;
                _SpinDuration = x + Knob_Bonus ;
            }
            return 1 ;
         }

         // The CAS failed ... we can take any of the following actions:
         // * penalize: ctr -= Knob_CASPenalty
         // * exit spin with prejudice -- goto Abort;
         // * exit spin without prejudice.
         // * Since CAS is high-latency, retry again immediately.
         prv = ox ;
         TEVENT (Spin: cas failed) ;
         if (caspty == -2) break ;
         if (caspty == -1) goto Abort ;
         ctr -= caspty ;
         continue ;
      }

      // Did lock ownership change hands ?
      if (ox != prv && prv != NULL ) {
          TEVENT (spin: Owner changed)
          if (oxpty == -2) break ;
          if (oxpty == -1) goto Abort ;
          ctr -= oxpty ;
      }
      prv = ox ;

      // Abort the spin if the owner is not executing.
      // The owner must be executing in order to drop the lock.
      // Spinning while the owner is OFFPROC is idiocy.
      // Consider: ctr -= RunnablePenalty ;
      if (Knob_OState && NotRunnable (Self, ox)) {
         TEVENT (Spin abort - notrunnable);
         goto Abort ;
      }
      if (sss && _succ == NULL ) _succ = Self ;
   }

   // Spin failed with prejudice -- reduce _SpinDuration.
   // TODO: Use an AIMD-like policy to adjust _SpinDuration.
   // AIMD is globally stable.
   TEVENT (Spin failure) ;
   {
     int x = _SpinDuration ;
     if (x > 0) {
        // Consider an AIMD scheme like: x -= (x >> 3) + 100
        // This is globally stable and tends to damp the response.
        x -= Knob_Penalty ;
        if (x < 0) x = 0 ;
        _SpinDuration = x ;
     }
   }

 Abort:
   if (MaxSpin >= 0) Adjust (&_Spinner, -1) ;
   if (sss && _succ == Self) {
      _succ = NULL ;
      // Invariant: after setting succ=null a contending thread
      // must recheck-retry _owner before parking.  This usually happens
      // in the normal usage of TrySpin(), but it's safest
      // to make TrySpin() as foolproof as possible.
      OrderAccess::fence() ;
      if (TryLock(Self) > 0) return 1 ;
   }
   return 0 ;
}

// NotRunnable() -- informed spinning
//
// Don't bother spinning if the owner is not eligible to drop the lock.
// Peek at the owner's schedctl.sc_state and Thread._thread_values and
// spin only if the owner thread is _thread_in_Java or _thread_in_vm.
// The thread must be runnable in order to drop the lock in timely fashion.
// If the _owner is not runnable then spinning will not likely be
// successful (profitable).
//
// Beware -- the thread referenced by _owner could have died
// so a simple fetch from _owner->_thread_state might trap.
// Instead, we use SafeFetchXX() to safely LD _owner->_thread_state.
// Because of the lifecycle issues the schedctl and _thread_state values
// observed by NotRunnable() might be garbage.  NotRunnable must
// tolerate this and consider the observed _thread_state value
// as advisory.
//
// Beware too, that _owner is sometimes a BasicLock address and sometimes
// a thread pointer.  We differentiate the two cases with OwnerIsThread.
// Alternately, we might tag the type (thread pointer vs basiclock pointer)
// with the LSB of _owner.  Another option would be to probabilistically probe
// the putative _owner->TypeTag value.
//
// Checking _thread_state isn't perfect.  Even if the thread is
// in_java it might be blocked on a page-fault or have been preempted
// and sitting on a ready/dispatch queue.  _thread_state in conjunction
// with schedctl.sc_state gives us a good picture of what the
// thread is doing, however.
//
// TODO: check schedctl.sc_state.
// We'll need to use SafeFetch32() to read from the schedctl block.
// See RFE #5004247 and http://sac.sfbay.sun.com/Archives/CaseLog/arc/PSARC/2005/351/
//
// The return value from NotRunnable() is *advisory* -- the
// result is based on sampling and is not necessarily coherent.
// The caller must tolerate false-negative and false-positive errors.
// Spinning, in general, is probabilistic anyway.


int ObjectMonitor::NotRunnable (Thread * Self, Thread * ox) {
    // Check either OwnerIsThread or ox->TypeTag == 2BAD.
    if (!OwnerIsThread) return 0 ;

    if (ox == NULL) return 0 ;

    // Avoid transitive spinning ...
    // Say T1 spins or blocks trying to acquire L.  T1._Stalled is set to L.
    // Immediately after T1 acquires L it's possible that T2, also
    // spinning on L, will see L.Owner=T1 and T1._Stalled=L.
    // This occurs transiently after T1 acquired L but before
    // T1 managed to clear T1.Stalled.  T2 does not need to abort
    // its spin in this circumstance.
    intptr_t BlockedOn = SafeFetchN ((intptr_t *) &ox->_Stalled, intptr_t(1)) ;

    if (BlockedOn == 1) return 1 ;
    if (BlockedOn != 0) {
      return BlockedOn != intptr_t(this) && _owner == ox ;
    }

    assert (sizeof(((JavaThread *)ox)->_thread_state) == sizeof(int), "invariant") ;
    int jst = SafeFetch32 ((int *) &((JavaThread *) ox)->_thread_state, -1) ;
    // consider also: jst != _thread_in_Java -- but that's overspecific.
    return jst == _thread_blocked || jst == _thread_in_native ;
}
|
// -----------------------------------------------------------------------------
// WaitSet management ...

ObjectWaiter::ObjectWaiter(Thread* thread) {
  _next     = NULL;
  _prev     = NULL;
  _notified = 0;
  TState    = TS_RUN ;
  _thread   = thread;
  _event    = thread->_ParkEvent ;
  _active   = false;
  assert (_event != NULL, "invariant") ;
}

void ObjectWaiter::wait_reenter_begin(ObjectMonitor *mon) {
  JavaThread *jt = (JavaThread *)this->_thread;
  _active = JavaThreadBlockedOnMonitorEnterState::wait_reenter_begin(jt, mon);
}

void ObjectWaiter::wait_reenter_end(ObjectMonitor *mon) {
  JavaThread *jt = (JavaThread *)this->_thread;
  JavaThreadBlockedOnMonitorEnterState::wait_reenter_end(jt, _active);
}

inline void ObjectMonitor::AddWaiter(ObjectWaiter* node) {
  assert(node != NULL, "should not add NULL node");
  assert(node->_prev == NULL, "node already in list");
  assert(node->_next == NULL, "node already in list");
  // put node at end of queue (circular doubly linked list)
  if (_WaitSet == NULL) {
    _WaitSet = node;
    node->_prev = node;
    node->_next = node;
  } else {
    ObjectWaiter* head = _WaitSet ;
    ObjectWaiter* tail = head->_prev;
    assert(tail->_next == head, "invariant check");
    tail->_next = node;
    head->_prev = node;
    node->_next = head;
    node->_prev = tail;
  }
}

inline ObjectWaiter* ObjectMonitor::DequeueWaiter() {
  // dequeue the very first waiter
  ObjectWaiter* waiter = _WaitSet;
  if (waiter) {
    DequeueSpecificWaiter(waiter);
  }
  return waiter;
}

inline void ObjectMonitor::DequeueSpecificWaiter(ObjectWaiter* node) {
  assert(node != NULL, "should not dequeue NULL node");
  assert(node->_prev != NULL, "node already removed from list");
  assert(node->_next != NULL, "node already removed from list");
  // When the waiter has woken up because of an interrupt, a timeout,
  // or another spurious wake-up, dequeue it from the wait set.
  ObjectWaiter* next = node->_next;
  if (next == node) {
    assert(node->_prev == node, "invariant check");
    _WaitSet = NULL;
  } else {
    ObjectWaiter* prev = node->_prev;
    assert(prev->_next == node, "invariant check");
    assert(next->_prev == node, "invariant check");
    next->_prev = prev;
    prev->_next = next;
    if (_WaitSet == node) {
      _WaitSet = next;
    }
  }
  node->_next = NULL;
  node->_prev = NULL;
}
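
// Illustrative sketch (not HotSpot code): the circular doubly linked list
// discipline used for _WaitSet, reduced to a free-standing example.  The
// Node/enqueue_tail/dequeue_head names are hypothetical; only the linking
// scheme -- head->prev is the tail, and a single element points at itself --
// mirrors AddWaiter/DequeueSpecificWaiter above.  #if 0-guarded.
#if 0
#include <cassert>
#include <cstddef>

struct Node {
  Node* next;
  Node* prev;
};

static void enqueue_tail(Node*& head, Node* n) {
  assert(n->next == NULL && n->prev == NULL);
  if (head == NULL) {
    head = n;
    n->next = n->prev = n;          // singleton list points at itself
  } else {
    Node* tail = head->prev;        // tail is always head->prev
    tail->next = n;
    head->prev = n;
    n->next = head;
    n->prev = tail;
  }
}

static Node* dequeue_head(Node*& head) {
  Node* n = head;
  if (n == NULL) return NULL;
  if (n->next == n) {
    head = NULL;                    // last element removed
  } else {
    head = n->next;
    n->prev->next = n->next;
    n->next->prev = n->prev;
  }
  n->next = n->prev = NULL;         // detach, as DequeueSpecificWaiter does
  return n;
}
#endif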
|
// -----------------------------------------------------------------------------
// PerfData support
PerfCounter * ObjectMonitor::_sync_ContendedLockAttempts = NULL ;
PerfCounter * ObjectMonitor::_sync_FutileWakeups         = NULL ;
PerfCounter * ObjectMonitor::_sync_Parks                 = NULL ;
PerfCounter * ObjectMonitor::_sync_EmptyNotifications    = NULL ;
PerfCounter * ObjectMonitor::_sync_Notifications         = NULL ;
PerfCounter * ObjectMonitor::_sync_PrivateA              = NULL ;
PerfCounter * ObjectMonitor::_sync_PrivateB              = NULL ;
PerfCounter * ObjectMonitor::_sync_SlowExit              = NULL ;
PerfCounter * ObjectMonitor::_sync_SlowEnter             = NULL ;
PerfCounter * ObjectMonitor::_sync_SlowNotify            = NULL ;
PerfCounter * ObjectMonitor::_sync_SlowNotifyAll         = NULL ;
PerfCounter * ObjectMonitor::_sync_FailedSpins           = NULL ;
PerfCounter * ObjectMonitor::_sync_SuccessfulSpins       = NULL ;
PerfCounter * ObjectMonitor::_sync_MonInCirculation      = NULL ;
PerfCounter * ObjectMonitor::_sync_MonScavenged          = NULL ;
PerfCounter * ObjectMonitor::_sync_Inflations            = NULL ;
PerfCounter * ObjectMonitor::_sync_Deflations            = NULL ;
PerfLongVariable * ObjectMonitor::_sync_MonExtant        = NULL ;

// One-shot global initialization for the sync subsystem.
// We could also defer initialization and initialize on-demand
// the first time we call inflate().  Initialization would
// be protected - like so many things - by the MonitorCache_lock.

void ObjectMonitor::Initialize () {
  static int InitializationCompleted = 0 ;
  assert (InitializationCompleted == 0, "invariant") ;
  InitializationCompleted = 1 ;
  if (UsePerfData) {
    EXCEPTION_MARK ;
    #define NEWPERFCOUNTER(n)  {n = PerfDataManager::create_counter(SUN_RT, #n, PerfData::U_Events, CHECK); }
    #define NEWPERFVARIABLE(n) {n = PerfDataManager::create_variable(SUN_RT, #n, PerfData::U_Events, CHECK); }
    NEWPERFCOUNTER(_sync_Inflations) ;
    NEWPERFCOUNTER(_sync_Deflations) ;
    NEWPERFCOUNTER(_sync_ContendedLockAttempts) ;
    NEWPERFCOUNTER(_sync_FutileWakeups) ;
    NEWPERFCOUNTER(_sync_Parks) ;
    NEWPERFCOUNTER(_sync_EmptyNotifications) ;
    NEWPERFCOUNTER(_sync_Notifications) ;
    NEWPERFCOUNTER(_sync_SlowEnter) ;
    NEWPERFCOUNTER(_sync_SlowExit) ;
    NEWPERFCOUNTER(_sync_SlowNotify) ;
    NEWPERFCOUNTER(_sync_SlowNotifyAll) ;
    NEWPERFCOUNTER(_sync_FailedSpins) ;
    NEWPERFCOUNTER(_sync_SuccessfulSpins) ;
    NEWPERFCOUNTER(_sync_PrivateA) ;
    NEWPERFCOUNTER(_sync_PrivateB) ;
    NEWPERFCOUNTER(_sync_MonInCirculation) ;
    NEWPERFCOUNTER(_sync_MonScavenged) ;
    NEWPERFVARIABLE(_sync_MonExtant) ;
    #undef NEWPERFCOUNTER
    #undef NEWPERFVARIABLE
  }
}
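
// Illustrative sketch (not HotSpot code): the stringizing-macro pattern used
// by NEWPERFCOUNTER/NEWPERFVARIABLE above, reduced to a standalone program.
// The '#n' operator turns the identifier into the string under which the
// counter is registered, so the external name always matches the C++ symbol.
// All names below (_demo_Parks, NEW_DEMO_COUNTER) are hypothetical; the block
// is #if 0-guarded so it plays no part in the build.
#if 0
#include <cstdio>

static int _demo_Parks ;
static int _demo_Inflations ;

// #n stringizes the argument: NEW_DEMO_COUNTER(_demo_Parks) registers "_demo_Parks".
#define NEW_DEMO_COUNTER(n) do { n = 0; std::printf("created counter \"%s\"\n", #n); } while (0)

int main() {
  NEW_DEMO_COUNTER(_demo_Parks) ;
  NEW_DEMO_COUNTER(_demo_Inflations) ;
  return 0 ;
}
#undef NEW_DEMO_COUNTER
#endif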
|
// Compile-time asserts
// When possible, it's better to catch errors deterministically at
// compile-time than at runtime.  The downside to using compile-time
// asserts is that the error message -- often something about negative array
// indices -- is opaque.

#define CTASSERT(x) { int tag[1-(2*!(x))]; printf ("Tag @" INTPTR_FORMAT "\n", (intptr_t)tag); }

void ObjectMonitor::ctAsserts() {
  CTASSERT(offset_of (ObjectMonitor, _header) == 0);
}
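
// Illustrative sketch (not HotSpot code): how the negative-array-size trick in
// CTASSERT rejects a false condition at compile time, alongside the C++11
// static_assert that expresses the same check with a readable diagnostic.
// DEMO_CTASSERT is a hypothetical local name; the block is #if 0-guarded.
#if 0
// If cond is false, 1 - (2 * !(cond)) evaluates to -1 and the array type is
// ill-formed, so compilation stops; if cond is true the check compiles away.
#define DEMO_CTASSERT(cond) typedef char demo_ctassert_failed[1 - (2 * !(cond))]

DEMO_CTASSERT(sizeof(int) >= 2) ;           // compiles
// DEMO_CTASSERT(sizeof(int) == 1) ;        // would fail: array of negative size

static_assert(sizeof(int) >= 2, "same check, clearer message (C++11)") ;
#endif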
|
static char * kvGet (char * kvList, const char * Key) {
  if (kvList == NULL) return NULL ;
  size_t n = strlen (Key) ;
  char * Search ;
  for (Search = kvList ; *Search ; Search += strlen(Search) + 1) {
    if (strncmp (Search, Key, n) == 0) {
      if (Search[n] == '=') return Search + n + 1 ;
      if (Search[n] == 0)   return (char *) "1" ;
    }
  }
  return NULL ;
}

static int kvGetInt (char * kvList, const char * Key, int Default) {
  char * v = kvGet (kvList, Key) ;
  int rslt = v ? ::strtol (v, NULL, 0) : Default ;
  if (Knob_ReportSettings && v != NULL) {
    ::printf ("  SyncKnob: %s %d(%d)\n", Key, rslt, Default) ;
    ::fflush (stdout) ;
  }
  return rslt ;
}
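
// Illustrative sketch (not HotSpot code): how a SyncKnobs-style string such as
// "SpinLimit=4096:Verbose" becomes the NUL-separated list that kvGet() walks.
// demo_kv_get mirrors kvGet's contract: "Key=Value" yields the value, a bare
// "Key" yields "1", and a missing key yields NULL.  The demo_* names are
// hypothetical; the block is #if 0-guarded.
#if 0
#include <cstdio>
#include <cstring>
#include <cstdlib>

static const char* demo_kv_get(const char* kv_list, const char* key) {
  size_t n = strlen(key);
  for (const char* p = kv_list; *p; p += strlen(p) + 1) {
    if (strncmp(p, key, n) == 0) {
      if (p[n] == '=') return p + n + 1;   // "Key=Value"
      if (p[n] == 0)   return "1";         // bare "Key" means true
    }
  }
  return NULL;
}

int main() {
  const char* knobs = "SpinLimit=4096:Verbose";
  size_t sz = strlen(knobs);
  char* list = (char*) calloc(sz + 2, 1);  // double-NUL terminated, as above
  memcpy(list, knobs, sz);
  for (char* p = list; *p; p++) { if (*p == ':') *p = 0; }

  printf("SpinLimit=%s Verbose=%s\n",
         demo_kv_get(list, "SpinLimit"),    // prints "4096"
         demo_kv_get(list, "Verbose"));     // prints "1"
  free(list);
  return 0;
}
#endif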
|
void ObjectMonitor::DeferredInitialize () {
  if (InitDone > 0) return ;
  if (Atomic::cmpxchg (-1, &InitDone, 0) != 0) {
    while (InitDone != 1) ;
    return ;
  }

  // One-shot global initialization ...
  // The initialization is idempotent, so we don't need locks.
  // In the future consider doing this via os::init_2().
  // SyncKnobs consist of <Key>=<Value> pairs in the style
  // of environment variables.  Start by converting ':' to NUL.

  if (SyncKnobs == NULL) SyncKnobs = "" ;

  size_t sz = strlen (SyncKnobs) ;
  char * knobs = (char *) malloc (sz + 2) ;
  if (knobs == NULL) {
    vm_exit_out_of_memory (sz + 2, OOM_MALLOC_ERROR, "Parse SyncKnobs") ;
    guarantee (0, "invariant") ;
  }
  strcpy (knobs, SyncKnobs) ;
  knobs[sz+1] = 0 ;
  for (char * p = knobs ; *p ; p++) {
    if (*p == ':') *p = 0 ;
  }

  #define SETKNOB(x) { Knob_##x = kvGetInt (knobs, #x, Knob_##x); }
  SETKNOB(ReportSettings) ;
  SETKNOB(Verbose) ;
  SETKNOB(FixedSpin) ;
  SETKNOB(SpinLimit) ;
  SETKNOB(SpinBase) ;
  SETKNOB(SpinBackOff);
  SETKNOB(CASPenalty) ;
  SETKNOB(OXPenalty) ;
  SETKNOB(LogSpins) ;
  SETKNOB(SpinSetSucc) ;
  SETKNOB(SuccEnabled) ;
  SETKNOB(SuccRestrict) ;
  SETKNOB(Penalty) ;
  SETKNOB(Bonus) ;
  SETKNOB(BonusB) ;
  SETKNOB(Poverty) ;
  SETKNOB(SpinAfterFutile) ;
  SETKNOB(UsePause) ;
  SETKNOB(SpinEarly) ;
  SETKNOB(OState) ;
  SETKNOB(MaxSpinners) ;
  SETKNOB(PreSpin) ;
  SETKNOB(ExitPolicy) ;
  SETKNOB(QMode);
  SETKNOB(ResetEvent) ;
  SETKNOB(MoveNotifyee) ;
  SETKNOB(FastHSSEC) ;
  #undef SETKNOB

  if (os::is_MP()) {
    BackOffMask = (1 << Knob_SpinBackOff) - 1 ;
    if (Knob_ReportSettings) ::printf ("BackOffMask=%X\n", BackOffMask) ;
    // CONSIDER: BackOffMask = ROUNDUP_NEXT_POWER2 (ncpus-1)
  } else {
    Knob_SpinLimit = 0 ;
    Knob_SpinBase  = 0 ;
    Knob_PreSpin   = 0 ;
    Knob_FixedSpin = -1 ;
  }

  if (Knob_LogSpins == 0) {
    ObjectMonitor::_sync_FailedSpins = NULL ;
  }

  free (knobs) ;
  OrderAccess::fence() ;
  InitDone = 1 ;
}
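
// Illustrative sketch (not HotSpot code): the InitDone protocol above, restated
// with std::atomic.  One thread wins the CAS (0 -> -1), performs the one-shot
// setup, and publishes 1 with a release store; every other caller spins until
// it observes 1.  demo_init_done/demo_initialize are hypothetical names; the
// block is #if 0-guarded.
#if 0
#include <atomic>

static std::atomic<int> demo_init_done(0);   // 0 = not started, -1 = in progress, 1 = done

static void demo_initialize() {
  int expected = 0;
  if (!demo_init_done.compare_exchange_strong(expected, -1)) {
    // Another thread is (or was) initializing: wait for it to publish 1.
    while (demo_init_done.load(std::memory_order_acquire) != 1) { /* spin */ }
    return;
  }
  // ... one-shot, idempotent setup goes here ...
  demo_init_done.store(1, std::memory_order_release);  // publish, like OrderAccess::fence() + InitDone = 1
}
#endif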
|
#ifndef PRODUCT
void ObjectMonitor::verify() {
}

void ObjectMonitor::print() {
}
#endif