/*
 * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#include "memory/resourceArea.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/preserveException.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif

#if defined(__GNUC__)
// Need to inhibit inlining for older versions of GCC to avoid build-time failures
#define ATTR __attribute__((noinline))
#else
#define ATTR
#endif

// The "core" versions of monitor enter and exit reside in this file.
// The interpreter and compilers contain specialized transliterated
// variants of the enter-exit fast-path operations.  See i486.ad fast_lock(),
// for instance.  If you make changes here, make sure to modify the
// interpreter, and both C1 and C2 fast-path inline locking code emission.
//
//
// -----------------------------------------------------------------------------

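// A rough map of the mark-word states this file deals with: neutral
// (unlocked, possibly carrying a hash code), biased (bias pattern set),
// stack-locked (mark points to a BasicLock in the owner's frame),
// INFLATING (a transient value while some thread installs an
// ObjectMonitor), and inflated (mark is a tagged pointer to an
// ObjectMonitor).  The predicates is_neutral(), has_bias_pattern(),
// has_locker(), is_being_inflated() and has_monitor() used throughout
// test for these states.
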
#ifdef DTRACE_ENABLED

// Only bother with this argument setup if dtrace is available
// TODO-FIXME: probes should not fire when caller is _blocked.  assert() accordingly.

#define DTRACE_MONITOR_PROBE_COMMON(obj, thread)                           \
  char* bytes = NULL;                                                      \
  int len = 0;                                                             \
  jlong jtid = SharedRuntime::get_java_tid(thread);                        \
  Symbol* klassname = ((oop)(obj))->klass()->name();                       \
  if (klassname != NULL) {                                                 \
    bytes = (char*)klassname->bytes();                                     \
    len = klassname->utf8_length();                                        \
  }

#ifndef USDT2
HS_DTRACE_PROBE_DECL5(hotspot, monitor__wait,
  jlong, uintptr_t, char*, int, long);
HS_DTRACE_PROBE_DECL4(hotspot, monitor__waited,
  jlong, uintptr_t, char*, int);

#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HS_DTRACE_PROBE5(hotspot, monitor__wait, jtid,                       \
                       (monitor), bytes, len, (millis));                   \
    }                                                                      \
  }

#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)                  \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HS_DTRACE_PROBE4(hotspot, monitor__##probe, jtid,                    \
                       (uintptr_t)(monitor), bytes, len);                  \
    }                                                                      \
  }

#else /* USDT2 */

#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_WAIT(jtid,                                           \
                           (uintptr_t)(monitor), bytes, len, (millis));    \
    }                                                                      \
  }

#define HOTSPOT_MONITOR_PROBE_waited HOTSPOT_MONITOR_PROBE_WAITED

#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)                  \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_PROBE_##probe(jtid, /* probe = waited */             \
                                    (uintptr_t)(monitor), bytes, len);     \
    }                                                                      \
  }

#endif /* USDT2 */
#else //  ndef DTRACE_ENABLED

#define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon)    {;}
#define DTRACE_MONITOR_PROBE(probe, obj, thread, mon)          {;}

#endif // ndef DTRACE_ENABLED

// This exists only as a workaround of dtrace bug 6254741
int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
  DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
  return 0;
}

#define NINFLATIONLOCKS 256
static volatile intptr_t InflationLocks [NINFLATIONLOCKS] ;

ObjectMonitor * ObjectSynchronizer::gBlockList = NULL ;
ObjectMonitor * volatile ObjectSynchronizer::gFreeList  = NULL ;
ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList  = NULL ;
int ObjectSynchronizer::gOmInUseCount = 0;
static volatile intptr_t ListLock = 0 ;      // protects global monitor free-list cache
static volatile int MonitorFreeCount  = 0 ;  // # on gFreeList
static volatile int MonitorPopulation = 0 ;  // # Extant -- in circulation
#define CHAINMARKER ((oop)-1)

// -----------------------------------------------------------------------------
//  Fast Monitor Enter/Exit
// This is the fast monitor enter.  The interpreter and compilers use
// assembly copies of this code.  Make sure to update that code if the
// following function is changed.  The implementation is extremely
// sensitive to race conditions.  Be careful.

void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock, bool attempt_rebias, TRAPS) {
  if (UseBiasedLocking) {
    if (!SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD);
      if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
        return;
      }
    } else {
      assert(!attempt_rebias, "can not rebias toward VM thread");
      BiasedLocking::revoke_at_safepoint(obj);
    }
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  slow_enter (obj, lock, THREAD) ;
}

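// A note on the terms used below: the BasicLock is the on-stack "box"
// allocated for a synchronized region.  On a successful stack-lock the
// object's neutral mark word is copied into the box -- the "displaced
// header" -- and the mark word is CASed to point at the box.  A NULL
// displaced header denotes a recursive stack-lock, which is why
// fast_exit() below treats dhw == NULL as a no-op.
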
void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) {
  assert(!object->mark()->has_bias_pattern(), "should not see bias pattern here");
  // if displaced header is null, the previous enter is recursive enter, no-op
  markOop dhw = lock->displaced_header();
  markOop mark ;
  if (dhw == NULL) {
     // Recursive stack-lock.
     // Diagnostics -- Could be: stack-locked, inflating, inflated.
     mark = object->mark() ;
     assert (!mark->is_neutral(), "invariant") ;
     if (mark->has_locker() && mark != markOopDesc::INFLATING()) {
        assert(THREAD->is_lock_owned((address)mark->locker()), "invariant") ;
     }
     if (mark->has_monitor()) {
        ObjectMonitor * m = mark->monitor() ;
        assert(((oop)(m->object()))->mark() == mark, "invariant") ;
        assert(m->is_entered(THREAD), "invariant") ;
     }
     return ;
  }

  mark = object->mark() ;

  // If the object is stack-locked by the current thread, try to
  // swing the displaced header from the box back to the mark.
  if (mark == (markOop) lock) {
     assert (dhw->is_neutral(), "invariant") ;
     if ((markOop) Atomic::cmpxchg_ptr (dhw, object->mark_addr(), mark) == mark) {
        TEVENT (fast_exit: release stacklock) ;
        return;
     }
  }

  ObjectSynchronizer::inflate(THREAD, object)->exit (true, THREAD) ;
}

// -----------------------------------------------------------------------------
// Interpreter/Compiler Slow Case
// This routine is used to handle the interpreter/compiler slow case.
// We don't need to use the fast path here, because it must have
// failed in the interpreter/compiler code.
void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
  markOop mark = obj->mark();
  assert(!mark->has_bias_pattern(), "should not see bias pattern here");

  if (mark->is_neutral()) {
    // Anticipate successful CAS -- the ST of the displaced mark must
    // be visible <= the ST performed by the CAS.
    lock->set_displaced_header(mark);
    if (mark == (markOop) Atomic::cmpxchg_ptr(lock, obj()->mark_addr(), mark)) {
      TEVENT (slow_enter: release stacklock) ;
      return ;
    }
    // Fall through to inflate() ...
  } else
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    assert(lock != mark->locker(), "must not re-lock the same lock");
    assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
    lock->set_displaced_header(NULL);
    return;
  }

#if 0
  // The following optimization isn't particularly useful.
  if (mark->has_monitor() && mark->monitor()->is_entered(THREAD)) {
    lock->set_displaced_header (NULL) ;
    return ;
  }
#endif

  // The object header will never be displaced to this lock,
  // so it does not matter what the value is, except that it
  // must be non-zero to avoid looking like a re-entrant lock,
  // and must not look locked either.
  lock->set_displaced_header(markOopDesc::unused_mark());
  ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD);
}

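// The stack-lock acquisition in slow_enter() boils down to the following
// publication protocol (a rough sketch, not compiled here):
//   lock->displaced_header = obj->mark;    // store the displaced mark first
//   CAS(obj->mark_addr(), mark, lock);     // then publish the box pointer
// The store of the displaced mark must become visible no later than the CAS
// itself; otherwise a contending thread could observe a mark word pointing
// at the box while the box still holds a stale header.
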
// This routine is used to handle the interpreter/compiler slow case.
// We don't need to use the fast path here, because it must have
// failed in the interpreter/compiler code.  Simply using the heavy-
// weight monitor should be OK, unless someone finds otherwise.
void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
  fast_exit (object, lock, THREAD) ;
}

// -----------------------------------------------------------------------------
// Class Loader support to work around deadlocks on the class loader lock objects
// Also used by GC
// complete_exit()/reenter() are used to wait on a nested lock
// i.e. to give up an outer lock completely and then re-enter
// Used when holding nested locks - lock acquisition order: lock1 then lock2
//  1) complete_exit lock1 - saving recursion count
//  2) wait on lock2
//  3) when notified on lock2, unlock lock2
//  4) reenter lock1 with original recursion count
//  5) lock lock2
// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
  TEVENT (complete_exit) ;
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());

  return monitor->complete_exit(THREAD);
}

// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
  TEVENT (reenter) ;
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());

  monitor->reenter(recursion, THREAD);
}
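// A rough sketch of the calling pattern described above (hypothetical
// caller; 'rec' carries the saved recursion count between the two calls):
//   intptr_t rec = ObjectSynchronizer::complete_exit(lock1, THREAD);  // step 1
//   ... wait on, be notified on, and unlock lock2 ...                 // steps 2-3
//   ObjectSynchronizer::reenter(lock1, rec, THREAD);                  // step 4
//   ... re-acquire lock2 ...                                          // step 5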
|
// -----------------------------------------------------------------------------
// JNI locks on java objects
// NOTE: must use heavy weight monitor to handle jni monitor enter
void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) { // possible entry from jni enter
  // the current locking is from JNI instead of Java code
  TEVENT (jni_enter) ;
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  THREAD->set_current_pending_monitor_is_from_java(false);
  ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD);
  THREAD->set_current_pending_monitor_is_from_java(true);
}

// NOTE: must use heavy weight monitor to handle jni monitor enter
bool ObjectSynchronizer::jni_try_enter(Handle obj, Thread* THREAD) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate_helper(obj());
  return monitor->try_enter(THREAD);
}

// NOTE: must use heavy weight monitor to handle jni monitor exit
void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
  TEVENT (jni_exit) ;
  if (UseBiasedLocking) {
    Handle h_obj(THREAD, obj);
    BiasedLocking::revoke_and_rebias(h_obj, false, THREAD);
    obj = h_obj();
  }
  assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj);
  // If this thread has locked the object, exit the monitor.  Note:  can't use
  // monitor->check(CHECK); must exit even if an exception is pending.
  if (monitor->check(THREAD)) {
     monitor->exit(true, THREAD);
  }
}

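// For context: these entry points sit behind the JNI MonitorEnter and
// MonitorExit functions.  A minimal native caller would look roughly like:
//   jint rc = env->MonitorEnter(obj);   // ends up in jni_enter()
//   ... critical section ...
//   rc = env->MonitorExit(obj);         // ends up in jni_exit()
// As the NOTEs above say, JNI locking always goes straight to the inflated
// (heavyweight) monitor rather than attempting a stack-lock.
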
// -----------------------------------------------------------------------------
// Internal VM locks on java objects
// standard constructor, allows locking failures
ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool doLock) {
  _dolock = doLock;
  _thread = thread;
  debug_only(if (StrictSafepointChecks) _thread->check_for_valid_safepoint_state(false);)
  _obj = obj;

  if (_dolock) {
    TEVENT (ObjectLocker) ;

    ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread);
  }
}

ObjectLocker::~ObjectLocker() {
  if (_dolock) {
    ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread);
  }
}

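// A rough usage sketch for VM-internal callers (h_obj being a Handle to
// the object to lock):
//   {
//     ObjectLocker ol(h_obj, THREAD);  // fast_enter() in the constructor
//     ... operate on the locked object ...
//   }                                  // fast_exit() in the destructor
// Scoping the guard to a block keeps lock and unlock paired even when the
// enclosed code returns early.
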
// -----------------------------------------------------------------------------
//  Wait/Notify/NotifyAll
// NOTE: must use heavy weight monitor to handle wait()
void ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    TEVENT (wait - throw IAX) ;
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
  DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
  monitor->wait(millis, true, THREAD);

  /* This dummy call is in place to get around dtrace bug 6254741.  Once
     that's fixed we can uncomment the following line and remove the call */
  // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
  dtrace_waited_probe(monitor, obj, THREAD);
}

void ObjectSynchronizer::waitUninterruptibly (Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    TEVENT (wait - throw IAX) ;
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectSynchronizer::inflate(THREAD, obj()) -> wait(millis, false, THREAD) ;
}

void ObjectSynchronizer::notify(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  ObjectSynchronizer::inflate(THREAD, obj())->notify(THREAD);
}

// NOTE: see comment of notify()
void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  ObjectSynchronizer::inflate(THREAD, obj())->notifyAll(THREAD);
}

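// Why the has_locker()/is_lock_owned() early return in notify() and
// notifyall() is safe (a rough argument): wait() always inflates the
// monitor, so if the mark word still points at the current thread's own
// stack lock there cannot be any waiters, and the notification is a no-op.
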
// -----------------------------------------------------------------------------
// Hash Code handling
//
// Performance concern:
// OrderAccess::storestore() calls release() which STs 0 into the global volatile
// OrderAccess::Dummy variable.  This store is unnecessary for correctness.
// Many threads STing into a common location causes considerable cache migration
// or "sloshing" on large SMP systems.  As such, I avoid using OrderAccess::storestore()
// until it's repaired.  In some cases OrderAccess::fence() -- which incurs local
// latency on the executing processor -- is a better choice as it scales on SMP
// systems.  See http://blogs.sun.com/dave/entry/biased_locking_in_hotspot for a
// discussion of coherency costs.  Note that all our current reference platforms
// provide strong ST-ST order, so the issue is moot on IA32, x64, and SPARC.
//
// As a general policy we use "volatile" to control compiler-based reordering
// and explicit fences (barriers) to control for architectural reordering performed
// by the CPU(s) or platform.

struct SharedGlobals {
    // These are highly shared mostly-read variables.
    // To avoid false-sharing they need to be the sole occupants of a $ line.
    double padPrefix [8];
    volatile int stwRandom ;
    volatile int stwCycle ;

    // Hot RW variables -- Sequester to avoid false-sharing
    double padSuffix [16];
    volatile int hcSequence ;
    double padFinal [8] ;
} ;

static SharedGlobals GVars ;
static int MonitorScavengeThreshold = 1000000 ;
static volatile int ForceMonitorScavenge = 0 ; // Scavenge required and pending

static markOop ReadStableMark (oop obj) {
  markOop mark = obj->mark() ;
  if (!mark->is_being_inflated()) {
    return mark ;       // normal fast-path return
  }

  int its = 0 ;
  for (;;) {
    markOop mark = obj->mark() ;
    if (!mark->is_being_inflated()) {
      return mark ;     // normal fast-path return
    }

    // The object is being inflated by some other thread.
    // The caller of ReadStableMark() must wait for inflation to complete.
    // Avoid live-lock
    // TODO: consider calling SafepointSynchronize::do_call_back() while
    // spinning to see if there's a safepoint pending.  If so, immediately
    // yielding or blocking would be appropriate.  Avoid spinning while
    // there is a safepoint pending.
    // TODO: add inflation contention performance counters.
    // TODO: restrict the aggregate number of spinners.

    ++its ;
    if (its > 10000 || !os::is_MP()) {
       if (its & 1) {
         os::NakedYield() ;
         TEVENT (Inflate: INFLATING - yield) ;
       } else {
         // Note that the following code attenuates the livelock problem but is not
         // a complete remedy.  A more complete solution would require that the inflating
         // thread hold the associated inflation lock.  The following code simply restricts
         // the number of spinners to at most one.  We'll have N-2 threads blocked
         // on the inflationlock, 1 thread holding the inflation lock and using
         // a yield/park strategy, and 1 thread in the midst of inflation.
         // A more refined approach would be to change the encoding of INFLATING
         // to allow encapsulation of a native thread pointer.  Threads waiting for
         // inflation to complete would use CAS to push themselves onto a singly linked
         // list rooted at the markword.  Once enqueued, they'd loop, checking a per-thread flag
         // and calling park().  When inflation was complete the thread that accomplished inflation
         // would detach the list and set the markword to inflated with a single CAS and
         // then for each thread on the list, set the flag and unpark() the thread.
         // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease
         // wakes at most one thread whereas we need to wake the entire list.
         int ix = (intptr_t(obj) >> 5) & (NINFLATIONLOCKS-1) ;
         int YieldThenBlock = 0 ;
         assert (ix >= 0 && ix < NINFLATIONLOCKS, "invariant") ;
         assert ((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant") ;
         Thread::muxAcquire (InflationLocks + ix, "InflationLock") ;
         while (obj->mark() == markOopDesc::INFLATING()) {
           // Beware: NakedYield() is advisory and has almost no effect on some platforms
           // so we periodically call Self->_ParkEvent->park(1).
           // We use a mixed spin/yield/block mechanism.
           if ((YieldThenBlock++) >= 16) {
              Thread::current()->_ParkEvent->park(1) ;
           } else {
              os::NakedYield() ;
           }
         }
         Thread::muxRelease (InflationLocks + ix ) ;
         TEVENT (Inflate: INFLATING - yield/park) ;
       }
    } else {
       SpinPause() ;       // SMP-polite spinning
    }
  }
}

// hashCode() generation :
//
// Possibilities:
// * MD5Digest of {obj,stwRandom}
// * CRC32 of {obj,stwRandom} or any linear-feedback shift register function.
// * A DES- or AES-style SBox[] mechanism
// * One of the Phi-based schemes, such as:
//   2654435761 = 2^32 * Phi (golden ratio)
//   HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stwRandom ;
// * A variation of Marsaglia's shift-xor RNG scheme.
// * (obj ^ stwRandom) is appealing, but can result
//   in undesirable regularity in the hashCode values of adjacent objects
//   (objects allocated back-to-back, in particular).  This could potentially
//   result in hashtable collisions and reduced hashtable efficiency.
//   There are simple ways to "diffuse" the middle address bits over the
//   generated hashCode values:
//

static inline intptr_t get_next_hash(Thread * Self, oop obj) {
  intptr_t value = 0 ;
  if (hashCode == 0) {
     // This form uses an unguarded global Park-Miller RNG,
     // so it's possible for two threads to race and generate the same RNG.
     // On MP systems we'll have lots of RW access to a global, so the
     // mechanism induces lots of coherency traffic.
     value = os::random() ;
  } else
  if (hashCode == 1) {
     // This variation has the property of being stable (idempotent)
     // between STW operations.  This can be useful in some of the 1-0
     // synchronization schemes.
     intptr_t addrBits = intptr_t(obj) >> 3 ;
     value = addrBits ^ (addrBits >> 5) ^ GVars.stwRandom ;
  } else
  if (hashCode == 2) {
     value = 1 ;            // for sensitivity testing
  } else
  if (hashCode == 3) {
     value = ++GVars.hcSequence ;
  } else
  if (hashCode == 4) {
     value = intptr_t(obj) ;
  } else {
     // Marsaglia's xor-shift scheme with thread-specific state
     // This is probably the best overall implementation -- we'll
     // likely make this the default in future releases.
     unsigned t = Self->_hashStateX ;
     t ^= (t << 11) ;
     Self->_hashStateX = Self->_hashStateY ;
     Self->_hashStateY = Self->_hashStateZ ;
     Self->_hashStateZ = Self->_hashStateW ;
     unsigned v = Self->_hashStateW ;
     v = (v ^ (v >> 19)) ^ (t ^ (t >> 8)) ;
     Self->_hashStateW = v ;
     value = v ;
  }

  value &= markOopDesc::hash_mask;
  if (value == 0) value = 0xBAD ;
  assert (value != markOopDesc::no_hash, "invariant") ;
  TEVENT (hashCode: GENERATE) ;
  return value;
}
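// For reference, the default branch above is Marsaglia's xor-shift
// ("xor128") generator; for non-zero seed state its period is 2^128 - 1.
// The same recurrence, restated outside the per-Thread _hashStateX..W
// fields (illustrative only):
//   unsigned t = x ^ (x << 11) ;
//   x = y ; y = z ; z = w ;
//   w = (w ^ (w >> 19)) ^ (t ^ (t >> 8)) ;   // 'w' is the next value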
|
//
intptr_t ObjectSynchronizer::FastHashCode (Thread * Self, oop obj) {
  if (UseBiasedLocking) {
    // NOTE: many places throughout the JVM do not expect a safepoint
    // to be taken here, in particular most operations on perm gen
    // objects.  However, we only ever bias Java instances and all of
    // the call sites of identity_hash that might revoke biases have
    // been checked to make sure they can handle a safepoint.  The
    // added check of the bias pattern is to avoid useless calls to
    // thread-local storage.
    if (obj->mark()->has_bias_pattern()) {
      // Box and unbox the raw reference just in case we cause a STW safepoint.
      Handle hobj (Self, obj) ;
      // Relaxing assertion for bug 6320749.
      assert (Universe::verify_in_progress() ||
              !SafepointSynchronize::is_at_safepoint(),
             "biases should not be seen by VM thread here");
      BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
      obj = hobj() ;
      assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
    }
  }

  // hashCode() is a heap mutator ...
  // Relaxing assertion for bug 6320749.
  assert (Universe::verify_in_progress() ||
          !SafepointSynchronize::is_at_safepoint(), "invariant") ;
  assert (Universe::verify_in_progress() ||
          Self->is_Java_thread() , "invariant") ;
  assert (Universe::verify_in_progress() ||
         ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant") ;

  ObjectMonitor* monitor = NULL;
  markOop temp, test;
  intptr_t hash;
  markOop mark = ReadStableMark (obj);

  // object should remain ineligible for biased locking
  assert (!mark->has_bias_pattern(), "invariant") ;

  if (mark->is_neutral()) {
    hash = mark->hash();              // this is a normal header
    if (hash) {                       // if it has hash, just return it
      return hash;
    }
    hash = get_next_hash(Self, obj);  // allocate a new hash code
    temp = mark->copy_set_hash(hash); // merge the hash code into header
    // use (machine word version) atomic operation to install the hash
    test = (markOop) Atomic::cmpxchg_ptr(temp, obj->mark_addr(), mark);
    if (test == mark) {
      return hash;
    }
    // If the atomic operation failed, we must inflate the header
    // into a heavyweight monitor.  We could add more code here
    // for a fast path, but it is not worth the complexity.
  } else if (mark->has_monitor()) {
    monitor = mark->monitor();
    temp = monitor->header();
    assert (temp->is_neutral(), "invariant") ;
    hash = temp->hash();
    if (hash) {
      return hash;
    }
    // Skip to the following code to reduce code size
  } else if (Self->is_lock_owned((address)mark->locker())) {
    temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
    assert (temp->is_neutral(), "invariant") ;
    hash = temp->hash();              // by current thread, check if the displaced
    if (hash) {                       // header contains hash code
      return hash;
    }
    // WARNING:
    // The displaced header is strictly immutable.
    // It can NOT be changed in ANY case.  So we have
    // to inflate the header into a heavyweight monitor
    // even if the current thread owns the lock.  The reason
    // is that the BasicLock (stack slot) will be asynchronously
    // read by other threads during the inflate() function.
    // Any change to the stack may not propagate to other threads
    // correctly.
  }

  // Inflate the monitor to set hash code
  monitor = ObjectSynchronizer::inflate(Self, obj);
  // Load displaced header and check it has hash code
  mark = monitor->header();
  assert (mark->is_neutral(), "invariant") ;
  hash = mark->hash();
  if (hash == 0) {
    hash = get_next_hash(Self, obj);
    temp = mark->copy_set_hash(hash); // merge hash code into header
    assert (temp->is_neutral(), "invariant") ;
    test = (markOop) Atomic::cmpxchg_ptr(temp, monitor, mark);
    if (test != mark) {
      // The only update to the header in the monitor (outside GC)
      // is to install the hash code.  If someone adds a new usage of
      // the displaced header, please update this code.
      hash = test->hash();
      assert (test->is_neutral(), "invariant") ;
      assert (hash != 0, "Trivial unexpected object/monitor header usage.");
    }
  }
  // We finally get the hash
  return hash;
}

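// At the Java level, FastHashCode() is what ultimately services identity
// hashing (Object.hashCode() / System.identityHashCode()) when no hash has
// been installed yet; fast paths elsewhere typically read an already-
// installed hash straight out of the mark word and never reach this file.
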
// Deprecated -- use FastHashCode() instead.

intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
  return FastHashCode (Thread::current(), obj()) ;
}


bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
                                                   Handle h_obj) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(h_obj, false, thread);
    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  assert(thread == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();

  markOop mark = ReadStableMark (obj) ;

  // Uncontended case, header points to stack
  if (mark->has_locker()) {
    return thread->is_lock_owned((address)mark->locker());
  }
  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark->has_monitor()) {
    ObjectMonitor* monitor = mark->monitor();
    return monitor->is_entered(thread) != 0 ;
  }
  // Unlocked case, header in place
  assert(mark->is_neutral(), "sanity check");
  return false;
}

// Be aware that this method can revoke the bias of the lock object.
// This method queries the ownership of the lock handle specified by 'h_obj'.
// If the current thread owns the lock, it returns owner_self.  If no
// thread owns the lock, it returns owner_none.  Otherwise, it returns
// owner_other.
ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
(JavaThread *self, Handle h_obj) {
  // The caller must beware this method can revoke bias, and
  // revocation can result in a safepoint.
  assert (!SafepointSynchronize::is_at_safepoint(), "invariant") ;
  assert (self->thread_state() != _thread_blocked , "invariant") ;

  // Possible mark states: neutral, biased, stack-locked, inflated

  if (UseBiasedLocking && h_obj()->mark()->has_bias_pattern()) {
    // CASE: biased
    BiasedLocking::revoke_and_rebias(h_obj, false, self);
    assert(!h_obj->mark()->has_bias_pattern(),
           "biases should be revoked by now");
  }

  assert(self == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();
  markOop mark = ReadStableMark (obj) ;

  // CASE: stack-locked.  Mark points to a BasicLock on the owner's stack.
  if (mark->has_locker()) {
    return self->is_lock_owned((address)mark->locker()) ?
      owner_self : owner_other;
  }

  // CASE: inflated.  Mark (tagged pointer) points to an objectMonitor.
  // The Object:ObjectMonitor relationship is stable as long as we're
  // not at a safepoint.
  if (mark->has_monitor()) {
    void * owner = mark->monitor()->_owner ;
    if (owner == NULL) return owner_none ;
    return (owner == self ||
            self->is_lock_owned((address)owner)) ? owner_self : owner_other;
  }

  // CASE: neutral
  assert(mark->is_neutral(), "sanity check");
  return owner_none ;           // it's unlocked
}

// FIXME: jvmti should call this
JavaThread* ObjectSynchronizer::get_lock_owner(Handle h_obj, bool doLock) {
  if (UseBiasedLocking) {
    if (SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::revoke_at_safepoint(h_obj);
    } else {
      BiasedLocking::revoke_and_rebias(h_obj, false, JavaThread::current());
    }
    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  oop obj = h_obj();
  address owner = NULL;

  markOop mark = ReadStableMark (obj) ;

  // Uncontended case, header points to stack
  if (mark->has_locker()) {
    owner = (address) mark->locker();
  }

  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark->has_monitor()) {
    ObjectMonitor* monitor = mark->monitor();
    assert(monitor != NULL, "monitor should be non-null");
    owner = (address) monitor->owner();
  }

  if (owner != NULL) {
    // owning_thread_from_monitor_owner() may also return NULL here
    return Threads::owning_thread_from_monitor_owner(owner, doLock);
  }

  // Unlocked case, header in place
  // Cannot have assertion since this object may have been
  // locked by another thread when reaching here.
  // assert(mark->is_neutral(), "sanity check");

  return NULL;
}

// Visitors ...

void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
  ObjectMonitor* block = gBlockList;
  ObjectMonitor* mid;
  while (block) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = _BLOCKSIZE - 1; i > 0; i--) {
      mid = block + i;
      oop object = (oop) mid->object();
      if (object != NULL) {
        closure->do_monitor(mid);
      }
    }
    block = (ObjectMonitor*) block->FreeNext;
  }
}

// Get the next block in the block list.
static inline ObjectMonitor* next(ObjectMonitor* block) {
  assert(block->object() == CHAINMARKER, "must be a block header");
  block = block->FreeNext ;
  assert(block == NULL || block->object() == CHAINMARKER, "must be a block header");
  return block;
}


void ObjectSynchronizer::oops_do(OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  for (ObjectMonitor* block = gBlockList; block != NULL; block = next(block)) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = 1; i < _BLOCKSIZE; i++) {
      ObjectMonitor* mid = &block[i];
      if (mid->object() != NULL) {
        f->do_oop((oop*)mid->object_addr());
      }
    }
  }
}

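// A rough sketch of a monitors_iterate() client (hypothetical closure that
// tallies monitors currently associated with an object):
//   class TallyMonitorClosure : public MonitorClosure {
//    public:
//     int _tally;
//     TallyMonitorClosure() : _tally(0) {}
//     void do_monitor(ObjectMonitor* mid) { _tally++; }
//   };
//   // TallyMonitorClosure tmc; ObjectSynchronizer::monitors_iterate(&tmc);
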
// -----------------------------------------------------------------------------
// ObjectMonitor Lifecycle
// -----------------------
// Inflation unlinks monitors from the global gFreeList and
// associates them with objects.  Deflation -- which occurs at
// STW-time -- disassociates idle monitors from objects.  Such
// scavenged monitors are returned to the gFreeList.
//
// The global list is protected by ListLock.  All the critical sections
// are short and operate in constant-time.
//
// ObjectMonitors reside in type-stable memory (TSM) and are immortal.
//
// Lifecycle:
// --   unassigned and on the global free list
// --   unassigned and on a thread's private omFreeList
// --   assigned to an object.  The object is inflated and the mark refers
//      to the objectmonitor.
//


// Constraining monitor pool growth via MonitorBound ... |
888 |
// |
|
889 |
// The monitor pool is grow-only. We scavenge at STW safepoint-time, but the |
|
890 |
// the rate of scavenging is driven primarily by GC. As such, we can find |
|
891 |
// an inordinate number of monitors in circulation. |
|
892 |
// To avoid that scenario we can artificially induce a STW safepoint |
|
893 |
// if the pool appears to be growing past some reasonable bound. |
|
894 |
// Generally we favor time in space-time tradeoffs, but as there's no |
|
895 |
// natural back-pressure on the # of extant monitors we need to impose some |
|
896 |
// type of limit. Beware that if MonitorBound is set to too low a value |
|
897 |
// we could just loop. In addition, if MonitorBound is set to a low value |
|
898 |
// we'll incur more safepoints, which are harmful to performance. |
|
899 |
// See also: GuaranteedSafepointInterval |
|
900 |
// |
|
901 |
// The current implementation uses asynchronous VM operations. |
|
902 |
// |
|
903 |
||
904 |
static void InduceScavenge (Thread * Self, const char * Whence) { |
|
905 |
// Induce STW safepoint to trim monitors |
|
906 |
// Ultimately, this results in a call to deflate_idle_monitors() in the near future. |
|
907 |
// More precisely, trigger an asynchronous STW safepoint as the number |
|
908 |
// of active monitors passes the specified threshold. |
|
909 |
// TODO: assert thread state is reasonable |
|
910 |
||
911 |
if (ForceMonitorScavenge == 0 && Atomic::xchg (1, &ForceMonitorScavenge) == 0) { |
|
6975 | 912 |
if (ObjectMonitor::Knob_Verbose) { |
5710 | 913 |
::printf ("Monitor scavenge - Induced STW @%s (%d)\n", Whence, ForceMonitorScavenge) ; |
914 |
::fflush(stdout) ; |
|
915 |
} |
|
916 |
// Induce a 'null' safepoint to scavenge monitors |
|
917 |
// Must VM_Operation instance be heap allocated as the op will be enqueue and posted |
|
918 |
// to the VMthread and have a lifespan longer than that of this activation record. |
|
919 |
// The VMThread will delete the op when completed. |
|
920 |
VMThread::execute (new VM_ForceAsyncSafepoint()) ; |
|
921 |
||
6975 | 922 |
if (ObjectMonitor::Knob_Verbose) { |
5710 | 923 |
::printf ("Monitor scavenge - STW posted @%s (%d)\n", Whence, ForceMonitorScavenge) ; |
924 |
::fflush(stdout) ; |
|
925 |
} |
|
926 |
} |
|
927 |
} |
|
/* Too slow for general assert or debug
void ObjectSynchronizer::verifyInUse (Thread *Self) {
   ObjectMonitor* mid;
   int inusetally = 0;
   for (mid = Self->omInUseList; mid != NULL; mid = mid->FreeNext) {
     inusetally ++;
   }
   assert(inusetally == Self->omInUseCount, "inuse count off");

   int freetally = 0;
   for (mid = Self->omFreeList; mid != NULL; mid = mid->FreeNext) {
     freetally ++;
   }
   assert(freetally == Self->omFreeCount, "free count off");
}
*/
ObjectMonitor * ATTR ObjectSynchronizer::omAlloc (Thread * Self) {
    // A large MAXPRIVATE value reduces both list lock contention
    // and list coherency traffic, but also tends to increase the
    // number of objectMonitors in circulation as well as the STW
    // scavenge costs.  As usual, we lean toward time in space-time
    // tradeoffs.
    const int MAXPRIVATE = 1024 ;
    for (;;) {
        ObjectMonitor * m ;

        // 1: try to allocate from the thread's local omFreeList.
        // Threads will attempt to allocate first from their local list, then
        // from the global list, and only after those attempts fail will the thread
        // attempt to instantiate new monitors.  Thread-local free lists take
        // heat off the ListLock and improve allocation latency, as well as reducing
        // coherency traffic on the shared global list.
        m = Self->omFreeList ;
        if (m != NULL) {
           Self->omFreeList = m->FreeNext ;
           Self->omFreeCount -- ;
           // CONSIDER: set m->FreeNext = BAD -- diagnostic hygiene
           guarantee (m->object() == NULL, "invariant") ;
           if (MonitorInUseLists) {
             m->FreeNext = Self->omInUseList;
             Self->omInUseList = m;
             Self->omInUseCount ++;
             // verifyInUse(Self);
           } else {
             m->FreeNext = NULL;
           }
           return m ;
        }

        // 2: try to allocate from the global gFreeList
        // CONSIDER: use muxTry() instead of muxAcquire().
        // If the muxTry() fails then drop immediately into case 3.
        // If we're using thread-local free lists then try
        // to reprovision the caller's free list.
        if (gFreeList != NULL) {
            // Reprovision the thread's omFreeList.
            // Use bulk transfers to reduce the allocation rate and heat
            // on various locks.
            Thread::muxAcquire (&ListLock, "omAlloc") ;
            for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL; ) {
                MonitorFreeCount --;
                ObjectMonitor * take = gFreeList ;
                gFreeList = take->FreeNext ;
                guarantee (take->object() == NULL, "invariant") ;
                guarantee (!take->is_busy(), "invariant") ;
                take->Recycle() ;
                omRelease (Self, take, false) ;
            }
            Thread::muxRelease (&ListLock) ;
            Self->omFreeProvision += 1 + (Self->omFreeProvision/2) ;
            if (Self->omFreeProvision > MAXPRIVATE ) Self->omFreeProvision = MAXPRIVATE ;
            TEVENT (omFirst - reprovision) ;

            const int mx = MonitorBound ;
            if (mx > 0 && (MonitorPopulation-MonitorFreeCount) > mx) {
              // We can't safely induce a STW safepoint from omAlloc() as our thread
              // state may not be appropriate for such activities and callers may hold
              // naked oops, so instead we defer the action.
              InduceScavenge (Self, "omAlloc") ;
            }
            continue;
        }

        // 3: allocate a block of new ObjectMonitors
        // Both the local and global free lists are empty -- resort to malloc().
        // In the current implementation objectMonitors are TSM - immortal.
        assert (_BLOCKSIZE > 1, "invariant") ;
        ObjectMonitor * temp = new ObjectMonitor[_BLOCKSIZE];

        // NOTE: (almost) no way to recover if allocation failed.
        // We might be able to induce a STW safepoint and scavenge enough
        // objectMonitors to permit progress.
        if (temp == NULL) {
            vm_exit_out_of_memory (sizeof (ObjectMonitor[_BLOCKSIZE]), OOM_MALLOC_ERROR,
                                   "Allocate ObjectMonitors");
        }

        // Format the block.
        // Initialize the linked list; each monitor points to its next,
        // forming the singly linked free list.  The very first monitor
        // will point to the next block, which forms the block list.
        // The trick of using the 1st element in the block as gBlockList
        // linkage should be reconsidered.  A better implementation would
        // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }

        for (int i = 1; i < _BLOCKSIZE ; i++) {
           temp[i].FreeNext = &temp[i+1];
        }

        // terminate the last monitor as the end of list
        temp[_BLOCKSIZE - 1].FreeNext = NULL ;

        // Element [0] is reserved for global list linkage
        temp[0].set_object(CHAINMARKER);

        // Consider carving out this thread's current request from the
        // block in hand.  This avoids some lock traffic and redundant
        // list activity.

        // Acquire the ListLock to manipulate BlockList and FreeList.
        // An Oyama-Taura-Yonezawa scheme might be more efficient.
        Thread::muxAcquire (&ListLock, "omAlloc [2]") ;
        MonitorPopulation += _BLOCKSIZE-1;
        MonitorFreeCount += _BLOCKSIZE-1;

        // Add the new block to the list of extant blocks (gBlockList).
        // The very first objectMonitor in a block is reserved and dedicated.
        // It serves as blocklist "next" linkage.
        temp[0].FreeNext = gBlockList;
        gBlockList = temp;

        // Add the new string of objectMonitors to the global free list
        temp[_BLOCKSIZE - 1].FreeNext = gFreeList ;
        gFreeList = temp + 1;
        Thread::muxRelease (&ListLock) ;
        TEVENT (Allocate block of monitors) ;
    }
}

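// Rough shape of a freshly formatted block (illustrative):
//
//   gBlockList -> [ 0: CHAINMARKER | 1 | 2 | ... | _BLOCKSIZE-1 ]
//                     |              \_ elements 1 .. _BLOCKSIZE-1 are
//                     |                 threaded via FreeNext onto gFreeList
//                     +- FreeNext of element 0 links to the previous block
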
1067 |
// Place "m" on the caller's private per-thread omFreeList. |
|
1068 |
// In practice there's no need to clamp or limit the number of |
|
1069 |
// monitors on a thread's omFreeList as the only time we'll call |
|
1070 |
// omRelease is to return a monitor to the free list after a CAS |
|
1071 |
// attempt failed. This doesn't allow unbounded #s of monitors to |
|
1072 |
// accumulate on a thread's free list. |
|
1073 |
// |
|
1074 |
||
5920
8fdbb85e62d3
6964164: MonitorInUseLists leak of contended objects
acorn
parents:
5712
diff
changeset
|
1075 |
void ObjectSynchronizer::omRelease (Thread * Self, ObjectMonitor * m, bool fromPerThreadAlloc) { |
1 | 1076 |
guarantee (m->object() == NULL, "invariant") ; |
5920
8fdbb85e62d3
6964164: MonitorInUseLists leak of contended objects
acorn
parents:
5712
diff
changeset
|
1077 |
|
8fdbb85e62d3
6964164: MonitorInUseLists leak of contended objects
acorn
parents:
5712
diff
changeset
|
1078 |
// Remove from omInUseList |
8fdbb85e62d3
6964164: MonitorInUseLists leak of contended objects
acorn
parents:
5712
diff
changeset
|
1079 |
if (MonitorInUseLists && fromPerThreadAlloc) { |
8fdbb85e62d3
6964164: MonitorInUseLists leak of contended objects
acorn
parents:
5712
diff
changeset
|
1080 |
ObjectMonitor* curmidinuse = NULL; |
8fdbb85e62d3
6964164: MonitorInUseLists leak of contended objects
acorn
parents:
5712
diff
changeset
|
1081 |
for (ObjectMonitor* mid = Self->omInUseList; mid != NULL; ) { |
8fdbb85e62d3
6964164: MonitorInUseLists leak of contended objects
acorn
parents:
5712
diff
changeset
|
1082 |
if (m == mid) { |
8fdbb85e62d3
6964164: MonitorInUseLists leak of contended objects
acorn
parents:
5712
diff
changeset
|
1083 |
// extract from per-thread in-use-list |
8fdbb85e62d3
6964164: MonitorInUseLists leak of contended objects
acorn
parents:
5712
diff
changeset
|
1084 |
if (mid == Self->omInUseList) { |
8fdbb85e62d3
6964164: MonitorInUseLists leak of contended objects
acorn
parents:
5712
diff
changeset
|
1085 |
Self->omInUseList = mid->FreeNext; |
8fdbb85e62d3
6964164: MonitorInUseLists leak of contended objects
acorn
parents:
5712
diff
changeset
|
1086 |
} else if (curmidinuse != NULL) { |
8fdbb85e62d3
6964164: MonitorInUseLists leak of contended objects
acorn
parents:
5712
diff
changeset
|
1087 |
curmidinuse->FreeNext = mid->FreeNext; // maintain the current thread inuselist |
8fdbb85e62d3
6964164: MonitorInUseLists leak of contended objects
acorn
parents:
5712
diff
changeset
|
1088 |
} |
8fdbb85e62d3
6964164: MonitorInUseLists leak of contended objects
acorn
parents:
5712
diff
changeset
|
1089 |
Self->omInUseCount --; |
8fdbb85e62d3
6964164: MonitorInUseLists leak of contended objects
acorn
parents:
5712
diff
changeset
|
1090 |
// verifyInUse(Self); |
8fdbb85e62d3
6964164: MonitorInUseLists leak of contended objects
acorn
parents:
5712
diff
changeset
|
1091 |
break; |
8fdbb85e62d3
6964164: MonitorInUseLists leak of contended objects
acorn
parents:
5712
diff
changeset
|
1092 |
} else { |
8fdbb85e62d3
6964164: MonitorInUseLists leak of contended objects
acorn
parents:
5712
diff
changeset
|
1093 |
curmidinuse = mid; |
8fdbb85e62d3
6964164: MonitorInUseLists leak of contended objects
acorn
parents:
5712
diff
changeset
|
1094 |
mid = mid->FreeNext; |
8fdbb85e62d3
6964164: MonitorInUseLists leak of contended objects
acorn
parents:
5712
diff
changeset
|
1095 |
} |
8fdbb85e62d3
6964164: MonitorInUseLists leak of contended objects
acorn
parents:
5712
diff
changeset
|
1096 |
} |
8fdbb85e62d3
6964164: MonitorInUseLists leak of contended objects
acorn
parents:
5712
diff
changeset
|
1097 |
} |
8fdbb85e62d3
6964164: MonitorInUseLists leak of contended objects
acorn
parents:
5712
diff
changeset
|
1098 |
|
8fdbb85e62d3
6964164: MonitorInUseLists leak of contended objects
acorn
parents:
5712
diff
changeset
|
1099 |
// FreeNext is used for both onInUseList and omFreeList, so clear old before setting new |
8fdbb85e62d3
6964164: MonitorInUseLists leak of contended objects
acorn
parents:
5712
diff
changeset
|
1100 |
m->FreeNext = Self->omFreeList ; |
8fdbb85e62d3
6964164: MonitorInUseLists leak of contended objects
acorn
parents:
5712
diff
changeset
|
1101 |
Self->omFreeList = m ; |
8fdbb85e62d3
6964164: MonitorInUseLists leak of contended objects
acorn
parents:
5712
diff
changeset
|
1102 |
Self->omFreeCount ++ ; |
1 | 1103 |
} |
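
// The unlink above is the standard singly-linked-list excision: walk with a
// trailing pointer (curmidinuse) and either advance the list head or splice
// the predecessor's FreeNext past the victim.  It is O(n) in the length of
// the per-thread in-use list, which is tolerable because omRelease() runs
// only on the failed-CAS undo paths in inflate().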

// Return the monitors of a moribund thread's local free list to
// the global free list.  Typically a thread calls omFlush() when
// it's dying.  We could also consider having the VM thread steal
// monitors from threads that have not run java code over a few
// consecutive STW safepoints.  Relatedly, we might decay
// omFreeProvision at STW safepoints.
//
// Also return the monitors of a moribund thread's omInUseList to
// a global gOmInUseList under the global list lock so these
// will continue to be scanned.
//
// We currently call omFlush() from the Thread:: dtor _after the thread
// has been excised from the thread list and is no longer a mutator.
// That means that omFlush() can run concurrently with a safepoint and
// the scavenge operator.  Calling omFlush() from JavaThread::exit() might
// be a better choice as we could safely reason that the JVM is
// not at a safepoint at the time of the call, and thus there could
// be no inopportune interleavings between omFlush() and the scavenge
// operator.

void ObjectSynchronizer::omFlush (Thread * Self) {
    ObjectMonitor * List = Self->omFreeList ;  // Null-terminated SLL
    Self->omFreeList = NULL ;
    ObjectMonitor * Tail = NULL ;
    int Tally = 0;
    if (List != NULL) {
      ObjectMonitor * s ;
      for (s = List ; s != NULL ; s = s->FreeNext) {
          Tally ++ ;
          Tail = s ;
          guarantee (s->object() == NULL, "invariant") ;
          guarantee (!s->is_busy(), "invariant") ;
          s->set_owner (NULL) ;   // redundant but good hygiene
          TEVENT (omFlush - Move one) ;
      }
      guarantee (Tail != NULL && List != NULL, "invariant") ;
    }

    ObjectMonitor * InUseList = Self->omInUseList;
    ObjectMonitor * InUseTail = NULL ;
    int InUseTally = 0;
    if (InUseList != NULL) {
      Self->omInUseList = NULL;
      ObjectMonitor *curom;
      for (curom = InUseList; curom != NULL; curom = curom->FreeNext) {
        InUseTail = curom;
        InUseTally++;
      }
      // TODO debug
      assert(Self->omInUseCount == InUseTally, "inuse count off");
      Self->omInUseCount = 0;
      guarantee (InUseTail != NULL && InUseList != NULL, "invariant");
    }

    Thread::muxAcquire (&ListLock, "omFlush") ;
    if (Tail != NULL) {
      Tail->FreeNext = gFreeList ;
      gFreeList = List ;
      MonitorFreeCount += Tally;
    }

    if (InUseTail != NULL) {
      InUseTail->FreeNext = gOmInUseList;
      gOmInUseList = InUseList;
      gOmInUseCount += InUseTally;
    }

    Thread::muxRelease (&ListLock) ;
    TEVENT (omFlush) ;
}

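// Aside from the two counting walks above, omFlush() is constant-time: each
// local list is spliced whole onto the head of its global counterpart
// (e.g., Tail->FreeNext = gFreeList; gFreeList = List), so ListLock is held
// only briefly regardless of list length.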

// Fast path code shared by multiple functions
ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) {
    markOop mark = obj->mark();
    if (mark->has_monitor()) {
       assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
       assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header");
       return mark->monitor();
    }
    return ObjectSynchronizer::inflate(Thread::current(), obj);
}

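// inflate_helper() is the cheap precheck: when the mark already points at a
// monitor it avoids the retry loop in inflate() entirely and simply returns
// the extant monitor.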

// Note that we could encounter some performance loss through false-sharing as
// multiple locks occupy the same $ line.  Padding might be appropriate.


ObjectMonitor * ATTR ObjectSynchronizer::inflate (Thread * Self, oop object) {
  // Inflate mutates the heap ...
  // Relaxing assertion for bug 6320749.
  assert (Universe::verify_in_progress() ||
          !SafepointSynchronize::is_at_safepoint(), "invariant") ;

  for (;;) {
      const markOop mark = object->mark() ;
      assert (!mark->has_bias_pattern(), "invariant") ;

      // The mark can be in one of the following states:
      // *  Inflated     - just return
      // *  Stack-locked - coerce it to inflated
      // *  INFLATING    - busy wait for conversion to complete
      // *  Neutral      - aggressively inflate the object.
      // *  BIASED       - Illegal.  We should never see this

      // CASE: inflated
      if (mark->has_monitor()) {
          ObjectMonitor * inf = mark->monitor() ;
          assert (inf->header()->is_neutral(), "invariant");
          assert (inf->object() == object, "invariant") ;
          assert (ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
          return inf ;
      }

      // CASE: inflation in progress - inflating over a stack-lock.
      // Some other thread is converting from stack-locked to inflated.
      // Only that thread can complete inflation -- other threads must wait.
      // The INFLATING value is transient.
      // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
      // We could always eliminate polling by parking the thread on some auxiliary list.
      if (mark == markOopDesc::INFLATING()) {
         TEVENT (Inflate: spin while INFLATING) ;
         ReadStableMark(object) ;
         continue ;
      }

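      // Mark-word transitions effected below are one-way (a sketch):
      //
      //   neutral      --CAS-->  inflated
      //   stack-locked --CAS-->  INFLATING (0)  --ST-->  inflated
      //
      // so a thread observing INFLATING just waits; "inflated" is absorbing.
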
      // CASE: stack-locked
      // Could be stack-locked either by this thread or by some other thread.
      //
      // Note that we allocate the objectmonitor speculatively, _before_ attempting
      // to install INFLATING into the mark word.  We originally installed INFLATING,
      // allocated the objectmonitor, and then finally STed the address of the
      // objectmonitor into the mark.  This was correct, but artificially lengthened
      // the interval in which INFLATING appeared in the mark, thus increasing
      // the odds of inflation contention.
      //
      // We now use per-thread private objectmonitor free lists.
      // These lists are reprovisioned from the global free list outside the
      // critical INFLATING...ST interval.  A thread can transfer
      // multiple objectmonitors en masse from the global free list to its local free list.
      // This reduces coherency traffic and lock contention on the global free list.
      // Using such local free lists, it doesn't matter if the omAlloc() call appears
      // before or after the CAS(INFLATING) operation.
      // See the comments in omAlloc().

      if (mark->has_locker()) {
          ObjectMonitor * m = omAlloc (Self) ;
          // Optimistically prepare the objectmonitor - anticipate successful CAS
          // We do this before the CAS in order to minimize the length of time
          // in which INFLATING appears in the mark.
          m->Recycle();
          m->_Responsible  = NULL ;
          m->OwnerIsThread = 0 ;
          m->_recursions   = 0 ;
          m->_SpinDuration = ObjectMonitor::Knob_SpinLimit ;   // Consider: maintain by type/class

          markOop cmp = (markOop) Atomic::cmpxchg_ptr (markOopDesc::INFLATING(), object->mark_addr(), mark) ;
          if (cmp != mark) {
             omRelease (Self, m, true) ;
             continue ;       // Interference -- just retry
          }
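
          // Note the undo path above: on CAS failure the pre-initialized
          // monitor goes straight back to the thread-local free list via
          // omRelease(..., true), which also unlinks it from the per-thread
          // in-use list if MonitorInUseLists placed it there.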

          // We've successfully installed INFLATING (0) into the mark-word.
          // This is the only case where 0 will appear in a mark-word.
          // Only the singular thread that successfully swings the mark-word
          // to 0 can perform (or more precisely, complete) inflation.
          //
          // Why do we CAS a 0 into the mark-word instead of just CASing the
          // mark-word from the stack-locked value directly to the new inflated state?
          // Consider what happens when a thread unlocks a stack-locked object.
          // It attempts to use CAS to swing the displaced header value from the
          // on-stack basiclock back into the object header.  Recall also that the
          // header value (hashcode, etc) can reside in (a) the object header, or
          // (b) a displaced header associated with the stack-lock, or (c) a displaced
          // header in an objectMonitor.  The inflate() routine must copy the header
          // value from the basiclock on the owner's stack to the objectMonitor, all
          // the while preserving the hashCode stability invariants.  If the owner
          // decides to release the lock while the value is 0, the unlock will fail
          // and control will eventually pass from slow_exit() to inflate.  The owner
          // will then spin, waiting for the 0 value to disappear.  Put another way,
          // the 0 causes the owner to stall if the owner happens to try to
          // drop the lock (restoring the header from the basiclock to the object)
          // while inflation is in-progress.  This protocol avoids races that
          // would otherwise permit hashCode values to change or "flicker" for an object.
          // Critically, while object->mark is 0 mark->displaced_mark_helper() is stable.
          // 0 serves as a "BUSY" inflate-in-progress indicator.

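          // Concretely (a sketch of the race being prevented): owner T1
          // starts to unlock just as T2 begins inflating.  T2's CAS writes 0
          // into the mark, so T1's CAS of the displaced header back into the
          // mark must fail; T1 falls into slow_exit() and spins until the 0
          // disappears, rather than racing T2's copy of the displaced header
          // into the objectMonitor.
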
          // fetch the displaced mark from the owner's stack.
          // The owner can't die or unwind past the lock while our INFLATING
          // object is in the mark.  Furthermore the owner can't complete
          // an unlock on the object, either.
          markOop dmw = mark->displaced_mark_helper() ;
          assert (dmw->is_neutral(), "invariant") ;

          // Setup monitor fields to proper values -- prepare the monitor
          m->set_header(dmw) ;

          // Optimization: if the mark->locker stack address is associated
          // with this thread we could simply set m->_owner = Self and
          // m->OwnerIsThread = 1.  Note that a thread can inflate an object
          // that it has stack-locked -- as might happen in wait() -- directly
          // with CAS.  That is, we can avoid the xchg-NULL .... ST idiom.
          m->set_owner(mark->locker());
          m->set_object(object);
          // TODO-FIXME: assert BasicLock->dhw != 0.

          // Must preserve store ordering. The monitor state must
          // be stable at the time of publishing the monitor address.
          guarantee (object->mark() == markOopDesc::INFLATING(), "invariant") ;
          object->release_set_mark(markOopDesc::encode(m));

          // Hopefully the performance counters are allocated on distinct cache lines
          // to avoid false sharing on MP systems ...
          if (ObjectMonitor::_sync_Inflations != NULL) ObjectMonitor::_sync_Inflations->inc() ;
          TEVENT(Inflate: overwrite stacklock) ;
          if (TraceMonitorInflation) {
            if (object->is_instance()) {
              ResourceMark rm;
              tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                (intptr_t) object, (intptr_t) object->mark(),
                object->klass()->external_name());
            }
          }
          return m ;
      }

      // CASE: neutral
      // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
      // If we know we're inflating for entry it's better to inflate by swinging a
      // pre-locked objectMonitor pointer into the object header.  A successful
      // CAS inflates the object *and* confers ownership to the inflating thread.
      // In the current implementation we use a 2-step mechanism where we CAS()
      // to inflate and then CAS() again to try to swing _owner from NULL to Self.
      // An inflateTry() method that we could call from fast_enter() and slow_enter()
      // would be useful.

      assert (mark->is_neutral(), "invariant");
      ObjectMonitor * m = omAlloc (Self) ;
      // prepare m for installation - set monitor to initial state
      m->Recycle();
      m->set_header(mark);
      m->set_owner(NULL);
      m->set_object(object);
      m->OwnerIsThread = 1 ;
      m->_recursions   = 0 ;
      m->_Responsible  = NULL ;
      m->_SpinDuration = ObjectMonitor::Knob_SpinLimit ;   // consider: keep metastats by type/class

      if (Atomic::cmpxchg_ptr (markOopDesc::encode(m), object->mark_addr(), mark) != mark) {
          m->set_object (NULL) ;
          m->set_owner  (NULL) ;
          m->OwnerIsThread = 0 ;
          m->Recycle() ;
          omRelease (Self, m, true) ;
          m = NULL ;
          continue ;
          // interference - the markword changed - just retry.
          // The state-transitions are one-way, so there's no chance of
          // live-lock -- "Inflated" is an absorbing state.
      }

      // Hopefully the performance counters are allocated on distinct
      // cache lines to avoid false sharing on MP systems ...
      if (ObjectMonitor::_sync_Inflations != NULL) ObjectMonitor::_sync_Inflations->inc() ;
      TEVENT(Inflate: overwrite neutral) ;
      if (TraceMonitorInflation) {
        if (object->is_instance()) {
          ResourceMark rm;
          tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
            (intptr_t) object, (intptr_t) object->mark(),
            object->klass()->external_name());
        }
      }
      return m ;
  }
}

// Note that we could encounter some performance loss through false-sharing as
// multiple locks occupy the same $ line.  Padding might be appropriate.


// Deflate_idle_monitors() is called at all safepoints, immediately
// after all mutators are stopped, but before any objects have moved.
// It traverses the list of known monitors, deflating where possible.
// The scavenged monitors are returned to the monitor free list.
//
// Beware that we scavenge at *every* stop-the-world point.
// Having a large number of monitors in-circulation negatively
// impacts the performance of some applications (e.g., PointBase).
// Broadly, we want to minimize the # of monitors in circulation.
//
// We have added a flag, MonitorInUseLists, which creates a list
// of active monitors for each thread. deflate_idle_monitors()
// only scans the per-thread inuse lists. omAlloc() puts all
// assigned monitors on the per-thread list. deflate_idle_monitors()
// returns the non-busy monitors to the global free list.
// When a thread dies, omFlush() adds the list of active monitors for
// that thread to a global gOmInUseList, acquiring the
// global list lock. deflate_idle_monitors() acquires the global
// list lock to scan gOmInUseList and move non-busy monitors to
// the global free list.
// An alternative could have used a single global inuse list. The
// downside would have been the additional cost of acquiring the global list lock
// for every omAlloc().
//
// Perversely, the heap size -- and thus the STW safepoint rate --
// typically drives the scavenge rate.  Large heaps can mean infrequent GC,
// which in turn can mean large(r) numbers of objectmonitors in circulation.
// This is an unfortunate aspect of this design.
//

enum ManifestConstants {
    ClearResponsibleAtSTW   = 0,
    MaximumRecheckInterval  = 1000
} ;

// Deflate a single monitor if not in use
// Return true if deflated, false if in use
bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
                                         ObjectMonitor** FreeHeadp, ObjectMonitor** FreeTailp) {
  bool deflated;
  // Normal case ... The monitor is associated with obj.
  guarantee (obj->mark() == markOopDesc::encode(mid), "invariant") ;
  guarantee (mid == obj->mark()->monitor(), "invariant");
  guarantee (mid->header()->is_neutral(), "invariant");

  if (mid->is_busy()) {
     if (ClearResponsibleAtSTW) mid->_Responsible = NULL ;
     deflated = false;
  } else {
     // Deflate the monitor if it is no longer being used
     // It's idle - scavenge and return to the global free list
     // plain old deflation ...
     TEVENT (deflate_idle_monitors - scavenge1) ;
     if (TraceMonitorInflation) {
       if (obj->is_instance()) {
         ResourceMark rm;
         tty->print_cr("Deflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                (intptr_t) obj, (intptr_t) obj->mark(), obj->klass()->external_name());
       }
     }

     // Restore the header back to obj
     obj->release_set_mark(mid->header());
     mid->clear();

     assert (mid->object() == NULL, "invariant") ;

     // Move the object to the working free list defined by FreeHead,FreeTail.
     if (*FreeHeadp == NULL) *FreeHeadp = mid;
     if (*FreeTailp != NULL) {
       ObjectMonitor * prevtail = *FreeTailp;
       assert(prevtail->FreeNext == NULL, "cleaned up deflated?"); // TODO KK
       prevtail->FreeNext = mid;
     }
     *FreeTailp = mid;
     deflated = true;
  }
  return deflated;
}

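// Deflation is the inverse of the stack-lock arm of inflate(): the header
// saved in the monitor is published back into the object via
// release_set_mark(), after which the monitor is clear()ed and appended to
// the caller's working FreeHead/FreeTail list.
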

// Caller acquires ListLock
int ObjectSynchronizer::walk_monitor_list(ObjectMonitor** listheadp,
                                          ObjectMonitor** FreeHeadp, ObjectMonitor** FreeTailp) {
  ObjectMonitor* mid;
  ObjectMonitor* next;
  ObjectMonitor* curmidinuse = NULL;
  int deflatedcount = 0;

  for (mid = *listheadp; mid != NULL; ) {
     oop obj = (oop) mid->object();
     bool deflated = false;
     if (obj != NULL) {
       deflated = deflate_monitor(mid, obj, FreeHeadp, FreeTailp);
     }
     if (deflated) {
       // extract from per-thread in-use-list
       if (mid == *listheadp) {
         *listheadp = mid->FreeNext;
       } else if (curmidinuse != NULL) {
         curmidinuse->FreeNext = mid->FreeNext; // maintain the current thread inuselist
       }
       next = mid->FreeNext;
       mid->FreeNext = NULL;  // This mid is current tail in the FreeHead list
       mid = next;
       deflatedcount++;
     } else {
       curmidinuse = mid;
       mid = mid->FreeNext;
     }
  }
  return deflatedcount;
}

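// walk_monitor_list() reuses the trailing-pointer unlink from omRelease():
// deflated monitors are excised from the in-use list as the walk advances,
// and the deflation count is returned so callers can adjust omInUseCount or
// gOmInUseCount to match.
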
void ObjectSynchronizer::deflate_idle_monitors() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  int nInuse = 0 ;              // currently associated with objects
  int nInCirculation = 0 ;      // extant
  int nScavenged = 0 ;          // reclaimed
  bool deflated = false;

  ObjectMonitor * FreeHead = NULL ;  // Local SLL of scavenged monitors
  ObjectMonitor * FreeTail = NULL ;

  TEVENT (deflate_idle_monitors) ;
  // Prevent omFlush from changing mids in Thread dtor's during deflation
  // And in case the vm thread is acquiring a lock during a safepoint
  // See e.g. 6320749
  Thread::muxAcquire (&ListLock, "scavenge - return") ;

  if (MonitorInUseLists) {
    int inUse = 0;
    for (JavaThread* cur = Threads::first(); cur != NULL; cur = cur->next()) {
      nInCirculation+= cur->omInUseCount;
      int deflatedcount = walk_monitor_list(cur->omInUseList_addr(), &FreeHead, &FreeTail);
      cur->omInUseCount-= deflatedcount;
      // verifyInUse(cur);
      nScavenged += deflatedcount;
      nInuse += cur->omInUseCount;
    }

    // For moribund threads, scan gOmInUseList
    if (gOmInUseList) {
      nInCirculation += gOmInUseCount;
      int deflatedcount = walk_monitor_list((ObjectMonitor **)&gOmInUseList, &FreeHead, &FreeTail);
      gOmInUseCount-= deflatedcount;
      nScavenged += deflatedcount;
      nInuse += gOmInUseCount;
    }

  } else for (ObjectMonitor* block = gBlockList; block != NULL; block = next(block)) {
    // Iterate over all extant monitors - Scavenge all idle monitors.
    assert(block->object() == CHAINMARKER, "must be a block header");
    nInCirculation += _BLOCKSIZE ;
    for (int i = 1 ; i < _BLOCKSIZE; i++) {
      ObjectMonitor* mid = &block[i];
      oop obj = (oop) mid->object();

      if (obj == NULL) {
        // The monitor is not associated with an object.
        // The monitor should either be a thread-specific private
        // free list or the global free list.
        // obj == NULL IMPLIES mid->is_busy() == 0
        guarantee (!mid->is_busy(), "invariant") ;
        continue ;
      }
      deflated = deflate_monitor(mid, obj, &FreeHead, &FreeTail);

      if (deflated) {
        mid->FreeNext = NULL ;
        nScavenged ++ ;
      } else {
        nInuse ++;
      }
    }
  }

  MonitorFreeCount += nScavenged;

  // Consider: audit gFreeList to ensure that MonitorFreeCount and list agree.

  if (ObjectMonitor::Knob_Verbose) {
    ::printf ("Deflate: InCirc=%d InUse=%d Scavenged=%d ForceMonitorScavenge=%d : pop=%d free=%d\n",
        nInCirculation, nInuse, nScavenged, ForceMonitorScavenge,
        MonitorPopulation, MonitorFreeCount) ;
    ::fflush(stdout) ;
  }

  ForceMonitorScavenge = 0;    // Reset

  // Move the scavenged monitors back to the global free list.
  if (FreeHead != NULL) {
     guarantee (FreeTail != NULL && nScavenged > 0, "invariant") ;
     assert (FreeTail->FreeNext == NULL, "invariant") ;
     // constant-time list splice - prepend scavenged segment to gFreeList
     FreeTail->FreeNext = gFreeList ;
     gFreeList = FreeHead ;
  }
  Thread::muxRelease (&ListLock) ;

  if (ObjectMonitor::_sync_Deflations != NULL) ObjectMonitor::_sync_Deflations->inc(nScavenged) ;
  if (ObjectMonitor::_sync_MonExtant  != NULL) ObjectMonitor::_sync_MonExtant ->set_value(nInCirculation);

  // TODO: Add objectMonitor leak detection.
  // Audit/inventory the objectMonitors -- make sure they're all accounted for.
  GVars.stwRandom = os::random() ;
  GVars.stwCycle ++ ;
}

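// Bookkeeping sketch for the pass above: nInCirculation counts monitors
// examined, nInuse those still busy, and nScavenged those moved onto the
// local FreeHead/FreeTail list, which is then prepended to gFreeList in
// constant time while ListLock is still held.
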
// Monitor cleanup on JavaThread::exit

// Iterate through monitor cache and attempt to release thread's monitors
// Gives up on a particular monitor if an exception occurs, but continues
// the overall iteration, swallowing the exception.
class ReleaseJavaMonitorsClosure: public MonitorClosure {
private:
  TRAPS;

public:
  ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {}
  void do_monitor(ObjectMonitor* mid) {
    if (mid->owner() == THREAD) {
      (void)mid->complete_exit(CHECK);
    }
  }
};

// Release all inflated monitors owned by THREAD.  Lightweight monitors are
// ignored.  This is meant to be called during JNI thread detach which assumes
// all remaining monitors are heavyweight.  All exceptions are swallowed.
// Scanning the extant monitor list can be time consuming.
// A simple optimization is to add a per-thread flag that indicates a thread
// called jni_monitorenter() during its lifetime.
//
// Instead of No_Safepoint_Verifier it might be cheaper to
// use an idiom of the form:
//   auto int tmp = SafepointSynchronize::_safepoint_counter ;
//   <code that must not run at safepoint>
//   guarantee (((tmp ^ _safepoint_counter) | (tmp & 1)) == 0) ;
// Since the tests are extremely cheap we could leave them enabled
// for normal product builds.

void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
  assert(THREAD == JavaThread::current(), "must be current Java thread");
  No_Safepoint_Verifier nsv ;
  ReleaseJavaMonitorsClosure rjmc(THREAD);
  Thread::muxAcquire(&ListLock, "release_monitors_owned_by_thread");
  ObjectSynchronizer::monitors_iterate(&rjmc);
  Thread::muxRelease(&ListLock);
  THREAD->clear_pending_exception();
}
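
// The guarantee in the idiom sketched above works provided
// _safepoint_counter is incremented at both safepoint begin and end, so it
// is odd while a safepoint is in progress: (tmp ^ _safepoint_counter)
// detects any intervening safepoint, and (tmp & 1) rejects starting inside
// one.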

//------------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT

// Verify all monitors in the monitor cache; the verification is weak.
void ObjectSynchronizer::verify() {
  ObjectMonitor* block = gBlockList;
  ObjectMonitor* mid;
  while (block) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = 1; i < _BLOCKSIZE; i++) {
      mid = block + i;
      oop object = (oop) mid->object();
      if (object != NULL) {
        mid->verify();
      }
    }
    block = (ObjectMonitor*) block->FreeNext;
  }
}

// Check if monitor belongs to the monitor cache |
|
1659 |
// The list is grow-only so it's *relatively* safe to traverse |
|
1660 |
// the list of extant blocks without taking a lock. |
|
1661 |
||
1662 |
int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) { |
|
1663 |
ObjectMonitor* block = gBlockList; |
|
1664 |
||
1665 |
while (block) { |
|
1666 |
assert(block->object() == CHAINMARKER, "must be a block header"); |
|
1667 |
if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) { |
|
1668 |
address mon = (address) monitor; |
|
1669 |
address blk = (address) block; |
|
1670 |
size_t diff = mon - blk; |
|
1671 |
assert((diff % sizeof(ObjectMonitor)) == 0, "check"); |
|
1672 |
return 1; |
|
1673 |
} |
|
1674 |
block = (ObjectMonitor*) block->FreeNext; |
|
1675 |
} |
|
1676 |
return 0; |
|
1677 |
} |
|
1678 |
||
1679 |
#endif |