/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "oops/klass.inline.hpp"
#include "oops/markOop.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/task.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vm_operations.hpp"
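
// Biased locking lets the runtime "bias" an object's monitor toward a
// single thread, so that thread can enter and exit the monitor without
// atomic operations. This file implements the slow path: revoking a
// bias once another thread contends (at a safepoint or, where safe,
// with a CAS), plus the per-klass heuristics that decide when to fall
// back to bulk rebiasing or bulk revocation.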
|
static bool _biased_locking_enabled = false;
BiasedLockingCounters BiasedLocking::_counters;

static GrowableArray<Handle>*  _preserved_oop_stack  = NULL;
static GrowableArray<markOop>* _preserved_mark_stack = NULL;
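
// Each Klass carries a prototype mark word that is copied into newly
// allocated instances, so installing the biased-locking prototype here
// makes every future instance of k start out biasable (anonymously
// biased) without touching existing objects.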
static void enable_biased_locking(klassOop k) {
  Klass::cast(k)->set_prototype_header(markOopDesc::biased_locking_prototype());
}

class VM_EnableBiasedLocking: public VM_Operation {
 private:
  bool _is_cheap_allocated;
 public:
  VM_EnableBiasedLocking(bool is_cheap_allocated) { _is_cheap_allocated = is_cheap_allocated; }
  VMOp_Type type() const          { return VMOp_EnableBiasedLocking; }
  Mode evaluation_mode() const    { return _is_cheap_allocated ? _async_safepoint : _safepoint; }
  bool is_cheap_allocated() const { return _is_cheap_allocated; }

  void doit() {
    // Iterate the system dictionary enabling biased locking for all
    // currently loaded classes
    SystemDictionary::classes_do(enable_biased_locking);
    // Indicate that future instances should enable it as well
    _biased_locking_enabled = true;

    if (TraceBiasedLocking) {
      tty->print_cr("Biased locking enabled");
    }
  }

  bool allow_nested_vm_operations() const        { return false; }
};
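
// When _is_cheap_allocated is true the operation was heap-allocated
// (by the periodic task below) and runs as an asynchronous safepoint
// operation: VMThread::execute() does not wait for it, and the VM
// thread frees the operation's C heap storage when it is done.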
|

// One-shot PeriodicTask subclass for enabling biased locking
class EnableBiasedLockingTask : public PeriodicTask {
 public:
  EnableBiasedLockingTask(size_t interval_time) : PeriodicTask(interval_time) {}

  virtual void task() {
    // Use async VM operation to avoid blocking the Watcher thread.
    // VM Thread will free C heap storage.
    VM_EnableBiasedLocking *op = new VM_EnableBiasedLocking(true);
    VMThread::execute(op);

    // Reclaim our storage and disenroll ourself
    delete this;
  }
};
|

void BiasedLocking::init() {
  // If biased locking is enabled, schedule a task to fire a few
  // seconds into the run which turns on biased locking for all
  // currently loaded classes as well as future ones. This is a
  // workaround for startup time regressions due to a large number of
  // safepoints being taken during VM startup for bias revocation.
  // Ideally we would have a lower cost for individual bias revocation
  // and not need a mechanism like this.
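  // (With the default flags the task fires after
  // BiasedLockingStartupDelay = 4000 milliseconds.)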
|
  if (UseBiasedLocking) {
    if (BiasedLockingStartupDelay > 0) {
      EnableBiasedLockingTask* task = new EnableBiasedLockingTask(BiasedLockingStartupDelay);
      task->enroll();
    } else {
      VM_EnableBiasedLocking op(false);
      VMThread::execute(&op);
    }
  }
}
|

bool BiasedLocking::enabled() {
  return _biased_locking_enabled;
}

// Returns MonitorInfos for all objects locked on this thread in youngest to oldest order
static GrowableArray<MonitorInfo*>* get_or_compute_monitor_info(JavaThread* thread) {
  GrowableArray<MonitorInfo*>* info = thread->cached_monitor_info();
  if (info != NULL) {
    return info;
  }

  info = new GrowableArray<MonitorInfo*>();

  // It's possible for the thread to not have any Java frames on it,
  // i.e., if it's the main thread and it's already returned from main()
  if (thread->has_last_Java_frame()) {
    RegisterMap rm(thread);
    for (javaVFrame* vf = thread->last_java_vframe(&rm); vf != NULL; vf = vf->java_sender()) {
      GrowableArray<MonitorInfo*> *monitors = vf->monitors();
      if (monitors != NULL) {
        int len = monitors->length();
        // Walk monitors youngest to oldest
        for (int i = len - 1; i >= 0; i--) {
          MonitorInfo* mon_info = monitors->at(i);
          if (mon_info->owner_is_scalar_replaced()) continue;
          oop owner = mon_info->owner();
          if (owner != NULL) {
            info->append(mon_info);
          }
        }
      }
    }
  }

  thread->set_cached_monitor_info(info);
  return info;
}
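
// The computed list is cached on the thread because bulk revocations
// call this repeatedly while walking many objects; the cache is
// cleared by clean_up_cached_monitor_info() once the enclosing VM
// operation completes.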
|
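// Revocation of a single object's bias. This must run either at a
// safepoint or in the thread toward which the object is biased,
// because it walks that thread's stack to fix up any stack locks.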
static BiasedLocking::Condition revoke_bias(oop obj, bool allow_rebias, bool is_bulk, JavaThread* requesting_thread) {
  markOop mark = obj->mark();
  if (!mark->has_bias_pattern()) {
    if (TraceBiasedLocking) {
      ResourceMark rm;
      tty->print_cr("  (Skipping revocation of object of type %s because it's no longer biased)",
                    Klass::cast(obj->klass())->external_name());
    }
    return BiasedLocking::NOT_BIASED;
  }

  int age = mark->age();
  markOop biased_prototype = markOopDesc::biased_locking_prototype()->set_age(age);
  markOop unbiased_prototype = markOopDesc::prototype()->set_age(age);

  if (TraceBiasedLocking && (Verbose || !is_bulk)) {
    ResourceMark rm;
    tty->print_cr("Revoking bias of object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s , prototype header " INTPTR_FORMAT " , allow rebias %d , requesting thread " INTPTR_FORMAT,
                  (intptr_t) obj, (intptr_t) mark, Klass::cast(obj->klass())->external_name(), (intptr_t) Klass::cast(obj->klass())->prototype_header(), (allow_rebias ? 1 : 0), (intptr_t) requesting_thread);
  }

  JavaThread* biased_thread = mark->biased_locker();
  if (biased_thread == NULL) {
    // Object is anonymously biased. We can get here if, for
    // example, we revoke the bias due to an identity hash code
    // being computed for an object.
    if (!allow_rebias) {
      obj->set_mark(unbiased_prototype);
    }
    if (TraceBiasedLocking && (Verbose || !is_bulk)) {
      tty->print_cr("  Revoked bias of anonymously-biased object");
    }
    return BiasedLocking::BIAS_REVOKED;
  }

  // Handle case where the thread toward which the object was biased has exited
  bool thread_is_alive = false;
  if (requesting_thread == biased_thread) {
    thread_is_alive = true;
  } else {
    for (JavaThread* cur_thread = Threads::first(); cur_thread != NULL; cur_thread = cur_thread->next()) {
      if (cur_thread == biased_thread) {
        thread_is_alive = true;
        break;
      }
    }
  }
  if (!thread_is_alive) {
    if (allow_rebias) {
      obj->set_mark(biased_prototype);
    } else {
      obj->set_mark(unbiased_prototype);
    }
    if (TraceBiasedLocking && (Verbose || !is_bulk)) {
      tty->print_cr("  Revoked bias of object biased toward dead thread");
    }
    return BiasedLocking::BIAS_REVOKED;
  }

  // Thread owning bias is alive.
  // Check to see whether it currently owns the lock and, if so,
  // write down the needed displaced headers to the thread's stack.
  // Otherwise, restore the object's header either to the unlocked
  // or unbiased state.
  GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(biased_thread);
  BasicLock* highest_lock = NULL;
  for (int i = 0; i < cached_monitor_info->length(); i++) {
    MonitorInfo* mon_info = cached_monitor_info->at(i);
    if (mon_info->owner() == obj) {
      if (TraceBiasedLocking && Verbose) {
        tty->print_cr("   mon_info->owner (" PTR_FORMAT ") == obj (" PTR_FORMAT ")",
                      (intptr_t) mon_info->owner(),
                      (intptr_t) obj);
      }
      // Assume recursive case and fix up highest lock later
      markOop mark = markOopDesc::encode((BasicLock*) NULL);
      highest_lock = mon_info->lock();
      highest_lock->set_displaced_header(mark);
    } else {
      if (TraceBiasedLocking && Verbose) {
        tty->print_cr("   mon_info->owner (" PTR_FORMAT ") != obj (" PTR_FORMAT ")",
                      (intptr_t) mon_info->owner(),
                      (intptr_t) obj);
      }
    }
  }
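  // cached_monitor_info is ordered youngest to oldest, so highest_lock
  // now refers to the oldest (outermost) stack lock for obj; the inner,
  // recursive locks were left with a NULL displaced header, which is
  // how the runtime marks recursive lightweight locks.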
|
  if (highest_lock != NULL) {
    // Fix up highest lock to contain displaced header and point
    // object at it
    highest_lock->set_displaced_header(unbiased_prototype);
    // Reset object header to point to displaced mark
    obj->set_mark(markOopDesc::encode(highest_lock));
    assert(!obj->mark()->has_bias_pattern(), "illegal mark state: stack lock used bias bit");
    if (TraceBiasedLocking && (Verbose || !is_bulk)) {
      tty->print_cr("  Revoked bias of currently-locked object");
    }
  } else {
    if (TraceBiasedLocking && (Verbose || !is_bulk)) {
      tty->print_cr("  Revoked bias of currently-unlocked object");
    }
    if (allow_rebias) {
      obj->set_mark(biased_prototype);
    } else {
      // Store the unlocked value into the object's header.
      obj->set_mark(unbiased_prototype);
    }
  }

  return BiasedLocking::BIAS_REVOKED;
}
|

enum HeuristicsResult {
  HR_NOT_BIASED    = 1,
  HR_SINGLE_REVOKE = 2,
  HR_BULK_REBIAS   = 3,
  HR_BULK_REVOKE   = 4
};


static HeuristicsResult update_heuristics(oop o, bool allow_rebias) {
  markOop mark = o->mark();
  if (!mark->has_bias_pattern()) {
    return HR_NOT_BIASED;
  }

  // Heuristics to attempt to throttle the number of revocations.
  // Stages:
  // 1. Revoke the biases of all objects in the heap of this type,
  //    but allow rebiasing of those objects if unlocked.
  // 2. Revoke the biases of all objects in the heap of this type
  //    and don't allow rebiasing of these objects. Disable
  //    allocation of objects of that type with the bias bit set.
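  // With the default flag values: early revocations of a type are
  // handled individually, the revocation that brings the count to
  // BiasedLockingBulkRebiasThreshold (20) triggers a bulk rebias, and
  // the one reaching BiasedLockingBulkRevokeThreshold (40) triggers a
  // bulk revoke that disables biasing for the type altogether. A quiet
  // period of BiasedLockingDecayTime (25000 ms) resets the count.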
|
  Klass* k = o->blueprint();
  jlong cur_time = os::javaTimeMillis();
  jlong last_bulk_revocation_time = k->last_biased_lock_bulk_revocation_time();
  int revocation_count = k->biased_lock_revocation_count();
  if ((revocation_count >= BiasedLockingBulkRebiasThreshold) &&
      (revocation_count <  BiasedLockingBulkRevokeThreshold) &&
      (last_bulk_revocation_time != 0) &&
      (cur_time - last_bulk_revocation_time >= BiasedLockingDecayTime)) {
    // This is the first revocation we've seen in a while of an
    // object of this type since the last time we performed a bulk
    // rebiasing operation. The application is allocating objects in
    // bulk which are biased toward a thread and then handing them
    // off to another thread. We can cope with this allocation
    // pattern via the bulk rebiasing mechanism so we reset the
    // klass's revocation count rather than allow it to increase
    // monotonically. If we see the need to perform another bulk
    // rebias operation later, we will, and if subsequently we see
    // many more revocation operations in a short period of time we
    // will completely disable biasing for this type.
    k->set_biased_lock_revocation_count(0);
    revocation_count = 0;
  }

  // Make revocation count saturate just beyond BiasedLockingBulkRevokeThreshold
  if (revocation_count <= BiasedLockingBulkRevokeThreshold) {
    revocation_count = k->atomic_incr_biased_lock_revocation_count();
  }

  if (revocation_count == BiasedLockingBulkRevokeThreshold) {
    return HR_BULK_REVOKE;
  }

  if (revocation_count == BiasedLockingBulkRebiasThreshold) {
    return HR_BULK_REBIAS;
  }

  return HR_SINGLE_REVOKE;
}
|

static BiasedLocking::Condition bulk_revoke_or_rebias_at_safepoint(oop o,
                                                                   bool bulk_rebias,
                                                                   bool attempt_rebias_of_object,
                                                                   JavaThread* requesting_thread) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");

  if (TraceBiasedLocking) {
    tty->print_cr("* Beginning bulk revocation (kind == %s) because of object "
                  INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                  (bulk_rebias ? "rebias" : "revoke"),
                  (intptr_t) o, (intptr_t) o->mark(), Klass::cast(o->klass())->external_name());
  }

  jlong cur_time = os::javaTimeMillis();
  o->blueprint()->set_last_biased_lock_bulk_revocation_time(cur_time);

  klassOop k_o = o->klass();
  Klass* klass = Klass::cast(k_o);

  if (bulk_rebias) {
    // Use the epoch in the klass of the object to implicitly revoke
    // all biases of objects of this data type and force them to be
    // reacquired. However, we also need to walk the stacks of all
    // threads and update the headers of lightweight locked objects
    // with biases to have the current epoch.

    // If the prototype header doesn't have the bias pattern, don't
    // try to update the epoch -- assume another VM operation came in
    // and reset the header to the unbiased state, which will
    // implicitly cause all existing biases to be revoked
    if (klass->prototype_header()->has_bias_pattern()) {
      int prev_epoch = klass->prototype_header()->bias_epoch();
      klass->set_prototype_header(klass->prototype_header()->incr_bias_epoch());
      int cur_epoch = klass->prototype_header()->bias_epoch();
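      // Any object whose mark still carries prev_epoch now looks as if
      // its bias has expired, and the fast path can rebias it with a
      // simple CAS; only objects that are currently locked need the
      // explicit epoch fix-up below.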
|
      // Now walk all threads' stacks and adjust epochs of any biased
      // and locked objects of this data type we encounter
      for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
        GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
        for (int i = 0; i < cached_monitor_info->length(); i++) {
          MonitorInfo* mon_info = cached_monitor_info->at(i);
          oop owner = mon_info->owner();
          markOop mark = owner->mark();
          if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
            // We might have encountered this object already in the case of recursive locking
            assert(mark->bias_epoch() == prev_epoch || mark->bias_epoch() == cur_epoch, "error in bias epoch adjustment");
            owner->set_mark(mark->set_bias_epoch(cur_epoch));
          }
        }
      }
    }

    // At this point we're done. All we have to do is potentially
    // adjust the header of the given object to revoke its bias.
    revoke_bias(o, attempt_rebias_of_object && klass->prototype_header()->has_bias_pattern(), true, requesting_thread);
  } else {
    if (TraceBiasedLocking) {
      ResourceMark rm;
      tty->print_cr("* Disabling biased locking for type %s", klass->external_name());
    }

    // Disable biased locking for this data type. Not only will this
    // cause future instances to not be biased, but existing biased
    // instances will notice that this implicitly caused their biases
    // to be revoked.
    klass->set_prototype_header(markOopDesc::prototype());

    // Now walk all threads' stacks and forcibly revoke the biases of
    // any locked and biased objects of this data type we encounter.
    for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
      GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
      for (int i = 0; i < cached_monitor_info->length(); i++) {
        MonitorInfo* mon_info = cached_monitor_info->at(i);
        oop owner = mon_info->owner();
        markOop mark = owner->mark();
        if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
          revoke_bias(owner, false, true, requesting_thread);
        }
      }
    }

    // The bias of the passed object must be forcibly revoked as well
    // to ensure guarantees to callers
    revoke_bias(o, false, true, requesting_thread);
  }

  if (TraceBiasedLocking) {
    tty->print_cr("* Ending bulk revocation");
  }

  BiasedLocking::Condition status_code = BiasedLocking::BIAS_REVOKED;

  if (attempt_rebias_of_object &&
      o->mark()->has_bias_pattern() &&
      klass->prototype_header()->has_bias_pattern()) {
    markOop new_mark = markOopDesc::encode(requesting_thread, o->mark()->age(),
                                           klass->prototype_header()->bias_epoch());
    o->set_mark(new_mark);
    status_code = BiasedLocking::BIAS_REVOKED_AND_REBIASED;
    if (TraceBiasedLocking) {
      tty->print_cr("  Rebiased object toward thread " INTPTR_FORMAT, (intptr_t) requesting_thread);
    }
  }

  assert(!o->mark()->has_bias_pattern() ||
         (attempt_rebias_of_object && (o->mark()->biased_locker() == requesting_thread)),
         "bug in bulk bias revocation");

  return status_code;
}
|

static void clean_up_cached_monitor_info() {
  // Walk the thread list clearing out the cached monitors
  for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
    thr->set_cached_monitor_info(NULL);
  }
}
|

class VM_RevokeBias : public VM_Operation {
 protected:
  Handle* _obj;
  GrowableArray<Handle>* _objs;
  JavaThread* _requesting_thread;
  BiasedLocking::Condition _status_code;

 public:
  VM_RevokeBias(Handle* obj, JavaThread* requesting_thread)
    : _obj(obj)
    , _objs(NULL)
    , _requesting_thread(requesting_thread)
    , _status_code(BiasedLocking::NOT_BIASED) {}

  VM_RevokeBias(GrowableArray<Handle>* objs, JavaThread* requesting_thread)
    : _obj(NULL)
    , _objs(objs)
    , _requesting_thread(requesting_thread)
    , _status_code(BiasedLocking::NOT_BIASED) {}

  virtual VMOp_Type type() const { return VMOp_RevokeBias; }
|
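  // doit_prologue() runs in the requesting thread before the safepoint
  // is requested; returning false cancels the operation, which is how
  // we skip the safepoint entirely when nothing is actually biased.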
  virtual bool doit_prologue() {
    // Verify that there is actual work to do since the callers just
    // give us locked object(s). If we don't find any biased objects
    // there is nothing to do and we avoid a safepoint.
    if (_obj != NULL) {
      markOop mark = (*_obj)()->mark();
      if (mark->has_bias_pattern()) {
        return true;
      }
    } else {
      for (int i = 0; i < _objs->length(); i++) {
        markOop mark = (_objs->at(i))()->mark();
        if (mark->has_bias_pattern()) {
          return true;
        }
      }
    }
    return false;
  }

  virtual void doit() {
    if (_obj != NULL) {
      if (TraceBiasedLocking) {
        tty->print_cr("Revoking bias with potentially per-thread safepoint:");
      }
      _status_code = revoke_bias((*_obj)(), false, false, _requesting_thread);
      clean_up_cached_monitor_info();
      return;
    } else {
      if (TraceBiasedLocking) {
        tty->print_cr("Revoking bias with global safepoint:");
      }
      BiasedLocking::revoke_at_safepoint(_objs);
    }
  }

  BiasedLocking::Condition status_code() const {
    return _status_code;
  }
};
|

class VM_BulkRevokeBias : public VM_RevokeBias {
 private:
  bool _bulk_rebias;
  bool _attempt_rebias_of_object;

 public:
  VM_BulkRevokeBias(Handle* obj, JavaThread* requesting_thread,
                    bool bulk_rebias,
                    bool attempt_rebias_of_object)
    : VM_RevokeBias(obj, requesting_thread)
    , _bulk_rebias(bulk_rebias)
    , _attempt_rebias_of_object(attempt_rebias_of_object) {}

  virtual VMOp_Type type() const { return VMOp_BulkRevokeBias; }
  virtual bool doit_prologue()   { return true; }

  virtual void doit() {
    _status_code = bulk_revoke_or_rebias_at_safepoint((*_obj)(), _bulk_rebias, _attempt_rebias_of_object, _requesting_thread);
    clean_up_cached_monitor_info();
  }
};
|

BiasedLocking::Condition BiasedLocking::revoke_and_rebias(Handle obj, bool attempt_rebias, TRAPS) {
  assert(!SafepointSynchronize::is_at_safepoint(), "must not be called while at safepoint");
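
  // Overall strategy: first try cheap CAS-based paths that need no
  // safepoint (anonymous bias, stale bias left over from a bulk
  // revocation, expired epoch); only if none of those apply do we
  // update the per-klass heuristics and fall back to a VM operation.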
|
  // We can revoke the biases of anonymously-biased objects
  // efficiently enough that we should not cause these revocations to
  // update the heuristics because doing so may cause unwanted bulk
  // revocations (which are expensive) to occur.
  markOop mark = obj->mark();
  if (mark->is_biased_anonymously() && !attempt_rebias) {
    // We are probably trying to revoke the bias of this object due to
    // an identity hash code computation. Try to revoke the bias
    // without a safepoint. This is possible if we can successfully
    // compare-and-exchange an unbiased header into the mark word of
    // the object, meaning that no other thread has raced to acquire
    // the bias of the object.
    markOop biased_value       = mark;
    markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
    markOop res_mark = (markOop) Atomic::cmpxchg_ptr(unbiased_prototype, obj->mark_addr(), mark);
    if (res_mark == biased_value) {
      return BIAS_REVOKED;
    }
  } else if (mark->has_bias_pattern()) {
    Klass* k = Klass::cast(obj->klass());
    markOop prototype_header = k->prototype_header();
    if (!prototype_header->has_bias_pattern()) {
      // This object has a stale bias from before the bulk revocation
      // for this data type occurred. It's pointless to update the
      // heuristics at this point so simply update the header with a
      // CAS. If we fail this race, the object's bias has been revoked
      // by another thread so we simply return and let the caller deal
      // with it.
      markOop biased_value = mark;
      markOop res_mark = (markOop) Atomic::cmpxchg_ptr(prototype_header, obj->mark_addr(), mark);
      assert(!(*(obj->mark_addr()))->has_bias_pattern(), "even if we raced, should still be revoked");
      return BIAS_REVOKED;
    } else if (prototype_header->bias_epoch() != mark->bias_epoch()) {
      // The epoch of this biasing has expired indicating that the
      // object is effectively unbiased. Depending on whether we need
      // to rebias or revoke the bias of this object we can do it
      // efficiently enough with a CAS that we shouldn't update the
      // heuristics. This is normally done in the assembly code but we
      // can reach this point due to various points in the runtime
      // needing to revoke biases.
      if (attempt_rebias) {
        assert(THREAD->is_Java_thread(), "");
        markOop biased_value       = mark;
        markOop rebiased_prototype = markOopDesc::encode((JavaThread*) THREAD, mark->age(), prototype_header->bias_epoch());
        markOop res_mark = (markOop) Atomic::cmpxchg_ptr(rebiased_prototype, obj->mark_addr(), mark);
        if (res_mark == biased_value) {
          return BIAS_REVOKED_AND_REBIASED;
        }
      } else {
        markOop biased_value       = mark;
        markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
        markOop res_mark = (markOop) Atomic::cmpxchg_ptr(unbiased_prototype, obj->mark_addr(), mark);
        if (res_mark == biased_value) {
          return BIAS_REVOKED;
        }
      }
    }
  }

  HeuristicsResult heuristics = update_heuristics(obj(), attempt_rebias);
  if (heuristics == HR_NOT_BIASED) {
    return NOT_BIASED;
  } else if (heuristics == HR_SINGLE_REVOKE) {
    Klass *k = Klass::cast(obj->klass());
    markOop prototype_header = k->prototype_header();
    if (mark->biased_locker() == THREAD &&
        prototype_header->bias_epoch() == mark->bias_epoch()) {
      // A thread is trying to revoke the bias of an object biased
      // toward it, again likely due to an identity hash code
      // computation. We can again avoid a safepoint in this case
      // since we are only going to walk our own stack. There are no
      // races with revocations occurring in other threads because we
      // reach no safepoints in the revocation path.
      // Also check the epoch because even if threads match, another thread
      // can come in with a CAS to steal the bias of an object that has a
      // stale epoch.
      ResourceMark rm;
      if (TraceBiasedLocking) {
        tty->print_cr("Revoking bias by walking my own stack:");
      }
      BiasedLocking::Condition cond = revoke_bias(obj(), false, false, (JavaThread*) THREAD);
      ((JavaThread*) THREAD)->set_cached_monitor_info(NULL);
      assert(cond == BIAS_REVOKED, "why not?");
      return cond;
    } else {
      VM_RevokeBias revoke(&obj, (JavaThread*) THREAD);
      VMThread::execute(&revoke);
      return revoke.status_code();
    }
  }

  assert((heuristics == HR_BULK_REVOKE) ||
         (heuristics == HR_BULK_REBIAS), "?");
  VM_BulkRevokeBias bulk_revoke(&obj, (JavaThread*) THREAD,
                                (heuristics == HR_BULK_REBIAS),
                                attempt_rebias);
  VMThread::execute(&bulk_revoke);
  return bulk_revoke.status_code();
}
|

void BiasedLocking::revoke(GrowableArray<Handle>* objs) {
  assert(!SafepointSynchronize::is_at_safepoint(), "must not be called while at safepoint");
  if (objs->length() == 0) {
    return;
  }
  VM_RevokeBias revoke(objs, JavaThread::current());
  VMThread::execute(&revoke);
}
|

void BiasedLocking::revoke_at_safepoint(Handle h_obj) {
  assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
  oop obj = h_obj();
  HeuristicsResult heuristics = update_heuristics(obj, false);
  if (heuristics == HR_SINGLE_REVOKE) {
    revoke_bias(obj, false, false, NULL);
  } else if ((heuristics == HR_BULK_REBIAS) ||
             (heuristics == HR_BULK_REVOKE)) {
    bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL);
  }
  clean_up_cached_monitor_info();
}
|

void BiasedLocking::revoke_at_safepoint(GrowableArray<Handle>* objs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
  int len = objs->length();
  for (int i = 0; i < len; i++) {
    oop obj = (objs->at(i))();
    HeuristicsResult heuristics = update_heuristics(obj, false);
    if (heuristics == HR_SINGLE_REVOKE) {
      revoke_bias(obj, false, false, NULL);
    } else if ((heuristics == HR_BULK_REBIAS) ||
               (heuristics == HR_BULK_REVOKE)) {
      bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL);
    }
  }
  clean_up_cached_monitor_info();
}
|

void BiasedLocking::preserve_marks() {
  if (!UseBiasedLocking)
    return;

  assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");

  assert(_preserved_oop_stack  == NULL, "double initialization");
  assert(_preserved_mark_stack == NULL, "double initialization");

  // In order to reduce the number of mark words preserved during GC
  // due to the presence of biased locking, we reinitialize most mark
  // words to the class's prototype during GC -- even those which have
  // a currently valid bias owner. One important situation where we
  // must not clobber a bias is when a biased object is currently
  // locked. To handle this case we iterate over the currently-locked
  // monitors in a prepass and, if they are biased, preserve their
  // mark words here. This should be a relatively small set of objects
  // especially compared to the number of objects in the heap.
  _preserved_mark_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<markOop>(10, true);
  _preserved_oop_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<Handle>(10, true);
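  // Both stacks live on the C heap (tagged mtInternal for native memory
  // tracking) rather than in a resource area: they must survive until
  // restore_marks() deletes them after the GC completes.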
|
  ResourceMark rm;
  Thread* cur = Thread::current();
  for (JavaThread* thread = Threads::first(); thread != NULL; thread = thread->next()) {
    if (thread->has_last_Java_frame()) {
      RegisterMap rm(thread);
      for (javaVFrame* vf = thread->last_java_vframe(&rm); vf != NULL; vf = vf->java_sender()) {
        GrowableArray<MonitorInfo*> *monitors = vf->monitors();
        if (monitors != NULL) {
          int len = monitors->length();
          // Walk monitors youngest to oldest
          for (int i = len - 1; i >= 0; i--) {
            MonitorInfo* mon_info = monitors->at(i);
            if (mon_info->owner_is_scalar_replaced()) continue;
            oop owner = mon_info->owner();
            if (owner != NULL) {
              markOop mark = owner->mark();
              if (mark->has_bias_pattern()) {
                _preserved_oop_stack->push(Handle(cur, owner));
                _preserved_mark_stack->push(mark);
              }
            }
          }
        }
      }
    }
  }
}
|

void BiasedLocking::restore_marks() {
  if (!UseBiasedLocking)
    return;

  assert(_preserved_oop_stack  != NULL, "double free");
  assert(_preserved_mark_stack != NULL, "double free");

  int len = _preserved_oop_stack->length();
  for (int i = 0; i < len; i++) {
    Handle owner = _preserved_oop_stack->at(i);
    markOop mark = _preserved_mark_stack->at(i);
    owner->set_mark(mark);
  }

  delete _preserved_oop_stack;
  _preserved_oop_stack = NULL;
  delete _preserved_mark_stack;
  _preserved_mark_stack = NULL;
}
|
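
// These accessors expose the counters' addresses so that generated
// lock paths (interpreter and compiler stubs) can bump the counters in
// place when PrintBiasedLockingStatistics is enabled.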
int* BiasedLocking::total_entry_count_addr()                   { return _counters.total_entry_count_addr(); }
int* BiasedLocking::biased_lock_entry_count_addr()             { return _counters.biased_lock_entry_count_addr(); }
int* BiasedLocking::anonymously_biased_lock_entry_count_addr() { return _counters.anonymously_biased_lock_entry_count_addr(); }
int* BiasedLocking::rebiased_lock_entry_count_addr()           { return _counters.rebiased_lock_entry_count_addr(); }
int* BiasedLocking::revoked_lock_entry_count_addr()            { return _counters.revoked_lock_entry_count_addr(); }
int* BiasedLocking::fast_path_entry_count_addr()               { return _counters.fast_path_entry_count_addr(); }
int* BiasedLocking::slow_path_entry_count_addr()               { return _counters.slow_path_entry_count_addr(); }
|

// BiasedLockingCounters

int BiasedLockingCounters::slow_path_entry_count() {
  if (_slow_path_entry_count != 0) {
    return _slow_path_entry_count;
  }
  int sum = _biased_lock_entry_count + _anonymously_biased_lock_entry_count +
            _rebiased_lock_entry_count + _revoked_lock_entry_count +
            _fast_path_entry_count;

  return _total_entry_count - sum;
}

void BiasedLockingCounters::print_on(outputStream* st) {
  st->print_cr("# total entries: %d", _total_entry_count);
  st->print_cr("# biased lock entries: %d", _biased_lock_entry_count);
  st->print_cr("# anonymously biased lock entries: %d", _anonymously_biased_lock_entry_count);
  st->print_cr("# rebiased lock entries: %d", _rebiased_lock_entry_count);
  st->print_cr("# revoked lock entries: %d", _revoked_lock_entry_count);
  st->print_cr("# fast path lock entries: %d", _fast_path_entry_count);
  st->print_cr("# slow path lock entries: %d", slow_path_entry_count());
}