  return false;        // revert to slow-path
}

// -----------------------------------------------------------------------------
// Monitor Enter/Exit
// The interpreter and compiler assembly code tries to lock using the fast path
// of this algorithm. Make sure to update that code if the following function is
// changed. The implementation is extremely sensitive to race conditions. Be careful.

void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, TRAPS) {
  if (UseBiasedLocking) {
    if (!SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::revoke(obj, THREAD);
    } else {
      BiasedLocking::revoke_at_safepoint(obj);
    }
  }

  markWord mark = obj->mark();
  assert(!mark.has_bias_pattern(), "should not see bias pattern here");

  if (mark.is_neutral()) {
    // Anticipate successful CAS -- the ST of the displaced mark must
    // be visible <= the ST performed by the CAS.
    lock->set_displaced_header(mark);
    if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
      return;
    }
    // Fall through to inflate() ...
  } else if (mark.has_locker() &&
             THREAD->is_lock_owned((address)mark.locker())) {
    assert(lock != mark.locker(), "must not re-lock the same lock");
    assert(lock != (BasicLock*)obj->mark().value(), "don't relock with same BasicLock");
    lock->set_displaced_header(markWord::from_pointer(NULL));
    return;
  }

  // The object header will never be displaced to this lock,
  // so it does not matter what the value is, except that it
  // must be non-zero to avoid looking like a re-entrant lock,
  // and must not look locked either.
  lock->set_displaced_header(markWord::unused_mark());
  inflate(THREAD, obj(), inflate_cause_monitor_enter)->enter(THREAD);
}

void ObjectSynchronizer::exit(oop object, BasicLock* lock, TRAPS) {
  markWord mark = object->mark();
  // We cannot check for Biased Locking if we are racing an inflation.
  assert(mark == markWord::INFLATING() ||
         !mark.has_bias_pattern(), "should not see bias pattern here");

  markWord dhw = lock->displaced_header();
  if (dhw.value() == 0) {
    // If the displaced header is NULL, then this exit matches up with
    // a recursive enter. No real work to do here except for diagnostics.
#ifndef PRODUCT
    if (mark != markWord::INFLATING()) {
      // Only do diagnostics if we are not racing an inflation. Simply
      // exiting a recursive enter of a Java Monitor that is being
      // inflated is safe; see the has_monitor() comment below.
      assert(!mark.is_neutral(), "invariant");
      assert(!mark.has_locker() ||
             THREAD->is_lock_owned((address)mark.locker()), "invariant");
      if (mark.has_monitor()) {
        // The BasicLock's displaced_header is marked as a recursive
        // enter and we have an inflated Java Monitor (ObjectMonitor).
        // This is a special case where the Java Monitor was inflated
        // after this thread entered the stack-lock recursively. When a
        // Java Monitor is inflated, we cannot safely walk the Java
        // Monitor owner's stack and update the BasicLocks because a
        // Java Monitor can be asynchronously inflated by a thread that
        // does not own the Java Monitor.
        ObjectMonitor* m = mark.monitor();
        assert(((oop)(m->object()))->mark() == mark, "invariant");
        assert(m->is_entered(THREAD), "invariant");
      }
    }
#endif
    return;
  }

  if (mark == markWord::from_pointer(lock)) {
    // If the object is stack-locked by the current thread, try to
    // swing the displaced header from the BasicLock back to the mark.
    assert(dhw.is_neutral(), "invariant");
    if (object->cas_set_mark(dhw, mark) == mark) {
      return;
    }
  }

  // We have to take the slow-path of possible inflation and then exit.
  inflate(THREAD, object, inflate_cause_vm_internal)->exit(true, THREAD);
}

// -----------------------------------------------------------------------------
// Class Loader support to workaround deadlocks on the class loader lock objects
// Also used by GC
// JNI locks on java objects
// NOTE: must use heavy weight monitor to handle jni monitor enter
void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {
  // the current locking is from JNI instead of Java code
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }
  THREAD->set_current_pending_monitor_is_from_java(false);
  inflate(THREAD, obj(), inflate_cause_jni_enter)->enter(THREAD);
  THREAD->set_current_pending_monitor_is_from_java(true);
}

// NOTE: must use heavy weight monitor to handle jni monitor exit
void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
  if (UseBiasedLocking) {
    Handle h_obj(THREAD, obj);
    BiasedLocking::revoke(h_obj, THREAD);
    obj = h_obj();
  }
  assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");

  ObjectMonitor* monitor = inflate(THREAD, obj, inflate_cause_jni_exit);
  // If this thread has locked the object, exit the monitor. We
  // intentionally do not use CHECK here because we must exit the
  // monitor even if an exception is pending.
  if (monitor->check_owner(THREAD)) {
    monitor->exit(true, THREAD);
  }
}

// -----------------------------------------------------------------------------
// Internal VM locks on java objects
// standard constructor, allows locking failures
ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool do_lock) {
  _dolock = do_lock;
  _thread = thread;
  _thread->check_for_valid_safepoint_state();
  _obj = obj;

  if (_dolock) {
    ObjectSynchronizer::enter(_obj, &_lock, _thread);
  }
}

ObjectLocker::~ObjectLocker() {
  if (_dolock) {
    ObjectSynchronizer::exit(_obj(), &_lock, _thread);
  }
}


// -----------------------------------------------------------------------------
// Wait/Notify/NotifyAll
// NOTE: must use heavy weight monitor to handle wait()
int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_wait);
}

// hashCode() generation :
//
// Possibilities:
// * MD5Digest of {obj,stw_random}
// * CRC32 of {obj,stw_random} or any linear-feedback shift register function.
// * A DES- or AES-style SBox[] mechanism
// * One of the Phi-based schemes, such as:
//   2654435761 = 2^32 * Phi (golden ratio)
//   HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stw_random ;
// * A variation of Marsaglia's shift-xor RNG scheme.
// * (obj ^ stw_random) is appealing, but can result
//   in undesirable regularity in the hashCode values of adjacent objects
//   (objects allocated back-to-back, in particular). This could potentially
//   result in hashtable collisions and reduced hashtable efficiency.
//   There are simple ways to "diffuse" the middle address bits over the
//   generated hashCode values:

static inline intptr_t get_next_hash(Thread* self, oop obj) {
  intptr_t value = 0;
  if (hashCode == 0) {
    // This form uses global Park-Miller RNG.
    // On MP system we'll have lots of RW access to a global, so the
    // mechanism induces lots of coherency traffic.
    value = os::random();
  } else if (hashCode == 1) {
    // This variation has the property of being stable (idempotent)
    // between STW operations. This can be useful in some of the 1-0
    // synchronization schemes.
    intptr_t addr_bits = cast_from_oop<intptr_t>(obj) >> 3;
    value = addr_bits ^ (addr_bits >> 5) ^ GVars.stw_random;
  } else if (hashCode == 2) {
    value = 1;            // for sensitivity testing
  } else if (hashCode == 3) {
    value = ++GVars.hc_sequence;
  } else if (hashCode == 4) {
    value = cast_from_oop<intptr_t>(obj);
  } else {
    // Marsaglia's xor-shift scheme with thread-specific state
    // This is probably the best overall implementation -- we'll
    // likely make this the default in future releases.
    unsigned t = self->_hashStateX;
    t ^= (t << 11);
    self->_hashStateX = self->_hashStateY;
    self->_hashStateY = self->_hashStateZ;
    self->_hashStateZ = self->_hashStateW;
    unsigned v = self->_hashStateW;
    v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
    self->_hashStateW = v;
    value = v;
  }

  value &= markWord::hash_mask;
  if (value == 0) value = 0xBAD;
  assert(value != markWord::no_hash, "invariant");
  return value;
}

intptr_t ObjectSynchronizer::FastHashCode(Thread* self, oop obj) {
  if (UseBiasedLocking) {
    // NOTE: many places throughout the JVM do not expect a safepoint
    // to be taken here, in particular most operations on perm gen
    // objects. However, we only ever bias Java instances and all of
    // the call sites of identity_hash that might revoke biases have
    // been checked to make sure they can handle a safepoint. The
    // added check of the bias pattern is to avoid useless calls to
    // thread-local storage.
    if (obj->mark().has_bias_pattern()) {
      // Handle for oop obj in case of STW safepoint
      Handle hobj(self, obj);
      // Relaxing assertion for bug 6320749.
      assert(Universe::verify_in_progress() ||
             !SafepointSynchronize::is_at_safepoint(),
             "biases should not be seen by VM thread here");
      BiasedLocking::revoke(hobj, JavaThread::current());
      obj = hobj();
      assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
    }
  }

  // hashCode() is a heap mutator ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         self->is_Java_thread(), "invariant");
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         ((JavaThread *)self)->thread_state() != _thread_blocked, "invariant");

  ObjectMonitor* monitor = NULL;
  markWord temp, test;
  intptr_t hash;
  markWord mark = read_stable_mark(obj);

  // object should remain ineligible for biased locking
  assert(!mark.has_bias_pattern(), "invariant");

  if (mark.is_neutral()) {
    hash = mark.hash();               // this is a normal header
    if (hash != 0) {                  // if it has a hash, just return it
      return hash;
    }
    hash = get_next_hash(self, obj);  // allocate a new hash code
    temp = mark.copy_set_hash(hash);  // merge the hash code into the header
    // use (machine word version) atomic operation to install the hash
    test = obj->cas_set_mark(temp, mark);
    if (test == mark) {
      return hash;
    }
    // If the atomic operation failed, we must inflate the header
    // into a heavy weight monitor. We could add more code here
    // for the fast path, but it is not worth the complexity.
  } else if (mark.has_monitor()) {
    monitor = mark.monitor();
    temp = monitor->header();
    assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
    hash = temp.hash();
    if (hash != 0) {
      return hash;
    }
    // Skip to the following code to reduce code size
  } else if (self->is_lock_owned((address)mark.locker())) {
    temp = mark.displaced_mark_helper(); // this is a lightweight monitor owned
    assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
    hash = temp.hash();                  // by current thread, check if the displaced
    if (hash != 0) {                     // header contains hash code
      return hash;
    }
    // WARNING:
    //   The displaced header in the BasicLock on a thread's stack
    //   is strictly immutable. It CANNOT be changed in ANY cases.
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->thread_state() != _thread_blocked, "invariant");

  // Possible mark states: neutral, biased, stack-locked, inflated

  if (UseBiasedLocking && h_obj()->mark().has_bias_pattern()) {
    // CASE: biased
    BiasedLocking::revoke(h_obj, self);
    assert(!h_obj->mark().has_bias_pattern(),
           "biases should be revoked by now");
  }

  assert(self == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();
  markWord mark = read_stable_mark(obj);

  // CASE: stack-locked. Mark points to a BasicLock on the owner's stack.
  if (mark.has_locker()) {
    return self->is_lock_owned((address)mark.locker()) ?
      owner_self : owner_other;
  }

  // CASE: inflated. Mark (tagged pointer) points to an ObjectMonitor.
  // The Object:ObjectMonitor relationship is stable as long as we're
  // not at a safepoint.
  if (mark.has_monitor()) {
    void* owner = mark.monitor()->_owner;
    if (owner == NULL) return owner_none;
    return (owner == self ||
            self->is_lock_owned((address)owner)) ? owner_self : owner_other;
  }

  // CASE: neutral
  assert(mark.is_neutral(), "sanity check");
  return owner_none;           // it's unlocked
}

// FIXME: jvmti should call this
JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_obj) {
  if (UseBiasedLocking) {
    if (SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::revoke_at_safepoint(h_obj);
    } else {
      BiasedLocking::revoke(h_obj, JavaThread::current());
    }
    assert(!h_obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  oop obj = h_obj();
  address owner = NULL;

  markWord mark = read_stable_mark(obj);

  // Uncontended case, header points to stack
  if (mark.has_locker()) {
    owner = (address) mark.locker();
  }

  // Contended case, header points to ObjectMonitor (tagged pointer)
  else if (mark.has_monitor()) {
    ObjectMonitor* monitor = mark.monitor();
    assert(monitor != NULL, "monitor should be non-null");
    owner = (address) monitor->owner();
  }

  if (owner != NULL) {
    // The VMThread will delete the op when completed.
    VMThread::execute(new VM_ScavengeMonitors());
  }
}

ObjectMonitor* ObjectSynchronizer::om_alloc(Thread* self) {
  // A large MAXPRIVATE value reduces both list lock contention
  // and list coherency traffic, but also tends to increase the
  // number of ObjectMonitors in circulation as well as the STW
  // scavenge costs. As usual, we lean toward time in space-time
  // tradeoffs.
  const int MAXPRIVATE = 1024;
  stringStream ss;
  for (;;) {
    ObjectMonitor* m;

    // 1: try to allocate from the thread's local om_free_list.
    // Threads will attempt to allocate first from their local list, then
    // from the global list, and only after those attempts fail will the thread
    // attempt to instantiate new monitors. Thread-local free lists take
    // heat off the gListLock and improve allocation latency, as well as reducing
    // coherency traffic on the shared global list.
    m = self->om_free_list;
    if (m != NULL) {
      self->om_free_list = m->_next_om;
      self->om_free_count--;
      guarantee(m->object() == NULL, "invariant");
      m->_next_om = self->om_in_use_list;
      self->om_in_use_list = m;
      self->om_in_use_count++;
      return m;
    }

    // 2: try to allocate from the global g_free_list
    // CONSIDER: use muxTry() instead of muxAcquire().
    // If the muxTry() fails then drop immediately into case 3.
    // If we're using thread-local free lists then try
    // to reprovision the caller's free list.
    if (g_free_list != NULL) {
      // Reprovision the thread's om_free_list.
      // Use bulk transfers to reduce the allocation rate and heat
      // on various locks.
      Thread::muxAcquire(&gListLock, "om_alloc(1)");
      for (int i = self->om_free_provision; --i >= 0 && g_free_list != NULL;) {
        g_om_free_count--;
        ObjectMonitor* take = g_free_list;
        g_free_list = take->_next_om;
        guarantee(take->object() == NULL, "invariant");
        take->Recycle();
        om_release(self, take, false);
      }
      Thread::muxRelease(&gListLock);
      self->om_free_provision += 1 + (self->om_free_provision/2);
      if (self->om_free_provision > MAXPRIVATE) self->om_free_provision = MAXPRIVATE;

      const int mx = MonitorBound;
      if (mx > 0 && (g_om_population - g_om_free_count) > mx) {
        // Not enough ObjectMonitors on the global free list.
        // We can't safely induce a STW safepoint from om_alloc() as our thread
        // state may not be appropriate for such activities and callers may hold
        // naked oops, so instead we defer the action.
        InduceScavenge(self, "om_alloc");
      }
      continue;
    }

    // 3: allocate a block of new ObjectMonitors
    // Both the local and global free lists are empty -- resort to malloc().
    // In the current implementation ObjectMonitors are TSM - immortal.
    // Ideally, we'd write "new ObjectMonitor[_BLOCKSIZE]", but we want
    // each ObjectMonitor to start at the beginning of a cache line,
    // so we use align_up().
    // A better solution would be to use C++ placement-new.
    // BEWARE: As it stands currently, we don't run the ctors!
    assert(_BLOCKSIZE > 1, "invariant");
    size_t neededsize = sizeof(PaddedObjectMonitor) * _BLOCKSIZE;
    PaddedObjectMonitor* temp;
    size_t aligned_size = neededsize + (DEFAULT_CACHE_LINE_SIZE - 1);
    void* real_malloc_addr = NEW_C_HEAP_ARRAY(char, aligned_size, mtInternal);
    temp = (PaddedObjectMonitor*)align_up(real_malloc_addr, DEFAULT_CACHE_LINE_SIZE);
    (void)memset((void *) temp, 0, neededsize);

    // Format the block.
    // Initialize the linked list; each monitor points to its next,
    // forming the singly linked free list. The very first monitor
    // will point to the next block, which forms the block list.
    // The trick of using the 1st element in the block as g_block_list
    // linkage should be reconsidered. A better implementation would
    // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }

    for (int i = 1; i < _BLOCKSIZE; i++) {
      temp[i]._next_om = (ObjectMonitor *)&temp[i+1];
    }

    // terminate the last monitor as the end of list
    temp[_BLOCKSIZE - 1]._next_om = NULL;

    // Element [0] is reserved for global list linkage
    temp[0].set_object(CHAINMARKER);

    // Consider carving out this thread's current request from the
    // block in hand. This avoids some lock traffic and redundant
    // list activity.

    // Acquire the gListLock to manipulate g_block_list and g_free_list.
    // An Oyama-Taura-Yonezawa scheme might be more efficient.
    Thread::muxAcquire(&gListLock, "om_alloc(2)");
    g_om_population += _BLOCKSIZE - 1;
    g_om_free_count += _BLOCKSIZE - 1;

    // Add the new block to the list of extant blocks (g_block_list).
    // The very first ObjectMonitor in a block is reserved and dedicated.
    // It serves as blocklist "next" linkage.
    temp[0]._next_om = g_block_list;
    // There are lock-free uses of g_block_list so make sure that
    // the previous stores happen before we update g_block_list.
    OrderAccess::release_store(&g_block_list, temp);

    // Add the new string of ObjectMonitors to the global free list
    temp[_BLOCKSIZE - 1]._next_om = g_free_list;
    g_free_list = temp + 1;
    Thread::muxRelease(&gListLock);
  }
}

// Place "m" on the caller's private per-thread om_free_list.
// In practice there's no need to clamp or limit the number of
// monitors on a thread's om_free_list as the only non-allocation time
// we'll call om_release() is to return a monitor to the free list after
// a CAS attempt failed. This doesn't allow unbounded #s of monitors to
// accumulate on a thread's free list.
//
// Key constraint: all ObjectMonitors on a thread's free list and the global
// free list must have their object field set to null. This prevents the
// scavenger -- deflate_monitor_list() -- from reclaiming them while we
// are trying to release them.

void ObjectSynchronizer::om_release(Thread* self, ObjectMonitor* m,
                                    bool from_per_thread_alloc) {
  guarantee(m->header().value() == 0, "invariant");
  guarantee(m->object() == NULL, "invariant");
  stringStream ss;
  guarantee((m->is_busy() | m->_recursions) == 0, "freeing in-use monitor: "
            "%s, recursions=" INTPTR_FORMAT, m->is_busy_to_string(&ss),
            m->_recursions);
  // _next_om is used for both per-thread in-use and free lists so
  // we have to remove 'm' from the in-use list first (as needed).
  if (from_per_thread_alloc) {
    // Need to remove 'm' from om_in_use_list.
    ObjectMonitor* cur_mid_in_use = NULL;
    bool extracted = false;
    for (ObjectMonitor* mid = self->om_in_use_list; mid != NULL; cur_mid_in_use = mid, mid = mid->_next_om) {
      if (m == mid) {
        // extract from per-thread in-use list
        if (mid == self->om_in_use_list) {
          self->om_in_use_list = mid->_next_om;
        } else if (cur_mid_in_use != NULL) {
          cur_mid_in_use->_next_om = mid->_next_om; // maintain the current thread in-use list
        }
        extracted = true;
        self->om_in_use_count--;
        break;
      }
    }
    assert(extracted, "Should have extracted from in-use list");
  }

  m->_next_om = self->om_free_list;
  self->om_free_list = m;
  self->om_free_count++;
}

// Return ObjectMonitors on a moribund thread's free and in-use
// lists to the appropriate global lists. The ObjectMonitors on the
// per-thread in-use list may still be in use by other threads.
//
// We currently call om_flush() from Threads::remove() before the
// thread has been excised from the thread list and is no longer a
// mutator. This means that om_flush() cannot run concurrently with
// a safepoint and interleave with deflate_idle_monitors(). In
// particular, this ensures that the thread's in-use monitors are
// scanned by a GC safepoint, either via Thread::oops_do() (before
// om_flush() is called) or via ObjectSynchronizer::oops_do() (after
// om_flush() is called).

void ObjectSynchronizer::om_flush(Thread* self) {
  ObjectMonitor* free_list = self->om_free_list;
  ObjectMonitor* free_tail = NULL;
  int free_count = 0;
  if (free_list != NULL) {
    ObjectMonitor* s;
    // The thread is going away. Set 'free_tail' to the last per-thread free
    // monitor which will be linked to g_free_list below under the gListLock.
    stringStream ss;
    for (s = free_list; s != NULL; s = s->_next_om) {
      free_count++;
      free_tail = s;
      guarantee(s->object() == NULL, "invariant");
      guarantee(!s->is_busy(), "must be !is_busy: %s", s->is_busy_to_string(&ss));
    }
    guarantee(free_tail != NULL, "invariant");
    assert(self->om_free_count == free_count, "free-count off");
    self->om_free_list = NULL;
    self->om_free_count = 0;
  }

  ObjectMonitor* in_use_list = self->om_in_use_list;
  ObjectMonitor* in_use_tail = NULL;
  int in_use_count = 0;
  if (in_use_list != NULL) {
    // The thread is going away, however the ObjectMonitors on the
    // om_in_use_list may still be in-use by other threads. Link
    // them to in_use_tail, which will be linked into the global
    // in-use list g_om_in_use_list below, under the gListLock.
    ObjectMonitor *cur_om;
    for (cur_om = in_use_list; cur_om != NULL; cur_om = cur_om->_next_om) {
      in_use_tail = cur_om;
      in_use_count++;
    }
    guarantee(in_use_tail != NULL, "invariant");
    assert(self->om_in_use_count == in_use_count, "in-use count off");
    self->om_in_use_list = NULL;
    self->om_in_use_count = 0;
  }

  Thread::muxAcquire(&gListLock, "om_flush");
  if (free_tail != NULL) {
    free_tail->_next_om = g_free_list;
    g_free_list = free_list;
    g_om_free_count += free_count;
  }

  if (in_use_tail != NULL) {
    in_use_tail->_next_om = g_om_in_use_list;
    g_om_in_use_list = in_use_list;
    g_om_in_use_count += in_use_count;
  }

  Thread::muxRelease(&gListLock);

  LogStreamHandle(Debug, monitorinflation) lsh_debug;
  LogStreamHandle(Info, monitorinflation) lsh_info;
  LogStream* ls = NULL;
  if (log_is_enabled(Debug, monitorinflation)) {
    ls = &lsh_debug;
  } else if ((free_count != 0 || in_use_count != 0) &&
             log_is_enabled(Info, monitorinflation)) {
    ls = &lsh_info;
  }
  if (ls != NULL) {
    ls->print_cr("om_flush: jt=" INTPTR_FORMAT ", free_count=%d"
                 ", in_use_count=%d" ", om_free_provision=%d",
                 p2i(self), free_count, in_use_count, self->om_free_provision);
  }
}

static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
                                       const oop obj,
  event->commit();
}

// Fast path code shared by multiple functions
void ObjectSynchronizer::inflate_helper(oop obj) {
  markWord mark = obj->mark();
  if (mark.has_monitor()) {
    assert(ObjectSynchronizer::verify_objmon_isinpool(mark.monitor()), "monitor is invalid");
    assert(mark.monitor()->header().is_neutral(), "monitor must record a good object header");
    return;
  }
  inflate(Thread::current(), obj, inflate_cause_vm_internal);
}

ObjectMonitor* ObjectSynchronizer::inflate(Thread* self,
                                           oop object,
                                           const InflateCause cause) {
  // Inflate mutates the heap ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");

  EventJavaMonitorInflate event;

  for (;;) {
    const markWord mark = object->mark();
    assert(!mark.has_bias_pattern(), "invariant");

    // The mark can be in one of the following states:
    // *  Inflated     - just return
    // *  Stack-locked - coerce it to inflated
    // *  INFLATING    - busy wait for conversion to complete
    // *  Neutral      - aggressively inflate the object.
    // *  BIASED       - Illegal. We should never see this

    // CASE: inflated
    if (mark.has_monitor()) {
      ObjectMonitor* inf = mark.monitor();
      markWord dmw = inf->header();
      assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
      assert(inf->object() == object, "invariant");
      assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
      return inf;
    }

    // CASE: inflation in progress - inflating over a stack-lock.
    // Some other thread is converting from stack-locked to inflated.
    // Only that thread can complete inflation -- other threads must wait.
    // The INFLATING value is transient.
    // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
    // We could always eliminate polling by parking the thread on some auxiliary list.
    if (mark == markWord::INFLATING()) {
      read_stable_mark(object);
      continue;
    }

    // CASE: stack-locked
    // Could be stack-locked either by this thread or by some other thread.
    //
    // Why do we CAS a 0 into the mark-word instead of just CASing the
    // mark-word from the stack-locked value directly to the new inflated state?
    // Consider what happens when a thread unlocks a stack-locked object.
    // It attempts to use CAS to swing the displaced header value from the
1400 // on-stack basiclock back into the object header. Recall also that the |
1365 // on-stack BasicLock back into the object header. Recall also that the |
1401 // header value (hash code, etc) can reside in (a) the object header, or |
1366 // header value (hash code, etc) can reside in (a) the object header, or |
1402 // (b) a displaced header associated with the stack-lock, or (c) a displaced |
1367 // (b) a displaced header associated with the stack-lock, or (c) a displaced |
1403 // header in an objectMonitor. The inflate() routine must copy the header |
1368 // header in an ObjectMonitor. The inflate() routine must copy the header |
1404 // value from the basiclock on the owner's stack to the objectMonitor, all |
1369 // value from the BasicLock on the owner's stack to the ObjectMonitor, all |
1405 // the while preserving the hashCode stability invariants. If the owner |
1370 // the while preserving the hashCode stability invariants. If the owner |
1406 // decides to release the lock while the value is 0, the unlock will fail |
1371 // decides to release the lock while the value is 0, the unlock will fail |
1407 // and control will eventually pass from slow_exit() to inflate. The owner |
1372 // and control will eventually pass from slow_exit() to inflate. The owner |
1408 // will then spin, waiting for the 0 value to disappear. Put another way, |
1373 // will then spin, waiting for the 0 value to disappear. Put another way, |
1409 // the 0 causes the owner to stall if the owner happens to try to |
1374 // the 0 causes the owner to stall if the owner happens to try to |
1410 // drop the lock (restoring the header from the basiclock to the object) |
1375 // drop the lock (restoring the header from the BasicLock to the object) |
1411 // while inflation is in-progress. This protocol avoids races that |
1376 // while inflation is in-progress. This protocol avoids races that |
1412 // would otherwise permit hashCode values to change or "flicker" for an object. |
1377 // would otherwise permit hashCode values to change or "flicker" for an object. |
1413 // Critically, while object->mark is 0 mark->displaced_mark_helper() is stable. |
1378 // Critically, while object->mark is 0 mark.displaced_mark_helper() is stable. |
1414 // 0 serves as a "BUSY" inflate-in-progress indicator. |
1379 // 0 serves as a "BUSY" inflate-in-progress indicator. |
1415 |
1380 |
1416 |
1381 |
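Editor's sketch (illustration only, not part of this webrev): the inflate-over-stack-lock protocol described in the comment above, modeled with std::atomic instead of HotSpot's markWord/Atomic/OrderAccess APIs. All names here (SketchMonitor, sketch_inflate_over_stack_lock, INFLATING_SKETCH) are invented for the example.

#include <atomic>
#include <cstdint>

// Simplified stand-ins for ObjectMonitor and the mark word.
struct SketchMonitor { intptr_t displaced_header; void* owner; };
static const intptr_t INFLATING_SKETCH = 0;   // the "BUSY" 0 value

// Returns true if this thread won the right to inflate; on false the caller
// re-reads the mark word and retries the whole state dispatch.
bool sketch_inflate_over_stack_lock(std::atomic<intptr_t>* mark,
                                    intptr_t stack_locked_mark,
                                    const intptr_t* on_stack_displaced_header,
                                    SketchMonitor* m,
                                    void* stack_lock_owner) {
  intptr_t expected = stack_locked_mark;
  // Step 1: swing the mark from the stack-locked value to 0 (INFLATING).
  // From here on, the owner's unlock CAS fails and the owner spins.
  if (!mark->compare_exchange_strong(expected, INFLATING_SKETCH)) {
    return false;                      // interference -- lost the race
  }
  // Step 2: while the mark is 0 the owner cannot complete an unlock, so
  // the displaced header on the owner's stack is stable and safe to copy.
  m->displaced_header = *on_stack_displaced_header;
  m->owner = stack_lock_owner;
  // Step 3: publish the monitor; the release store makes the monitor's
  // fields visible no later than the new mark value.
  mark->store(reinterpret_cast<intptr_t>(m), std::memory_order_release);
  return true;
}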
1417 // fetch the displaced mark from the owner's stack. |
1382 // fetch the displaced mark from the owner's stack. |
1418 // The owner can't die or unwind past the lock while our INFLATING |
1383 // The owner can't die or unwind past the lock while our INFLATING |
1419 // object is in the mark. Furthermore the owner can't complete |
1384 // object is in the mark. Furthermore the owner can't complete |
1420 // an unlock on the object, either. |
1385 // an unlock on the object, either. |
1421 markOop dmw = mark->displaced_mark_helper(); |
1386 markWord dmw = mark.displaced_mark_helper(); |
1422 // Catch if the object's header is not neutral (not locked and |
1387 // Catch if the object's header is not neutral (not locked and |
1423 // not marked is what we care about here). |
1388 // not marked is what we care about here). |
1424 assert(dmw->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(dmw)); |
1389 assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value()); |
1425 |
1390 |
1426 // Set up monitor fields to proper values -- prepare the monitor |
1391 // Set up monitor fields to proper values -- prepare the monitor |
1427 m->set_header(dmw); |
1392 m->set_header(dmw); |
1428 |
1393 |
1429 // Optimization: if the mark->locker stack address is associated |
1394 // Optimization: if the mark.locker stack address is associated |
1430 // with this thread we could simply set m->_owner = Self. |
1395 // with this thread we could simply set m->_owner = self. |
1431 // Note that a thread can inflate an object |
1396 // Note that a thread can inflate an object |
1432 // that it has stack-locked -- as might happen in wait() -- directly |
1397 // that it has stack-locked -- as might happen in wait() -- directly |
1433 // with CAS. That is, we can avoid the xchg-NULL .... ST idiom. |
1398 // with CAS. That is, we can avoid the xchg-NULL .... ST idiom. |
1434 m->set_owner(mark->locker()); |
1399 m->set_owner(mark.locker()); |
1435 m->set_object(object); |
1400 m->set_object(object); |
1436 // TODO-FIXME: assert BasicLock->dhw != 0. |
1401 // TODO-FIXME: assert BasicLock->dhw != 0. |
1437 |
1402 |
1438 // Must preserve store ordering. The monitor state must |
1403 // Must preserve store ordering. The monitor state must |
1439 // be stable at the time of publishing the monitor address. |
1404 // be stable at the time of publishing the monitor address. |
1440 guarantee(object->mark() == markOopDesc::INFLATING(), "invariant"); |
1405 guarantee(object->mark() == markWord::INFLATING(), "invariant"); |
1441 object->release_set_mark(markOopDesc::encode(m)); |
1406 object->release_set_mark(markWord::encode(m)); |
1442 |
1407 |
1443 // Hopefully the performance counters are allocated on distinct cache lines |
1408 // Hopefully the performance counters are allocated on distinct cache lines |
1444 // to avoid false sharing on MP systems ... |
1409 // to avoid false sharing on MP systems ... |
1445 OM_PERFDATA_OP(Inflations, inc()); |
1410 OM_PERFDATA_OP(Inflations, inc()); |
1446 if (log_is_enabled(Trace, monitorinflation)) { |
1411 if (log_is_enabled(Trace, monitorinflation)) { |
1447 ResourceMark rm(Self); |
1412 ResourceMark rm(self); |
1448 lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark=" |
1413 lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark=" |
1449 INTPTR_FORMAT ", type='%s'", p2i(object), |
1414 INTPTR_FORMAT ", type='%s'", p2i(object), |
1450 p2i(object->mark()), object->klass()->external_name()); |
1415 object->mark().value(), object->klass()->external_name()); |
1451 } |
1416 } |
1452 if (event.should_commit()) { |
1417 if (event.should_commit()) { |
1453 post_monitor_inflate_event(&event, object, cause); |
1418 post_monitor_inflate_event(&event, object, cause); |
1454 } |
1419 } |
1455 return m; |
1420 return m; |
1456 } |
1421 } |
1457 |
1422 |
1458 // CASE: neutral |
1423 // CASE: neutral |
1459 // TODO-FIXME: for entry we currently inflate and then try to CAS _owner. |
1424 // TODO-FIXME: for entry we currently inflate and then try to CAS _owner. |
1460 // If we know we're inflating for entry it's better to inflate by swinging a |
1425 // If we know we're inflating for entry it's better to inflate by swinging a |
1461 // pre-locked objectMonitor pointer into the object header. A successful |
1426 // pre-locked ObjectMonitor pointer into the object header. A successful |
1462 // CAS inflates the object *and* confers ownership to the inflating thread. |
1427 // CAS inflates the object *and* confers ownership to the inflating thread. |
1463 // In the current implementation we use a 2-step mechanism where we CAS() |
1428 // In the current implementation we use a 2-step mechanism where we CAS() |
1464 // to inflate and then CAS() again to try to swing _owner from NULL to Self. |
1429 // to inflate and then CAS() again to try to swing _owner from NULL to self. |
1465 // An inflateTry() method that we could call from fast_enter() and slow_enter() |
1430 // An inflateTry() method that we could call from enter() would be useful. |
1466 // would be useful. |
|
1467 |
1431 |
1468 // Catch if the object's header is not neutral (not locked and |
1432 // Catch if the object's header is not neutral (not locked and |
1469 // not marked is what we care about here). |
1433 // not marked is what we care about here). |
1470 assert(mark->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(mark)); |
1434 assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value()); |
1471 ObjectMonitor * m = omAlloc(Self); |
1435 ObjectMonitor* m = om_alloc(self); |
1472 // prepare m for installation - set monitor to initial state |
1436 // prepare m for installation - set monitor to initial state |
1473 m->Recycle(); |
1437 m->Recycle(); |
1474 m->set_header(mark); |
1438 m->set_header(mark); |
1475 m->set_owner(NULL); |
|
1476 m->set_object(object); |
1439 m->set_object(object); |
1477 m->_recursions = 0; |
|
1478 m->_Responsible = NULL; |
1440 m->_Responsible = NULL; |
1479 m->_SpinDuration = ObjectMonitor::Knob_SpinLimit; // consider: keep metastats by type/class |
1441 m->_SpinDuration = ObjectMonitor::Knob_SpinLimit; // consider: keep metastats by type/class |
1480 |
1442 |
1481 if (object->cas_set_mark(markOopDesc::encode(m), mark) != mark) { |
1443 if (object->cas_set_mark(markWord::encode(m), mark) != mark) { |
1482 m->set_header(NULL); |
1444 m->set_header(markWord::zero()); |
1483 m->set_object(NULL); |
1445 m->set_object(NULL); |
1484 m->Recycle(); |
1446 m->Recycle(); |
1485 omRelease(Self, m, true); |
1447 om_release(self, m, true); |
1486 m = NULL; |
1448 m = NULL; |
1487 continue; |
1449 continue; |
1488 // interference - the markword changed - just retry. |
1450 // interference - the markword changed - just retry. |
1489 // The state-transitions are one-way, so there's no chance of |
1451 // The state-transitions are one-way, so there's no chance of |
1490 // live-lock -- "Inflated" is an absorbing state. |
1452 // live-lock -- "Inflated" is an absorbing state. |
1590 // process the same monitor lists concurrently. |
1560 // process the same monitor lists concurrently. |
1591 // |
1561 // |
1592 // See also ParallelSPCleanupTask and |
1562 // See also ParallelSPCleanupTask and |
1593 // SafepointSynchronize::do_cleanup_tasks() in safepoint.cpp and |
1563 // SafepointSynchronize::do_cleanup_tasks() in safepoint.cpp and |
1594 // Threads::parallel_java_threads_do() in thread.cpp. |
1564 // Threads::parallel_java_threads_do() in thread.cpp. |
1595 int ObjectSynchronizer::deflate_monitor_list(ObjectMonitor** listHeadp, |
1565 int ObjectSynchronizer::deflate_monitor_list(ObjectMonitor** list_p, |
1596 ObjectMonitor** freeHeadp, |
1566 ObjectMonitor** free_head_p, |
1597 ObjectMonitor** freeTailp) { |
1567 ObjectMonitor** free_tail_p) { |
1598 ObjectMonitor* mid; |
1568 ObjectMonitor* mid; |
1599 ObjectMonitor* next; |
1569 ObjectMonitor* next; |
1600 ObjectMonitor* cur_mid_in_use = NULL; |
1570 ObjectMonitor* cur_mid_in_use = NULL; |
1601 int deflated_count = 0; |
1571 int deflated_count = 0; |
1602 |
1572 |
1603 for (mid = *listHeadp; mid != NULL;) { |
1573 for (mid = *list_p; mid != NULL;) { |
1604 oop obj = (oop) mid->object(); |
1574 oop obj = (oop) mid->object(); |
1605 if (obj != NULL && deflate_monitor(mid, obj, freeHeadp, freeTailp)) { |
1575 if (obj != NULL && deflate_monitor(mid, obj, free_head_p, free_tail_p)) { |
1606 // if deflate_monitor succeeded, |
1576 // Deflation succeeded and already updated free_head_p and |
1607 // extract from per-thread in-use list |
1577 // free_tail_p as needed. Finish the move to the local free list |
1608 if (mid == *listHeadp) { |
1578 // by unlinking mid from the global or per-thread in-use list. |
1609 *listHeadp = mid->FreeNext; |
1579 if (mid == *list_p) { |
|
1580 *list_p = mid->_next_om; |
1610 } else if (cur_mid_in_use != NULL) { |
1581 } else if (cur_mid_in_use != NULL) { |
1611 cur_mid_in_use->FreeNext = mid->FreeNext; // maintain the current thread in-use list |
1582 cur_mid_in_use->_next_om = mid->_next_om; // maintain the current thread in-use list |
1612 } |
1583 } |
1613 next = mid->FreeNext; |
1584 next = mid->_next_om; |
1614 mid->FreeNext = NULL; // This mid is current tail in the freeHeadp list |
1585 mid->_next_om = NULL; // This mid is current tail in the free_head_p list |
1615 mid = next; |
1586 mid = next; |
1616 deflated_count++; |
1587 deflated_count++; |
1617 } else { |
1588 } else { |
1618 cur_mid_in_use = mid; |
1589 cur_mid_in_use = mid; |
1619 mid = mid->FreeNext; |
1590 mid = mid->_next_om; |
1620 } |
1591 } |
1621 } |
1592 } |
1622 return deflated_count; |
1593 return deflated_count; |
1623 } |
1594 } |
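Editor's sketch (illustration only): the list-surgery pattern used by deflate_monitor_list() above, reduced to a generic singly-linked list. In the real code the append to the local free segment is done inside deflate_monitor(); here it is inlined, and SketchNode/sketch_scavenge_list are invented names.

struct SketchNode { SketchNode* next; bool idle; };  // stands in for ObjectMonitor/_next_om

// Walk an in-use list, moving every idle node onto a local free segment
// (free_head_p/free_tail_p). Tracking the predecessor (cur_mid_in_use in
// the real code) allows O(1) unlinking of nodes in the middle of the list.
int sketch_scavenge_list(SketchNode** list_p,
                         SketchNode** free_head_p, SketchNode** free_tail_p) {
  SketchNode* prev = nullptr;
  int scavenged = 0;
  for (SketchNode* cur = *list_p; cur != nullptr; ) {
    SketchNode* next = cur->next;
    if (cur->idle) {
      if (cur == *list_p)        *list_p = next;      // unlink at the head
      else if (prev != nullptr)  prev->next = next;   // unlink mid-list
      cur->next = nullptr;                            // cur becomes the new tail
      if (*free_head_p == nullptr) *free_head_p = cur;
      else                         (*free_tail_p)->next = cur;
      *free_tail_p = cur;
      scavenged++;
    } else {
      prev = cur;               // only advance past nodes that stay in use
    }
    cur = next;
  }
  return scavenged;
}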
1624 |
1595 |
1625 void ObjectSynchronizer::prepare_deflate_idle_monitors(DeflateMonitorCounters* counters) { |
1596 void ObjectSynchronizer::prepare_deflate_idle_monitors(DeflateMonitorCounters* counters) { |
1626 counters->nInuse = 0; // currently associated with objects |
1597 counters->n_in_use = 0; // currently associated with objects |
1627 counters->nInCirculation = 0; // extant |
1598 counters->n_in_circulation = 0; // extant |
1628 counters->nScavenged = 0; // reclaimed (global and per-thread) |
1599 counters->n_scavenged = 0; // reclaimed (global and per-thread) |
1629 counters->perThreadScavenged = 0; // per-thread scavenge total |
1600 counters->per_thread_scavenged = 0; // per-thread scavenge total |
1630 counters->perThreadTimes = 0.0; // per-thread scavenge times |
1601 counters->per_thread_times = 0.0; // per-thread scavenge times |
1631 } |
1602 } |
1632 |
1603 |
1633 void ObjectSynchronizer::deflate_idle_monitors(DeflateMonitorCounters* counters) { |
1604 void ObjectSynchronizer::deflate_idle_monitors(DeflateMonitorCounters* counters) { |
1634 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); |
1605 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); |
1635 bool deflated = false; |
1606 bool deflated = false; |
1636 |
1607 |
1637 ObjectMonitor * freeHeadp = NULL; // Local SLL of scavenged monitors |
1608 ObjectMonitor* free_head_p = NULL; // Local SLL of scavenged monitors |
1638 ObjectMonitor * freeTailp = NULL; |
1609 ObjectMonitor* free_tail_p = NULL; |
1639 elapsedTimer timer; |
1610 elapsedTimer timer; |
1640 |
1611 |
1641 if (log_is_enabled(Info, monitorinflation)) { |
1612 if (log_is_enabled(Info, monitorinflation)) { |
1642 timer.start(); |
1613 timer.start(); |
1643 } |
1614 } |
1644 |
1615 |
1645 // Prevent omFlush from changing mids in Thread dtors during deflation |
1616 // Prevent om_flush from changing mids in Thread dtors during deflation |
1646 // And in case the vm thread is acquiring a lock during a safepoint |
1617 // And in case the vm thread is acquiring a lock during a safepoint |
1647 // See e.g. 6320749 |
1618 // See e.g. 6320749 |
1648 Thread::muxAcquire(&gListLock, "deflate_idle_monitors"); |
1619 Thread::muxAcquire(&gListLock, "deflate_idle_monitors"); |
1649 |
1620 |
1650 // Note: the thread-local monitors lists get deflated in |
1621 // Note: the thread-local monitors lists get deflated in |
1651 // a separate pass. See deflate_thread_local_monitors(). |
1622 // a separate pass. See deflate_thread_local_monitors(). |
1652 |
1623 |
1653 // For moribund threads, scan gOmInUseList |
1624 // For moribund threads, scan g_om_in_use_list |
1654 int deflated_count = 0; |
1625 int deflated_count = 0; |
1655 if (gOmInUseList) { |
1626 if (g_om_in_use_list) { |
1656 counters->nInCirculation += gOmInUseCount; |
1627 counters->n_in_circulation += g_om_in_use_count; |
1657 deflated_count = deflate_monitor_list((ObjectMonitor **)&gOmInUseList, &freeHeadp, &freeTailp); |
1628 deflated_count = deflate_monitor_list((ObjectMonitor **)&g_om_in_use_list, &free_head_p, &free_tail_p); |
1658 gOmInUseCount -= deflated_count; |
1629 g_om_in_use_count -= deflated_count; |
1659 counters->nScavenged += deflated_count; |
1630 counters->n_scavenged += deflated_count; |
1660 counters->nInuse += gOmInUseCount; |
1631 counters->n_in_use += g_om_in_use_count; |
1661 } |
1632 } |
1662 |
1633 |
1663 // Move the scavenged monitors back to the global free list. |
1634 if (free_head_p != NULL) { |
1664 if (freeHeadp != NULL) { |
1635 // Move the deflated ObjectMonitors back to the global free list. |
1665 guarantee(freeTailp != NULL && counters->nScavenged > 0, "invariant"); |
1636 guarantee(free_tail_p != NULL && counters->n_scavenged > 0, "invariant"); |
1666 assert(freeTailp->FreeNext == NULL, "invariant"); |
1637 assert(free_tail_p->_next_om == NULL, "invariant"); |
1667 // constant-time list splice - prepend scavenged segment to gFreeList |
1638 // constant-time list splice - prepend scavenged segment to g_free_list |
1668 freeTailp->FreeNext = gFreeList; |
1639 free_tail_p->_next_om = g_free_list; |
1669 gFreeList = freeHeadp; |
1640 g_free_list = free_head_p; |
1670 } |
1641 } |
1671 Thread::muxRelease(&gListLock); |
1642 Thread::muxRelease(&gListLock); |
1672 timer.stop(); |
1643 timer.stop(); |
1673 |
1644 |
1674 LogStreamHandle(Debug, monitorinflation) lsh_debug; |
1645 LogStreamHandle(Debug, monitorinflation) lsh_debug; |
1675 LogStreamHandle(Info, monitorinflation) lsh_info; |
1646 LogStreamHandle(Info, monitorinflation) lsh_info; |
1676 LogStream * ls = NULL; |
1647 LogStream* ls = NULL; |
1677 if (log_is_enabled(Debug, monitorinflation)) { |
1648 if (log_is_enabled(Debug, monitorinflation)) { |
1678 ls = &lsh_debug; |
1649 ls = &lsh_debug; |
1679 } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) { |
1650 } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) { |
1680 ls = &lsh_info; |
1651 ls = &lsh_info; |
1681 } |
1652 } |
1687 void ObjectSynchronizer::finish_deflate_idle_monitors(DeflateMonitorCounters* counters) { |
1658 void ObjectSynchronizer::finish_deflate_idle_monitors(DeflateMonitorCounters* counters) { |
1688 // Report the cumulative time for deflating each thread's idle |
1659 // Report the cumulative time for deflating each thread's idle |
1689 // monitors. Note: if the work is split among more than one |
1660 // monitors. Note: if the work is split among more than one |
1690 // worker thread, then the reported time will likely be more |
1661 // worker thread, then the reported time will likely be more |
1691 // than a beginning to end measurement of the phase. |
1662 // than a beginning to end measurement of the phase. |
1692 log_info(safepoint, cleanup)("deflating per-thread idle monitors, %3.7f secs, monitors=%d", counters->perThreadTimes, counters->perThreadScavenged); |
1663 log_info(safepoint, cleanup)("deflating per-thread idle monitors, %3.7f secs, monitors=%d", counters->per_thread_times, counters->per_thread_scavenged); |
1693 |
1664 |
1694 gMonitorFreeCount += counters->nScavenged; |
1665 g_om_free_count += counters->n_scavenged; |
1695 |
1666 |
1696 if (log_is_enabled(Debug, monitorinflation)) { |
1667 if (log_is_enabled(Debug, monitorinflation)) { |
1697 // exit_globals()'s call to audit_and_print_stats() is done |
1668 // exit_globals()'s call to audit_and_print_stats() is done |
1698 // at the Info level. |
1669 // at the Info level. |
1699 ObjectSynchronizer::audit_and_print_stats(false /* on_exit */); |
1670 ObjectSynchronizer::audit_and_print_stats(false /* on_exit */); |
1700 } else if (log_is_enabled(Info, monitorinflation)) { |
1671 } else if (log_is_enabled(Info, monitorinflation)) { |
1701 Thread::muxAcquire(&gListLock, "finish_deflate_idle_monitors"); |
1672 Thread::muxAcquire(&gListLock, "finish_deflate_idle_monitors"); |
1702 log_info(monitorinflation)("gMonitorPopulation=%d, gOmInUseCount=%d, " |
1673 log_info(monitorinflation)("g_om_population=%d, g_om_in_use_count=%d, " |
1703 "gMonitorFreeCount=%d", gMonitorPopulation, |
1674 "g_om_free_count=%d", g_om_population, |
1704 gOmInUseCount, gMonitorFreeCount); |
1675 g_om_in_use_count, g_om_free_count); |
1705 Thread::muxRelease(&gListLock); |
1676 Thread::muxRelease(&gListLock); |
1706 } |
1677 } |
1707 |
1678 |
1708 ForceMonitorScavenge = 0; // Reset |
1679 ForceMonitorScavenge = 0; // Reset |
1709 |
1680 |
1710 OM_PERFDATA_OP(Deflations, inc(counters->nScavenged)); |
1681 OM_PERFDATA_OP(Deflations, inc(counters->n_scavenged)); |
1711 OM_PERFDATA_OP(MonExtant, set_value(counters->nInCirculation)); |
1682 OM_PERFDATA_OP(MonExtant, set_value(counters->n_in_circulation)); |
1712 |
1683 |
1713 GVars.stwRandom = os::random(); |
1684 GVars.stw_random = os::random(); |
1714 GVars.stwCycle++; |
1685 GVars.stw_cycle++; |
1715 } |
1686 } |
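Editor's note (illustration only): a rough, hypothetical driver showing how the deflation entry points in this file fit together at a safepoint; the real work is split across cleanup worker threads (see the ParallelSPCleanupTask reference above), so this is not the actual safepoint code.

void sketch_safepoint_deflation_pass() {
  DeflateMonitorCounters counters;
  ObjectSynchronizer::prepare_deflate_idle_monitors(&counters);
  // Global in-use list (monitors from moribund threads).
  ObjectSynchronizer::deflate_idle_monitors(&counters);
  // Per-thread in-use lists -- the separate pass mentioned above; the
  // function itself appears just below in this listing.
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread* jt = jtiwh.next(); ) {
    ObjectSynchronizer::deflate_thread_local_monitors(jt, &counters);
  }
  // Publish totals: per-thread times, perf data counters, etc.
  ObjectSynchronizer::finish_deflate_idle_monitors(&counters);
}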
1716 |
1687 |
1717 void ObjectSynchronizer::deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters) { |
1688 void ObjectSynchronizer::deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters) { |
1718 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); |
1689 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); |
1719 |
1690 |
1720 ObjectMonitor * freeHeadp = NULL; // Local SLL of scavenged monitors |
1691 ObjectMonitor* free_head_p = NULL; // Local SLL of scavenged monitors |
1721 ObjectMonitor * freeTailp = NULL; |
1692 ObjectMonitor* free_tail_p = NULL; |
1722 elapsedTimer timer; |
1693 elapsedTimer timer; |
1723 |
1694 |
1724 if (log_is_enabled(Info, safepoint, cleanup) || |
1695 if (log_is_enabled(Info, safepoint, cleanup) || |
1725 log_is_enabled(Info, monitorinflation)) { |
1696 log_is_enabled(Info, monitorinflation)) { |
1726 timer.start(); |
1697 timer.start(); |
1727 } |
1698 } |
1728 |
1699 |
1729 int deflated_count = deflate_monitor_list(thread->omInUseList_addr(), &freeHeadp, &freeTailp); |
1700 int deflated_count = deflate_monitor_list(thread->om_in_use_list_addr(), &free_head_p, &free_tail_p); |
1730 |
1701 |
1731 Thread::muxAcquire(&gListLock, "deflate_thread_local_monitors"); |
1702 Thread::muxAcquire(&gListLock, "deflate_thread_local_monitors"); |
1732 |
1703 |
1733 // Adjust counters |
1704 // Adjust counters |
1734 counters->nInCirculation += thread->omInUseCount; |
1705 counters->n_in_circulation += thread->om_in_use_count; |
1735 thread->omInUseCount -= deflated_count; |
1706 thread->om_in_use_count -= deflated_count; |
1736 counters->nScavenged += deflated_count; |
1707 counters->n_scavenged += deflated_count; |
1737 counters->nInuse += thread->omInUseCount; |
1708 counters->n_in_use += thread->om_in_use_count; |
1738 counters->perThreadScavenged += deflated_count; |
1709 counters->per_thread_scavenged += deflated_count; |
1739 |
1710 |
1740 // Move the scavenged monitors back to the global free list. |
1711 if (free_head_p != NULL) { |
1741 if (freeHeadp != NULL) { |
1712 // Move the deflated ObjectMonitors back to the global free list. |
1742 guarantee(freeTailp != NULL && deflated_count > 0, "invariant"); |
1713 guarantee(free_tail_p != NULL && deflated_count > 0, "invariant"); |
1743 assert(freeTailp->FreeNext == NULL, "invariant"); |
1714 assert(free_tail_p->_next_om == NULL, "invariant"); |
1744 |
1715 |
1745 // constant-time list splice - prepend scavenged segment to gFreeList |
1716 // constant-time list splice - prepend scavenged segment to g_free_list |
1746 freeTailp->FreeNext = gFreeList; |
1717 free_tail_p->_next_om = g_free_list; |
1747 gFreeList = freeHeadp; |
1718 g_free_list = free_head_p; |
1748 } |
1719 } |
1749 |
1720 |
1750 timer.stop(); |
1721 timer.stop(); |
1751 // Safepoint logging cares about cumulative perThreadTimes and |
1722 // Safepoint logging cares about cumulative per_thread_times and |
1752 // we'll capture most of the cost, but not the muxRelease() which |
1723 // we'll capture most of the cost, but not the muxRelease() which |
1753 // should be cheap. |
1724 // should be cheap. |
1754 counters->perThreadTimes += timer.seconds(); |
1725 counters->per_thread_times += timer.seconds(); |
1755 |
1726 |
1756 Thread::muxRelease(&gListLock); |
1727 Thread::muxRelease(&gListLock); |
1757 |
1728 |
1758 LogStreamHandle(Debug, monitorinflation) lsh_debug; |
1729 LogStreamHandle(Debug, monitorinflation) lsh_debug; |
1759 LogStreamHandle(Info, monitorinflation) lsh_info; |
1730 LogStreamHandle(Info, monitorinflation) lsh_info; |
1760 LogStream * ls = NULL; |
1731 LogStream* ls = NULL; |
1761 if (log_is_enabled(Debug, monitorinflation)) { |
1732 if (log_is_enabled(Debug, monitorinflation)) { |
1762 ls = &lsh_debug; |
1733 ls = &lsh_debug; |
1763 } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) { |
1734 } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) { |
1764 ls = &lsh_info; |
1735 ls = &lsh_info; |
1765 } |
1736 } |
2025 "must have non-NULL _object field.", p2i(n)); |
1997 "must have non-NULL _object field.", p2i(n)); |
2026 } |
1998 } |
2027 *error_cnt_p = *error_cnt_p + 1; |
1999 *error_cnt_p = *error_cnt_p + 1; |
2028 } |
2000 } |
2029 const oop obj = (oop)n->object(); |
2001 const oop obj = (oop)n->object(); |
2030 const markOop mark = obj->mark(); |
2002 const markWord mark = obj->mark(); |
2031 if (!mark->has_monitor()) { |
2003 if (!mark.has_monitor()) { |
2032 if (jt != NULL) { |
2004 if (jt != NULL) { |
2033 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT |
2005 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT |
2034 ": in-use per-thread monitor's object does not think " |
2006 ": in-use per-thread monitor's object does not think " |
2035 "it has a monitor: obj=" INTPTR_FORMAT ", mark=" |
2007 "it has a monitor: obj=" INTPTR_FORMAT ", mark=" |
2036 INTPTR_FORMAT, p2i(jt), p2i(n), p2i(obj), p2i(mark)); |
2008 INTPTR_FORMAT, p2i(jt), p2i(n), p2i(obj), mark.value()); |
2037 } else { |
2009 } else { |
2038 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global " |
2010 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global " |
2039 "monitor's object does not think it has a monitor: obj=" |
2011 "monitor's object does not think it has a monitor: obj=" |
2040 INTPTR_FORMAT ", mark=" INTPTR_FORMAT, p2i(n), |
2012 INTPTR_FORMAT ", mark=" INTPTR_FORMAT, p2i(n), |
2041 p2i(obj), p2i(mark)); |
2013 p2i(obj), mark.value()); |
2042 } |
2014 } |
2043 *error_cnt_p = *error_cnt_p + 1; |
2015 *error_cnt_p = *error_cnt_p + 1; |
2044 } |
2016 } |
2045 ObjectMonitor * const obj_mon = mark->monitor(); |
2017 ObjectMonitor* const obj_mon = mark.monitor(); |
2046 if (n != obj_mon) { |
2018 if (n != obj_mon) { |
2047 if (jt != NULL) { |
2019 if (jt != NULL) { |
2048 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT |
2020 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT |
2049 ": in-use per-thread monitor's object does not refer " |
2021 ": in-use per-thread monitor's object does not refer " |
2050 "to the same monitor: obj=" INTPTR_FORMAT ", mark=" |
2022 "to the same monitor: obj=" INTPTR_FORMAT ", mark=" |
2051 INTPTR_FORMAT ", obj_mon=" INTPTR_FORMAT, p2i(jt), |
2023 INTPTR_FORMAT ", obj_mon=" INTPTR_FORMAT, p2i(jt), |
2052 p2i(n), p2i(obj), p2i(mark), p2i(obj_mon)); |
2024 p2i(n), p2i(obj), mark.value(), p2i(obj_mon)); |
2053 } else { |
2025 } else { |
2054 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global " |
2026 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global " |
2055 "monitor's object does not refer to the same monitor: obj=" |
2027 "monitor's object does not refer to the same monitor: obj=" |
2056 INTPTR_FORMAT ", mark=" INTPTR_FORMAT ", obj_mon=" |
2028 INTPTR_FORMAT ", mark=" INTPTR_FORMAT ", obj_mon=" |
2057 INTPTR_FORMAT, p2i(n), p2i(obj), p2i(mark), p2i(obj_mon)); |
2029 INTPTR_FORMAT, p2i(n), p2i(obj), mark.value(), p2i(obj_mon)); |
2058 } |
2030 } |
2059 *error_cnt_p = *error_cnt_p + 1; |
2031 *error_cnt_p = *error_cnt_p + 1; |
2060 } |
2032 } |
2061 } |
2033 } |
2062 |
2034 |
2063 // Check the thread's free list and count; log the results of the checks. |
2035 // Check the thread's free list and count; log the results of the checks. |
2064 void ObjectSynchronizer::chk_per_thread_free_list_and_count(JavaThread *jt, |
2036 void ObjectSynchronizer::chk_per_thread_free_list_and_count(JavaThread *jt, |
2065 outputStream * out, |
2037 outputStream * out, |
2066 int *error_cnt_p) { |
2038 int *error_cnt_p) { |
2067 int chkOmFreeCount = 0; |
2039 int chk_om_free_count = 0; |
2068 for (ObjectMonitor * n = jt->omFreeList; n != NULL; n = n->FreeNext) { |
2040 for (ObjectMonitor* n = jt->om_free_list; n != NULL; n = n->_next_om) { |
2069 chk_free_entry(jt, n, out, error_cnt_p); |
2041 chk_free_entry(jt, n, out, error_cnt_p); |
2070 chkOmFreeCount++; |
2042 chk_om_free_count++; |
2071 } |
2043 } |
2072 if (jt->omFreeCount == chkOmFreeCount) { |
2044 if (jt->om_free_count == chk_om_free_count) { |
2073 out->print_cr("jt=" INTPTR_FORMAT ": omFreeCount=%d equals " |
2045 out->print_cr("jt=" INTPTR_FORMAT ": om_free_count=%d equals " |
2074 "chkOmFreeCount=%d", p2i(jt), jt->omFreeCount, chkOmFreeCount); |
2046 "chk_om_free_count=%d", p2i(jt), jt->om_free_count, chk_om_free_count); |
2075 } else { |
2047 } else { |
2076 out->print_cr("ERROR: jt=" INTPTR_FORMAT ": omFreeCount=%d is not " |
2048 out->print_cr("ERROR: jt=" INTPTR_FORMAT ": om_free_count=%d is not " |
2077 "equal to chkOmFreeCount=%d", p2i(jt), jt->omFreeCount, |
2049 "equal to chk_om_free_count=%d", p2i(jt), jt->om_free_count, |
2078 chkOmFreeCount); |
2050 chk_om_free_count); |
2079 *error_cnt_p = *error_cnt_p + 1; |
2051 *error_cnt_p = *error_cnt_p + 1; |
2080 } |
2052 } |
2081 } |
2053 } |
2082 |
2054 |
2083 // Check the thread's in-use list and count; log the results of the checks. |
2055 // Check the thread's in-use list and count; log the results of the checks. |
2084 void ObjectSynchronizer::chk_per_thread_in_use_list_and_count(JavaThread *jt, |
2056 void ObjectSynchronizer::chk_per_thread_in_use_list_and_count(JavaThread *jt, |
2085 outputStream * out, |
2057 outputStream * out, |
2086 int *error_cnt_p) { |
2058 int *error_cnt_p) { |
2087 int chkOmInUseCount = 0; |
2059 int chk_om_in_use_count = 0; |
2088 for (ObjectMonitor * n = jt->omInUseList; n != NULL; n = n->FreeNext) { |
2060 for (ObjectMonitor* n = jt->om_in_use_list; n != NULL; n = n->_next_om) { |
2089 chk_in_use_entry(jt, n, out, error_cnt_p); |
2061 chk_in_use_entry(jt, n, out, error_cnt_p); |
2090 chkOmInUseCount++; |
2062 chk_om_in_use_count++; |
2091 } |
2063 } |
2092 if (jt->omInUseCount == chkOmInUseCount) { |
2064 if (jt->om_in_use_count == chk_om_in_use_count) { |
2093 out->print_cr("jt=" INTPTR_FORMAT ": omInUseCount=%d equals " |
2065 out->print_cr("jt=" INTPTR_FORMAT ": om_in_use_count=%d equals " |
2094 "chkOmInUseCount=%d", p2i(jt), jt->omInUseCount, |
2066 "chk_om_in_use_count=%d", p2i(jt), jt->om_in_use_count, |
2095 chkOmInUseCount); |
2067 chk_om_in_use_count); |
2096 } else { |
2068 } else { |
2097 out->print_cr("ERROR: jt=" INTPTR_FORMAT ": omInUseCount=%d is not " |
2069 out->print_cr("ERROR: jt=" INTPTR_FORMAT ": om_in_use_count=%d is not " |
2098 "equal to chkOmInUseCount=%d", p2i(jt), jt->omInUseCount, |
2070 "equal to chk_om_in_use_count=%d", p2i(jt), jt->om_in_use_count, |
2099 chkOmInUseCount); |
2071 chk_om_in_use_count); |
2100 *error_cnt_p = *error_cnt_p + 1; |
2072 *error_cnt_p = *error_cnt_p + 1; |
2101 } |
2073 } |
2102 } |
2074 } |
2103 |
2075 |
2104 // Log details about ObjectMonitors on the in-use lists. The 'BHL' |
2076 // Log details about ObjectMonitors on the in-use lists. The 'BHL' |
2135 out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)"); |
2113 out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)"); |
2136 out->print_cr("%18s %18s %s %18s %18s", |
2114 out->print_cr("%18s %18s %s %18s %18s", |
2137 "jt", "monitor", "BHL", "object", "object type"); |
2115 "jt", "monitor", "BHL", "object", "object type"); |
2138 out->print_cr("================== ================== === ================== =================="); |
2116 out->print_cr("================== ================== === ================== =================="); |
2139 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) { |
2117 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) { |
2140 for (ObjectMonitor * n = jt->omInUseList; n != NULL; n = n->FreeNext) { |
2118 for (ObjectMonitor* n = jt->om_in_use_list; n != NULL; n = n->_next_om) { |
2141 const oop obj = (oop) n->object(); |
2119 const oop obj = (oop) n->object(); |
2142 const markOop mark = n->header(); |
2120 const markWord mark = n->header(); |
2143 ResourceMark rm; |
2121 ResourceMark rm; |
2144 out->print_cr(INTPTR_FORMAT " " INTPTR_FORMAT " %d%d%d " INTPTR_FORMAT |
2122 out->print(INTPTR_FORMAT " " INTPTR_FORMAT " %d%d%d " INTPTR_FORMAT |
2145 " %s", p2i(jt), p2i(n), n->is_busy() != 0, |
2123 " %s", p2i(jt), p2i(n), n->is_busy() != 0, |
2146 mark->hash() != 0, n->owner() != NULL, p2i(obj), |
2124 mark.hash() != 0, n->owner() != NULL, p2i(obj), |
2147 obj->klass()->external_name()); |
2125 obj->klass()->external_name()); |
|
2126 if (n->is_busy() != 0) { |
|
2127 out->print(" (%s)", n->is_busy_to_string(&ss)); |
|
2128 ss.reset(); |
|
2129 } |
|
2130 out->cr(); |
2148 } |
2131 } |
2149 } |
2132 } |
2150 |
2133 |
2151 out->flush(); |
2134 out->flush(); |
2152 } |
2135 } |
2153 |
2136 |
2154 // Log counts for the global and per-thread monitor lists and return |
2137 // Log counts for the global and per-thread monitor lists and return |
2155 // the population count. |
2138 // the population count. |
2156 int ObjectSynchronizer::log_monitor_list_counts(outputStream * out) { |
2139 int ObjectSynchronizer::log_monitor_list_counts(outputStream * out) { |
2157 int popCount = 0; |
2140 int pop_count = 0; |
2158 out->print_cr("%18s %10s %10s %10s", |
2141 out->print_cr("%18s %10s %10s %10s", |
2159 "Global Lists:", "InUse", "Free", "Total"); |
2142 "Global Lists:", "InUse", "Free", "Total"); |
2160 out->print_cr("================== ========== ========== =========="); |
2143 out->print_cr("================== ========== ========== =========="); |
2161 out->print_cr("%18s %10d %10d %10d", "", |
2144 out->print_cr("%18s %10d %10d %10d", "", |
2162 gOmInUseCount, gMonitorFreeCount, gMonitorPopulation); |
2145 g_om_in_use_count, g_om_free_count, g_om_population); |
2163 popCount += gOmInUseCount + gMonitorFreeCount; |
2146 pop_count += g_om_in_use_count + g_om_free_count; |
2164 |
2147 |
2165 out->print_cr("%18s %10s %10s %10s", |
2148 out->print_cr("%18s %10s %10s %10s", |
2166 "Per-Thread Lists:", "InUse", "Free", "Provision"); |
2149 "Per-Thread Lists:", "InUse", "Free", "Provision"); |
2167 out->print_cr("================== ========== ========== =========="); |
2150 out->print_cr("================== ========== ========== =========="); |
2168 |
2151 |
2169 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) { |
2152 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) { |
2170 out->print_cr(INTPTR_FORMAT " %10d %10d %10d", p2i(jt), |
2153 out->print_cr(INTPTR_FORMAT " %10d %10d %10d", p2i(jt), |
2171 jt->omInUseCount, jt->omFreeCount, jt->omFreeProvision); |
2154 jt->om_in_use_count, jt->om_free_count, jt->om_free_provision); |
2172 popCount += jt->omInUseCount + jt->omFreeCount; |
2155 pop_count += jt->om_in_use_count + jt->om_free_count; |
2173 } |
2156 } |
2174 return popCount; |
2157 return pop_count; |
2175 } |
2158 } |
2176 |
2159 |
2177 #ifndef PRODUCT |
2160 #ifndef PRODUCT |
2178 |
2161 |
2179 // Check if monitor belongs to the monitor cache |
2162 // Check if monitor belongs to the monitor cache |
2180 // The list is grow-only so it's *relatively* safe to traverse |
2163 // The list is grow-only so it's *relatively* safe to traverse |
2181 // the list of extant blocks without taking a lock. |
2164 // the list of extant blocks without taking a lock. |
2182 |
2165 |
2183 int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) { |
2166 int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) { |
2184 PaddedEnd<ObjectMonitor> * block = OrderAccess::load_acquire(&gBlockList); |
2167 PaddedObjectMonitor* block = OrderAccess::load_acquire(&g_block_list); |
2185 while (block != NULL) { |
2168 while (block != NULL) { |
2186 assert(block->object() == CHAINMARKER, "must be a block header"); |
2169 assert(block->object() == CHAINMARKER, "must be a block header"); |
2187 if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) { |
2170 if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) { |
2188 address mon = (address)monitor; |
2171 address mon = (address)monitor; |
2189 address blk = (address)block; |
2172 address blk = (address)block; |
2190 size_t diff = mon - blk; |
2173 size_t diff = mon - blk; |
2191 assert((diff % sizeof(PaddedEnd<ObjectMonitor>)) == 0, "must be aligned"); |
2174 assert((diff % sizeof(PaddedObjectMonitor)) == 0, "must be aligned"); |
2192 return 1; |
2175 return 1; |
2193 } |
2176 } |
2194 block = (PaddedEnd<ObjectMonitor> *)block->FreeNext; |
2177 block = (PaddedObjectMonitor*)block->_next_om; |
2195 } |
2178 } |
2196 return 0; |
2179 return 0; |
2197 } |
2180 } |
2198 |
2181 |
2199 #endif |
2182 #endif |