42 #include "utilities/events.hpp" |
42 #include "utilities/events.hpp" |
43 #include "utilities/preserveException.hpp" |
43 #include "utilities/preserveException.hpp" |
44 |
44 |
#if defined(__GNUC__) && !defined(PPC64)
// Need to inhibit inlining for older versions of GCC to avoid build-time failures
#define NOINLINE __attribute__((noinline))
#else
#define NOINLINE
#endif
51 |
51 |
52 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC |
52 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC |
53 |
53 |
54 // The "core" versions of monitor enter and exit reside in this file. |
54 // The "core" versions of monitor enter and exit reside in this file. |
203 assert(lock != mark->locker(), "must not re-lock the same lock"); |
203 assert(lock != mark->locker(), "must not re-lock the same lock"); |
204 assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock"); |
204 assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock"); |
205 lock->set_displaced_header(NULL); |
205 lock->set_displaced_header(NULL); |
206 return; |
206 return; |
207 } |
207 } |
208 |
|
209 #if 0 |
|
210 // The following optimization isn't particularly useful. |
|
211 if (mark->has_monitor() && mark->monitor()->is_entered(THREAD)) { |
|
212 lock->set_displaced_header(NULL); |
|
213 return; |
|
214 } |
|
215 #endif |
|
216 |
208 |
217 // The object header will never be displaced to this lock, |
209 // The object header will never be displaced to this lock, |
218 // so it does not matter what the value is, except that it |
210 // so it does not matter what the value is, except that it |
219 // must be non-zero to avoid looking like a re-entrant lock, |
211 // must be non-zero to avoid looking like a re-entrant lock, |
220 // and must not look locked either. |
212 // and must not look locked either. |
571 // the call sites of identity_hash that might revoke biases have |
563 // the call sites of identity_hash that might revoke biases have |
572 // been checked to make sure they can handle a safepoint. The |
564 // been checked to make sure they can handle a safepoint. The |
573 // added check of the bias pattern is to avoid useless calls to |
565 // added check of the bias pattern is to avoid useless calls to |
574 // thread-local storage. |
566 // thread-local storage. |
575 if (obj->mark()->has_bias_pattern()) { |
567 if (obj->mark()->has_bias_pattern()) { |
576 // Box and unbox the raw reference just in case we cause a STW safepoint. |
568 // Handle for oop obj in case of STW safepoint |
577 Handle hobj(Self, obj); |
569 Handle hobj(Self, obj); |
578 // Relaxing assertion for bug 6320749. |
570 // Relaxing assertion for bug 6320749. |
579 assert(Universe::verify_in_progress() || |
571 assert(Universe::verify_in_progress() || |
580 !SafepointSynchronize::is_at_safepoint(), |
572 !SafepointSynchronize::is_at_safepoint(), |
581 "biases should not be seen by VM thread here"); |
573 "biases should not be seen by VM thread here"); |
888 ::printf ("Monitor scavenge - STW posted @%s (%d)\n", Whence, ForceMonitorScavenge) ; |
880 ::printf ("Monitor scavenge - STW posted @%s (%d)\n", Whence, ForceMonitorScavenge) ; |
889 ::fflush(stdout); |
881 ::fflush(stdout); |
890 } |
882 } |
891 } |
883 } |
892 } |
884 } |
893 /* Too slow for general assert or debug |
885 |
894 void ObjectSynchronizer::verifyInUse (Thread *Self) { |
886 void ObjectSynchronizer::verifyInUse (Thread *Self) { |
895 ObjectMonitor* mid; |
887 ObjectMonitor* mid; |
896 int inusetally = 0; |
888 int inusetally = 0; |
897 for (mid = Self->omInUseList; mid != NULL; mid = mid->FreeNext) { |
889 for (mid = Self->omInUseList; mid != NULL; mid = mid->FreeNext) { |
898 inusetally ++; |
890 inusetally++; |
899 } |
891 } |
900 assert(inusetally == Self->omInUseCount, "inuse count off"); |
892 assert(inusetally == Self->omInUseCount, "inuse count off"); |
901 |
893 |
902 int freetally = 0; |
894 int freetally = 0; |
903 for (mid = Self->omFreeList; mid != NULL; mid = mid->FreeNext) { |
895 for (mid = Self->omFreeList; mid != NULL; mid = mid->FreeNext) { |
904 freetally ++; |
896 freetally++; |
905 } |
897 } |
906 assert(freetally == Self->omFreeCount, "free count off"); |
898 assert(freetally == Self->omFreeCount, "free count off"); |
907 } |
899 } |
908 */ |
900 |
909 ObjectMonitor * ATTR ObjectSynchronizer::omAlloc (Thread * Self) { |
901 ObjectMonitor * NOINLINE ObjectSynchronizer::omAlloc (Thread * Self) { |
910 // A large MAXPRIVATE value reduces both list lock contention |
902 // A large MAXPRIVATE value reduces both list lock contention |
911 // and list coherency traffic, but also tends to increase the |
903 // and list coherency traffic, but also tends to increase the |
912 // number of objectMonitors in circulation as well as the STW |
904 // number of objectMonitors in circulation as well as the STW |
913 // scavenge costs. As usual, we lean toward time in space-time |
905 // scavenge costs. As usual, we lean toward time in space-time |
914 // tradeoffs. |
906 // tradeoffs. |
930 guarantee(m->object() == NULL, "invariant"); |
922 guarantee(m->object() == NULL, "invariant"); |
931 if (MonitorInUseLists) { |
923 if (MonitorInUseLists) { |
932 m->FreeNext = Self->omInUseList; |
924 m->FreeNext = Self->omInUseList; |
933 Self->omInUseList = m; |
925 Self->omInUseList = m; |
934 Self->omInUseCount++; |
926 Self->omInUseCount++; |
935 // verifyInUse(Self); |
927 if (ObjectMonitor::Knob_VerifyInUse) { |
|
928 verifyInUse(Self); |
|
929 } |
936 } else { |
930 } else { |
937 m->FreeNext = NULL; |
931 m->FreeNext = NULL; |
938 } |
932 } |
939 return m; |
933 return m; |
940 } |
934 } |
1050 Self->omInUseList = mid->FreeNext; |
1044 Self->omInUseList = mid->FreeNext; |
1051 } else if (curmidinuse != NULL) { |
1045 } else if (curmidinuse != NULL) { |
1052 curmidinuse->FreeNext = mid->FreeNext; // maintain the current thread inuselist |
1046 curmidinuse->FreeNext = mid->FreeNext; // maintain the current thread inuselist |
1053 } |
1047 } |
1054 Self->omInUseCount--; |
1048 Self->omInUseCount--; |
1055 // verifyInUse(Self); |
1049 if (ObjectMonitor::Knob_VerifyInUse) { |
|
1050 verifyInUse(Self); |
|
1051 } |
1056 break; |
1052 break; |
1057 } else { |
1053 } else { |
1058 curmidinuse = mid; |
1054 curmidinuse = mid; |
1059 mid = mid->FreeNext; |
1055 mid = mid->FreeNext; |
1060 } |
1056 } |
1061 } |
1057 } |
1062 } |
1058 } |
1063 |
1059 |
1064 // FreeNext is used for both omInUseList and omFreeList, so clear old before setting new |
1060 // FreeNext is used for both omInUseList and omFreeList, so clear old before setting new |
1065 m->FreeNext = Self->omFreeList; |
1061 m->FreeNext = Self->omFreeList; |
1066 Self->omFreeList = m; |
1062 Self->omFreeList = m; |
1067 Self->omFreeCount++; |
1063 Self->omFreeCount++; |
1068 } |
1064 } |
1069 |
1065 |
1072 // it's dying. We could also consider having the VM thread steal |
1068 // it's dying. We could also consider having the VM thread steal |
1073 // monitors from threads that have not run java code over a few |
1069 // monitors from threads that have not run java code over a few |
1074 // consecutive STW safepoints. Relatedly, we might decay |
1070 // consecutive STW safepoints. Relatedly, we might decay |
1075 // omFreeProvision at STW safepoints. |
1071 // omFreeProvision at STW safepoints. |
1076 // |
1072 // |
1077 // Also return the monitors of a moribund thread's omInUseList to |
1073 // Also return the monitors of a moribund thread's omInUseList to |
1078 // a global gOmInUseList under the global list lock so these |
1074 // a global gOmInUseList under the global list lock so these |
1079 // will continue to be scanned. |
1075 // will continue to be scanned. |
1080 // |
1076 // |
1081 // We currently call omFlush() from the Thread:: dtor _after the thread |
1077 // We currently call omFlush() from the Thread:: dtor _after the thread |
1082 // has been excised from the thread list and is no longer a mutator. |
1078 // has been excised from the thread list and is no longer a mutator. |
1113 ObjectMonitor *curom; |
1109 ObjectMonitor *curom; |
1114 for (curom = InUseList; curom != NULL; curom = curom->FreeNext) { |
1110 for (curom = InUseList; curom != NULL; curom = curom->FreeNext) { |
1115 InUseTail = curom; |
1111 InUseTail = curom; |
1116 InUseTally++; |
1112 InUseTally++; |
1117 } |
1113 } |
1118 // TODO debug |
|
1119 assert(Self->omInUseCount == InUseTally, "inuse count off"); |
1114 assert(Self->omInUseCount == InUseTally, "inuse count off"); |
1120 Self->omInUseCount = 0; |
1115 Self->omInUseCount = 0; |
1121 guarantee(InUseTail != NULL && InUseList != NULL, "invariant"); |
1116 guarantee(InUseTail != NULL && InUseList != NULL, "invariant"); |
1122 } |
1117 } |
1123 |
1118 |
1152 |
1147 |
1153 // Note that we could encounter some performance loss through false-sharing as |
1148 // Note that we could encounter some performance loss through false-sharing as |
1154 // multiple locks occupy the same $ line. Padding might be appropriate. |
1149 // multiple locks occupy the same $ line. Padding might be appropriate. |
1155 |
1150 |
1156 |
1151 |
1157 ObjectMonitor * ATTR ObjectSynchronizer::inflate (Thread * Self, oop object) { |
1152 ObjectMonitor * NOINLINE ObjectSynchronizer::inflate (Thread * Self, oop object) { |
1158 // Inflate mutates the heap ... |
1153 // Inflate mutates the heap ... |
1159 // Relaxing assertion for bug 6320749. |
1154 // Relaxing assertion for bug 6320749. |
1160 assert(Universe::verify_in_progress() || |
1155 assert(Universe::verify_in_progress() || |
1161 !SafepointSynchronize::is_at_safepoint(), "invariant"); |
1156 !SafepointSynchronize::is_at_safepoint(), "invariant"); |
1162 |
1157 |
1383 }; |
1378 }; |
1384 |
1379 |
1385 // Deflate a single monitor if not in use |
1380 // Deflate a single monitor if not in use |
1386 // Return true if deflated, false if in use |
1381 // Return true if deflated, false if in use |
1387 bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj, |
1382 bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj, |
1388 ObjectMonitor** FreeHeadp, ObjectMonitor** FreeTailp) { |
1383 ObjectMonitor** freeHeadp, ObjectMonitor** freeTailp) { |
1389 bool deflated; |
1384 bool deflated; |
1390 // Normal case ... The monitor is associated with obj. |
1385 // Normal case ... The monitor is associated with obj. |
1391 guarantee(obj->mark() == markOopDesc::encode(mid), "invariant"); |
1386 guarantee(obj->mark() == markOopDesc::encode(mid), "invariant"); |
1392 guarantee(mid == obj->mark()->monitor(), "invariant"); |
1387 guarantee(mid == obj->mark()->monitor(), "invariant"); |
1393 guarantee(mid->header()->is_neutral(), "invariant"); |
1388 guarantee(mid->header()->is_neutral(), "invariant"); |
1413 mid->clear(); |
1408 mid->clear(); |
1414 |
1409 |
1415 assert(mid->object() == NULL, "invariant"); |
1410 assert(mid->object() == NULL, "invariant"); |
1416 |
1411 |
1417 // Move the object to the working free list defined by FreeHead,FreeTail. |
1412 // Move the object to the working free list defined by FreeHead,FreeTail. |
1418 if (*FreeHeadp == NULL) *FreeHeadp = mid; |
1413 if (*freeHeadp == NULL) *freeHeadp = mid; |
1419 if (*FreeTailp != NULL) { |
1414 if (*freeTailp != NULL) { |
1420 ObjectMonitor * prevtail = *FreeTailp; |
1415 ObjectMonitor * prevtail = *freeTailp; |
1421 assert(prevtail->FreeNext == NULL, "cleaned up deflated?"); // TODO KK |
1416 assert(prevtail->FreeNext == NULL, "cleaned up deflated?"); // TODO KK |
1422 prevtail->FreeNext = mid; |
1417 prevtail->FreeNext = mid; |
1423 } |
1418 } |
1424 *FreeTailp = mid; |
1419 *freeTailp = mid; |
1425 deflated = true; |
1420 deflated = true; |
1426 } |
1421 } |
1427 return deflated; |
1422 return deflated; |
1428 } |
1423 } |
1429 |
1424 |
1430 // Caller acquires ListLock |
1425 // Caller acquires ListLock |
1431 int ObjectSynchronizer::walk_monitor_list(ObjectMonitor** listheadp, |
1426 int ObjectSynchronizer::walk_monitor_list(ObjectMonitor** listheadp, |
1432 ObjectMonitor** FreeHeadp, ObjectMonitor** FreeTailp) { |
1427 ObjectMonitor** freeHeadp, ObjectMonitor** freeTailp) { |
1433 ObjectMonitor* mid; |
1428 ObjectMonitor* mid; |
1434 ObjectMonitor* next; |
1429 ObjectMonitor* next; |
1435 ObjectMonitor* curmidinuse = NULL; |
1430 ObjectMonitor* curmidinuse = NULL; |
1436 int deflatedcount = 0; |
1431 int deflatedcount = 0; |
1437 |
1432 |
1438 for (mid = *listheadp; mid != NULL;) { |
1433 for (mid = *listheadp; mid != NULL;) { |
1439 oop obj = (oop) mid->object(); |
1434 oop obj = (oop) mid->object(); |
1440 bool deflated = false; |
1435 bool deflated = false; |
1441 if (obj != NULL) { |
1436 if (obj != NULL) { |
1442 deflated = deflate_monitor(mid, obj, FreeHeadp, FreeTailp); |
1437 deflated = deflate_monitor(mid, obj, freeHeadp, freeTailp); |
1443 } |
1438 } |
1444 if (deflated) { |
1439 if (deflated) { |
1445 // extract from per-thread in-use-list |
1440 // extract from per-thread in-use-list |
1446 if (mid == *listheadp) { |
1441 if (mid == *listheadp) { |
1447 *listheadp = mid->FreeNext; |
1442 *listheadp = mid->FreeNext; |
1480 int inUse = 0; |
1475 int inUse = 0; |
1481 for (JavaThread* cur = Threads::first(); cur != NULL; cur = cur->next()) { |
1476 for (JavaThread* cur = Threads::first(); cur != NULL; cur = cur->next()) { |
1482 nInCirculation+= cur->omInUseCount; |
1477 nInCirculation+= cur->omInUseCount; |
1483 int deflatedcount = walk_monitor_list(cur->omInUseList_addr(), &FreeHead, &FreeTail); |
1478 int deflatedcount = walk_monitor_list(cur->omInUseList_addr(), &FreeHead, &FreeTail); |
1484 cur->omInUseCount-= deflatedcount; |
1479 cur->omInUseCount-= deflatedcount; |
1485 // verifyInUse(cur); |
1480 if (ObjectMonitor::Knob_VerifyInUse) { |
|
1481 verifyInUse(cur); |
|
1482 } |
1486 nScavenged += deflatedcount; |
1483 nScavenged += deflatedcount; |
1487 nInuse += cur->omInUseCount; |
1484 nInuse += cur->omInUseCount; |
1488 } |
1485 } |
1489 |
1486 |
1490 // For moribund threads, scan gOmInUseList |
1487 // For moribund threads, scan gOmInUseList |