@@ -128,11 +128,11 @@
 #endif

 #ifndef SUPPORTS_NATIVE_CX8
 Mutex*   UnsafeJlong_lock            = NULL;
 #endif
-Monitor* CodeHeapStateAnalytics_lock = NULL;
+Mutex*   CodeHeapStateAnalytics_lock = NULL;

 Mutex*   MetaspaceExpand_lock        = NULL;
 Mutex*   ClassLoaderDataGraph_lock   = NULL;
 Monitor* ThreadsSMRDelete_lock       = NULL;
 Mutex*   SharedDecoder_lock          = NULL;
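The pattern in this first hunk repeats throughout the change: a lock that is only ever acquired and released, never waited on via wait()/notify(), needs only the Mutex interface, so its declaration can be narrowed from Monitor* to Mutex*. A minimal sketch of such a call site (hypothetical caller inside HotSpot; it assumes the consolidated MutexLocker that accepts a no-safepoint-check flag, and that this lock is defined with Monitor::_safepoint_check_never):

// Hypothetical use of CodeHeapStateAnalytics_lock: scoped lock/unlock only,
// never wait/notify, so the narrower Mutex* declaration above suffices.
void print_code_heap_state() {
  MutexLocker ml(CodeHeapStateAnalytics_lock, Monitor::_no_safepoint_check_flag);
  // ... aggregate and print CodeHeap statistics while holding the lock ...
}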
@@ -151,39 +151,39 @@
 Monitor* JVMCI_lock                  = NULL;
 #endif


 #define MAX_NUM_MUTEX 128
-static Monitor * _mutex_array[MAX_NUM_MUTEX];
+static Mutex* _mutex_array[MAX_NUM_MUTEX];
 static int _num_mutex;

 #ifdef ASSERT
-void assert_locked_or_safepoint(const Monitor * lock) {
+void assert_locked_or_safepoint(const Mutex* lock) {
   // check if this thread owns the lock (common case)
   if (IgnoreLockingAssertions) return;
   assert(lock != NULL, "Need non-NULL lock");
   if (lock->owned_by_self()) return;
   if (SafepointSynchronize::is_at_safepoint()) return;
   if (!Universe::is_fully_initialized()) return;
   // see if invoker of VM operation owns it
   VM_Operation* op = VMThread::vm_operation();
   if (op != NULL && op->calling_thread() == lock->owner()) return;
   fatal("must own lock %s", lock->name());
 }

 // a weaker assertion than the above
-void assert_locked_or_safepoint_weak(const Monitor * lock) {
+void assert_locked_or_safepoint_weak(const Mutex* lock) {
   if (IgnoreLockingAssertions) return;
   assert(lock != NULL, "Need non-NULL lock");
   if (lock->is_locked()) return;
   if (SafepointSynchronize::is_at_safepoint()) return;
   if (!Universe::is_fully_initialized()) return;
   fatal("must own lock %s", lock->name());
 }

 // a stronger assertion than the above
-void assert_lock_strong(const Monitor * lock) {
+void assert_lock_strong(const Mutex* lock) {
   if (IgnoreLockingAssertions) return;
   assert(lock != NULL, "Need non-NULL lock");
   if (lock->owned_by_self()) return;
   fatal("must own lock %s", lock->name());
 }
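These three assertions are the standard guards for code touching lock-protected VM state: the plain form accepts lock ownership, being at a safepoint, or a VM operation whose calling thread holds the lock; the weak form accepts anyone holding the lock; the strong form insists the current thread itself owns it. A minimal sketch of a typical caller (hypothetical function, assuming HotSpot-internal context):

// Hypothetical mutator of state guarded by ClassLoaderDataGraph_lock.
// The plain assertion is enough here: at a safepoint no other thread can
// be mutating the graph, so it is as safe as holding the lock.
void purge_dead_class_loader_data() {
  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
  // ... unlink and free dead ClassLoaderData entries ...
}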
@@ -223,11 +223,11 @@
     def(StringDedupQueue_lock        , PaddedMonitor, leaf,    true,  Monitor::_safepoint_check_never);
     def(StringDedupTable_lock        , PaddedMutex  , leaf,    true,  Monitor::_safepoint_check_never);
   }
   def(ParGCRareEvent_lock            , PaddedMutex  , leaf,    true,  Monitor::_safepoint_check_always);
   def(CGCPhaseManager_lock           , PaddedMonitor, leaf,    false, Monitor::_safepoint_check_always);
-  def(CodeCache_lock                 , PaddedMutex  , special, true,  Monitor::_safepoint_check_never);
+  def(CodeCache_lock                 , PaddedMonitor, special, true,  Monitor::_safepoint_check_never);
   def(RawMonitor_lock                , PaddedMutex  , special, true,  Monitor::_safepoint_check_never);
   def(OopMapCacheAlloc_lock          , PaddedMutex  , leaf,    true,  Monitor::_safepoint_check_always); // used for oop_map_cache allocation.

   def(MetaspaceExpand_lock           , PaddedMutex  , leaf-1,  true,  Monitor::_safepoint_check_never);
   def(ClassLoaderDataGraph_lock      , PaddedMutex  , nonleaf, true,  Monitor::_safepoint_check_always);
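For reference, the def macro used throughout mutex_init() allocates the named lock with its rank and safepoint-check policy, then records it in _mutex_array for error reporting and verification. A sketch of its shape (paraphrased from mutexLocker.cpp; exact details vary across JDK versions):

#define def(var, type, pri, vm_block, safepoint_check_allowed) {       \
  var = new type(Mutex::pri, #var, vm_block, safepoint_check_allowed); \
  assert(_num_mutex < MAX_NUM_MUTEX, "increase MAX_NUM_MUTEX");        \
  _mutex_array[_num_mutex++] = var;                                    \
}

Registering every lock through this macro is also consistent with narrowing the array's element type to Mutex* above: each entry is used only through the base-lock interface (e.g. name and rank queries).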
@@ -332,11 +332,11 @@
 #endif
   def(DumpTimeTable_lock             , PaddedMutex  , leaf,    true,  Monitor::_safepoint_check_never);
 #endif // INCLUDE_CDS
 }

-GCMutexLocker::GCMutexLocker(Monitor * mutex) {
+GCMutexLocker::GCMutexLocker(Mutex* mutex) {
   if (SafepointSynchronize::is_at_safepoint()) {
     _locked = false;
   } else {
     _mutex = mutex;
     _locked = true;