// is fairly rare, this is not of concern. The RawMonitor_lock can not
// be held indefinitely. The critical sections must be short and bounded.
//
// -------------------------------------------------------------------------

122 void JvmtiRawMonitor::SimpleEnter (Thread * Self) { |
122 void JvmtiRawMonitor::simple_enter(Thread* self) { |
123 for (;;) { |
123 for (;;) { |
124 if (Atomic::replace_if_null(Self, &_owner)) { |
124 if (Atomic::replace_if_null(self, &_owner)) { |
125 return ; |
125 return; |
126 } |
126 } |
127 |
127 |
128 QNode Node (Self) ; |
128 QNode node(self); |
129 Self->_ParkEvent->reset() ; // strictly optional |
129 self->_ParkEvent->reset(); // strictly optional |
130 Node.TState = QNode::TS_ENTER ; |
130 node._t_state = QNode::TS_ENTER; |
131 |
131 |
132 RawMonitor_lock->lock_without_safepoint_check() ; |
132 RawMonitor_lock->lock_without_safepoint_check(); |
133 Node._next = _EntryList ; |
133 node._next = _entry_list; |
134 _EntryList = &Node ; |
134 _entry_list = &node; |
135 OrderAccess::fence() ; |
135 OrderAccess::fence(); |
136 if (_owner == NULL && Atomic::replace_if_null(Self, &_owner)) { |
136 if (_owner == NULL && Atomic::replace_if_null(self, &_owner)) { |
137 _EntryList = Node._next ; |
137 _entry_list = node._next; |
138 RawMonitor_lock->unlock() ; |
138 RawMonitor_lock->unlock(); |
139 return ; |
139 return; |
140 } |
140 } |
141 RawMonitor_lock->unlock() ; |
141 RawMonitor_lock->unlock(); |
142 while (Node.TState == QNode::TS_ENTER) { |
142 while (node._t_state == QNode::TS_ENTER) { |
143 Self->_ParkEvent->park() ; |
143 self->_ParkEvent->park(); |
144 } |
144 } |
145 } |
145 } |
146 } |
146 } |
147 |
147 |
148 void JvmtiRawMonitor::SimpleExit (Thread * Self) { |
148 void JvmtiRawMonitor::simple_exit(Thread* self) { |
149 guarantee (_owner == Self, "invariant") ; |
149 guarantee(_owner == self, "invariant"); |
150 OrderAccess::release_store(&_owner, (Thread*)NULL) ; |
150 OrderAccess::release_store(&_owner, (Thread*)NULL); |
151 OrderAccess::fence() ; |
151 OrderAccess::fence(); |
152 if (_EntryList == NULL) return ; |
152 if (_entry_list == NULL) { |
153 QNode * w ; |
153 return; |
154 |
154 } |
155 RawMonitor_lock->lock_without_safepoint_check() ; |
155 |
156 w = _EntryList ; |
156 RawMonitor_lock->lock_without_safepoint_check(); |
|
157 QNode* w = _entry_list; |
157 if (w != NULL) { |
158 if (w != NULL) { |
158 _EntryList = w->_next ; |
159 _entry_list = w->_next; |
159 } |
160 } |
160 RawMonitor_lock->unlock() ; |
161 RawMonitor_lock->unlock(); |
161 if (w != NULL) { |
162 if (w != NULL) { |
162 guarantee (w ->TState == QNode::TS_ENTER, "invariant") ; |
163 guarantee(w ->_t_state == QNode::TS_ENTER, "invariant"); |
163 // Once we set TState to TS_RUN the waiting thread can complete |
164 // Once we set _t_state to TS_RUN the waiting thread can complete |
164 // SimpleEnter and 'w' is pointing into random stack space. So we have |
165 // simple_enter and 'w' is pointing into random stack space. So we have |
165 // to ensure we extract the ParkEvent (which is in type-stable memory) |
166 // to ensure we extract the ParkEvent (which is in type-stable memory) |
166 // before we set the state, and then don't access 'w'. |
167 // before we set the state, and then don't access 'w'. |
167 ParkEvent * ev = w->_event ; |
168 ParkEvent* ev = w->_event; |
168 OrderAccess::loadstore(); |
169 OrderAccess::loadstore(); |
169 w->TState = QNode::TS_RUN ; |
170 w->_t_state = QNode::TS_RUN; |
170 OrderAccess::fence() ; |
171 OrderAccess::fence(); |
171 ev->unpark() ; |
172 ev->unpark(); |
172 } |
173 } |
173 return ; |
174 return; |
174 } |
175 } |
175 |
176 |
176 int JvmtiRawMonitor::SimpleWait (Thread * Self, jlong millis) { |
177 int JvmtiRawMonitor::simple_wait(Thread* self, jlong millis) { |
177 guarantee (_owner == Self , "invariant") ; |
178 guarantee(_owner == self , "invariant"); |
178 guarantee (_recursions == 0, "invariant") ; |
179 guarantee(_recursions == 0, "invariant"); |
179 |
180 |
180 QNode Node (Self) ; |
181 QNode node(self); |
181 Node._notified = 0 ; |
182 node._notified = 0; |
182 Node.TState = QNode::TS_WAIT ; |
183 node._t_state = QNode::TS_WAIT; |
183 |
184 |
184 RawMonitor_lock->lock_without_safepoint_check() ; |
185 RawMonitor_lock->lock_without_safepoint_check(); |
185 Node._next = _WaitSet ; |
186 node._next = _wait_set; |
186 _WaitSet = &Node ; |
187 _wait_set = &node; |
187 RawMonitor_lock->unlock() ; |
188 RawMonitor_lock->unlock(); |
188 |
189 |
189 SimpleExit (Self) ; |
190 simple_exit(self); |
190 guarantee (_owner != Self, "invariant") ; |
191 guarantee(_owner != self, "invariant"); |
191 |
192 |
192 int ret = OS_OK ; |
193 int ret = OS_OK; |
193 if (millis <= 0) { |
194 if (millis <= 0) { |
194 Self->_ParkEvent->park(); |
195 self->_ParkEvent->park(); |
195 } else { |
196 } else { |
196 ret = Self->_ParkEvent->park(millis); |
197 ret = self->_ParkEvent->park(millis); |
197 } |
198 } |
198 |
199 |
199 // If thread still resides on the waitset then unlink it. |
200 // If thread still resides on the waitset then unlink it. |
200 // Double-checked locking -- the usage is safe in this context |
201 // Double-checked locking -- the usage is safe in this context |
201 // as TState is volatile and the lock-unlock operators are |
202 // as _t_state is volatile and the lock-unlock operators are |
202 // serializing (barrier-equivalent). |
203 // serializing (barrier-equivalent). |
203 |
204 |
204 if (Node.TState == QNode::TS_WAIT) { |
205 if (node._t_state == QNode::TS_WAIT) { |
205 RawMonitor_lock->lock_without_safepoint_check() ; |
206 RawMonitor_lock->lock_without_safepoint_check(); |
206 if (Node.TState == QNode::TS_WAIT) { |
207 if (node._t_state == QNode::TS_WAIT) { |
207 // Simple O(n) unlink, but performance isn't critical here. |
208 // Simple O(n) unlink, but performance isn't critical here. |
208 QNode * p ; |
209 QNode* p; |
209 QNode * q = NULL ; |
210 QNode* q = NULL; |
210 for (p = _WaitSet ; p != &Node; p = p->_next) { |
211 for (p = _wait_set; p != &node; p = p->_next) { |
211 q = p ; |
212 q = p; |
212 } |
213 } |
213 guarantee (p == &Node, "invariant") ; |
214 guarantee(p == &node, "invariant"); |
214 if (q == NULL) { |
215 if (q == NULL) { |
215 guarantee (p == _WaitSet, "invariant") ; |
216 guarantee (p == _wait_set, "invariant"); |
216 _WaitSet = p->_next ; |
217 _wait_set = p->_next; |
217 } else { |
218 } else { |
218 guarantee (p == q->_next, "invariant") ; |
219 guarantee(p == q->_next, "invariant"); |
219 q->_next = p->_next ; |
220 q->_next = p->_next; |
220 } |
221 } |
221 Node.TState = QNode::TS_RUN ; |
222 node._t_state = QNode::TS_RUN; |
222 } |
223 } |
223 RawMonitor_lock->unlock() ; |
224 RawMonitor_lock->unlock(); |
224 } |
225 } |
225 |
226 |
226 guarantee (Node.TState == QNode::TS_RUN, "invariant") ; |
227 guarantee(node._t_state == QNode::TS_RUN, "invariant"); |
227 SimpleEnter (Self) ; |
228 simple_enter(self); |
228 |
229 |
229 guarantee (_owner == Self, "invariant") ; |
230 guarantee(_owner == self, "invariant"); |
230 guarantee (_recursions == 0, "invariant") ; |
231 guarantee(_recursions == 0, "invariant"); |
231 return ret ; |
232 return ret; |
232 } |
233 } |
233 |
234 |
234 void JvmtiRawMonitor::SimpleNotify (Thread * Self, bool All) { |
235 void JvmtiRawMonitor::simple_notify(Thread* self, bool all) { |
235 guarantee (_owner == Self, "invariant") ; |
236 guarantee(_owner == self, "invariant"); |
236 if (_WaitSet == NULL) return ; |
237 if (_wait_set == NULL) { |
|
238 return; |
|
239 } |
237 |
240 |
238 // We have two options: |
241 // We have two options: |
239 // A. Transfer the threads from the WaitSet to the EntryList |
242 // A. Transfer the threads from the _wait_set to the _entry_list |
240 // B. Remove the thread from the WaitSet and unpark() it. |
243 // B. Remove the thread from the _wait_set and unpark() it. |
241 // |
244 // |
242 // We use (B), which is crude and results in lots of futile |
245 // We use (B), which is crude and results in lots of futile |
243 // context switching. In particular (B) induces lots of contention. |
246 // context switching. In particular (B) induces lots of contention. |
244 |
247 |
245 ParkEvent * ev = NULL ; // consider using a small auto array ... |
248 ParkEvent* ev = NULL; // consider using a small auto array ... |
246 RawMonitor_lock->lock_without_safepoint_check() ; |
249 RawMonitor_lock->lock_without_safepoint_check(); |
247 for (;;) { |
250 for (;;) { |
248 QNode * w = _WaitSet ; |
251 QNode* w = _wait_set; |
249 if (w == NULL) break ; |
252 if (w == NULL) break; |
250 _WaitSet = w->_next ; |
253 _wait_set = w->_next; |
251 if (ev != NULL) { ev->unpark(); ev = NULL; } |
254 if (ev != NULL) { |
252 ev = w->_event ; |
255 ev->unpark(); |
253 OrderAccess::loadstore() ; |
256 ev = NULL; |
254 w->TState = QNode::TS_RUN ; |
257 } |
255 OrderAccess::storeload(); |
258 ev = w->_event; |
256 if (!All) break ; |
259 OrderAccess::loadstore(); |
257 } |
260 w->_t_state = QNode::TS_RUN; |
258 RawMonitor_lock->unlock() ; |
261 OrderAccess::storeload(); |
259 if (ev != NULL) ev->unpark(); |
262 if (!all) { |
260 return ; |
263 break; |
|
264 } |
|
265 } |
|
266 RawMonitor_lock->unlock(); |
|
267 if (ev != NULL) { |
|
268 ev->unpark(); |
|
269 } |
|
270 return; |
261 } |
271 } |
262 |
272 |
263 // Any JavaThread will enter here with state _thread_blocked |
273 // Any JavaThread will enter here with state _thread_blocked |
264 void JvmtiRawMonitor::raw_enter(Thread * Self) { |
274 void JvmtiRawMonitor::raw_enter(Thread* self) { |
265 void * Contended ; |
275 void* contended; |
266 JavaThread * jt = NULL; |
276 JavaThread* jt = NULL; |
267 // don't enter raw monitor if thread is being externally suspended, it will |
277 // don't enter raw monitor if thread is being externally suspended, it will |
268 // surprise the suspender if a "suspended" thread can still enter monitor |
278 // surprise the suspender if a "suspended" thread can still enter monitor |
269 if (Self->is_Java_thread()) { |
279 if (self->is_Java_thread()) { |
270 jt = (JavaThread*) Self; |
280 jt = (JavaThread*)self; |
271 jt->SR_lock()->lock_without_safepoint_check(); |
281 jt->SR_lock()->lock_without_safepoint_check(); |
272 while (jt->is_external_suspend()) { |
282 while (jt->is_external_suspend()) { |
273 jt->SR_lock()->unlock(); |
283 jt->SR_lock()->unlock(); |
274 jt->java_suspend_self(); |
284 jt->java_suspend_self(); |
275 jt->SR_lock()->lock_without_safepoint_check(); |
285 jt->SR_lock()->lock_without_safepoint_check(); |
276 } |
286 } |
277 // guarded by SR_lock to avoid racing with new external suspend requests. |
287 // guarded by SR_lock to avoid racing with new external suspend requests. |
278 Contended = Atomic::cmpxchg(jt, &_owner, (Thread*)NULL); |
288 contended = Atomic::cmpxchg(jt, &_owner, (Thread*)NULL); |
279 jt->SR_lock()->unlock(); |
289 jt->SR_lock()->unlock(); |
280 } else { |
290 } else { |
281 Contended = Atomic::cmpxchg(Self, &_owner, (Thread*)NULL); |
291 contended = Atomic::cmpxchg(self, &_owner, (Thread*)NULL); |
282 } |
292 } |
283 |
293 |
284 if (Contended == Self) { |
294 if (contended == self) { |
285 _recursions ++ ; |
295 _recursions++; |
286 return ; |
296 return; |
287 } |
297 } |
288 |
298 |
289 if (Contended == NULL) { |
299 if (contended == NULL) { |
290 guarantee (_owner == Self, "invariant") ; |
300 guarantee(_owner == self, "invariant"); |
291 guarantee (_recursions == 0, "invariant") ; |
301 guarantee(_recursions == 0, "invariant"); |
292 return ; |
302 return; |
293 } |
303 } |
294 |
304 |
295 Self->set_current_pending_raw_monitor(this); |
305 self->set_current_pending_raw_monitor(this); |
296 |
306 |
297 if (!Self->is_Java_thread()) { |
307 if (!self->is_Java_thread()) { |
298 SimpleEnter (Self) ; |
308 simple_enter(self); |
299 } else { |
309 } else { |
300 guarantee (jt->thread_state() == _thread_blocked, "invariant") ; |
310 guarantee(jt->thread_state() == _thread_blocked, "invariant"); |
301 for (;;) { |
311 for (;;) { |
302 jt->set_suspend_equivalent(); |
312 jt->set_suspend_equivalent(); |
303 // cleared by handle_special_suspend_equivalent_condition() or |
313 // cleared by handle_special_suspend_equivalent_condition() or |
304 // java_suspend_self() |
314 // java_suspend_self() |
305 SimpleEnter (jt) ; |
315 simple_enter(jt); |
306 |
316 |
307 // were we externally suspended while we were waiting? |
317 // were we externally suspended while we were waiting? |
308 if (!jt->handle_special_suspend_equivalent_condition()) break ; |
318 if (!jt->handle_special_suspend_equivalent_condition()) { |
|
319 break; |
|
320 } |
309 |
321 |
310 // This thread was externally suspended |
322 // This thread was externally suspended |
311 // We have reentered the contended monitor, but while we were |
323 // We have reentered the contended monitor, but while we were |
312 // waiting another thread suspended us. We don't want to reenter |
324 // waiting another thread suspended us. We don't want to reenter |
313 // the monitor while suspended because that would surprise the |
325 // the monitor while suspended because that would surprise the |
314 // thread that suspended us. |
326 // thread that suspended us. |
315 // |
327 // |
316 // Drop the lock |
328 // Drop the lock |
317 SimpleExit (jt) ; |
329 simple_exit(jt); |
318 |
330 |
319 jt->java_suspend_self(); |
331 jt->java_suspend_self(); |
320 } |
332 } |
321 } |
333 } |
322 |
334 |
323 Self->set_current_pending_raw_monitor(NULL); |
335 self->set_current_pending_raw_monitor(NULL); |
324 |
336 |
325 guarantee (_owner == Self, "invariant") ; |
337 guarantee(_owner == self, "invariant"); |
326 guarantee (_recursions == 0, "invariant") ; |
338 guarantee(_recursions == 0, "invariant"); |
327 } |
339 } |
328 |
340 |
329 int JvmtiRawMonitor::raw_exit(Thread * Self) { |
341 int JvmtiRawMonitor::raw_exit(Thread* self) { |
330 if (Self != _owner) { |
342 if (self != _owner) { |
331 return M_ILLEGAL_MONITOR_STATE; |
343 return M_ILLEGAL_MONITOR_STATE; |
332 } |
344 } |
333 if (_recursions > 0) { |
345 if (_recursions > 0) { |
334 --_recursions ; |
346 _recursions--; |
335 } else { |
347 } else { |
336 SimpleExit (Self) ; |
348 simple_exit(self); |
337 } |
349 } |
338 |
350 |
339 return M_OK; |
351 return M_OK; |
340 } |
352 } |
341 |
353 |
342 // All JavaThreads will enter here with state _thread_blocked |
354 // All JavaThreads will enter here with state _thread_blocked |
343 |
355 |
344 int JvmtiRawMonitor::raw_wait(jlong millis, bool interruptible, Thread * Self) { |
356 int JvmtiRawMonitor::raw_wait(jlong millis, bool interruptible, Thread* self) { |
345 if (Self != _owner) { |
357 if (self != _owner) { |
346 return M_ILLEGAL_MONITOR_STATE; |
358 return M_ILLEGAL_MONITOR_STATE; |
347 } |
359 } |
348 |
360 |
349 // To avoid spurious wakeups we reset the parkevent -- This is strictly optional. |
361 // To avoid spurious wakeups we reset the parkevent. This is strictly optional. |
350 // The caller must be able to tolerate spurious returns from raw_wait(). |
362 // The caller must be able to tolerate spurious returns from raw_wait(). |
351 Self->_ParkEvent->reset() ; |
363 self->_ParkEvent->reset(); |
352 OrderAccess::fence() ; |
364 OrderAccess::fence(); |
353 |
365 |
354 JavaThread * jt = NULL; |
366 JavaThread* jt = NULL; |
355 // check interrupt event |
367 // check interrupt event |
356 if (interruptible) { |
368 if (interruptible) { |
357 assert(Self->is_Java_thread(), "Only JavaThreads can be interruptible"); |
369 assert(self->is_Java_thread(), "Only JavaThreads can be interruptible"); |
358 jt = (JavaThread*) Self; |
370 jt = (JavaThread*)self; |
359 if (jt->is_interrupted(true)) { |
371 if (jt->is_interrupted(true)) { |
360 return M_INTERRUPTED; |
372 return M_INTERRUPTED; |
361 } |
373 } |
362 } else { |
374 } else { |
363 assert(!Self->is_Java_thread(), "JavaThreads must be interuptible"); |
375 assert(!self->is_Java_thread(), "JavaThreads must be interuptible"); |
364 } |
376 } |
365 |
377 |
366 intptr_t save = _recursions ; |
378 intptr_t save = _recursions; |
367 _recursions = 0 ; |
379 _recursions = 0; |
368 _waiters ++ ; |
380 _waiters++; |
369 if (Self->is_Java_thread()) { |
381 if (self->is_Java_thread()) { |
370 guarantee (jt->thread_state() == _thread_blocked, "invariant") ; |
382 guarantee(jt->thread_state() == _thread_blocked, "invariant"); |
371 jt->set_suspend_equivalent(); |
383 jt->set_suspend_equivalent(); |
372 } |
384 } |
373 int rv = SimpleWait (Self, millis) ; |
385 int rv = simple_wait(self, millis); |
374 _recursions = save ; |
386 _recursions = save; |
375 _waiters -- ; |
387 _waiters--; |
376 |
388 |
377 guarantee (Self == _owner, "invariant") ; |
389 guarantee(self == _owner, "invariant"); |
378 if (Self->is_Java_thread()) { |
390 if (self->is_Java_thread()) { |
379 for (;;) { |
391 for (;;) { |
380 if (!jt->handle_special_suspend_equivalent_condition()) break ; |
392 if (!jt->handle_special_suspend_equivalent_condition()) { |
381 SimpleExit (jt) ; |
393 break; |
382 jt->java_suspend_self(); |
394 } |
383 SimpleEnter (jt) ; |
395 simple_exit(jt); |
384 jt->set_suspend_equivalent() ; |
396 jt->java_suspend_self(); |
385 } |
397 simple_enter(jt); |
386 guarantee (jt == _owner, "invariant") ; |
398 jt->set_suspend_equivalent(); |
|
399 } |
|
400 guarantee(jt == _owner, "invariant"); |
387 } |
401 } |
388 |
402 |
389 if (interruptible && jt->is_interrupted(true)) { |
403 if (interruptible && jt->is_interrupted(true)) { |
390 return M_INTERRUPTED; |
404 return M_INTERRUPTED; |
391 } |
405 } |
392 |
406 |
393 return M_OK ; |
407 return M_OK; |
394 } |
408 } |
395 |
409 |
396 int JvmtiRawMonitor::raw_notify(Thread * Self) { |
410 int JvmtiRawMonitor::raw_notify(Thread* self) { |
397 if (Self != _owner) { |
411 if (self != _owner) { |
398 return M_ILLEGAL_MONITOR_STATE; |
412 return M_ILLEGAL_MONITOR_STATE; |
399 } |
413 } |
400 SimpleNotify (Self, false) ; |
414 simple_notify(self, false); |
401 return M_OK; |
415 return M_OK; |
402 } |
416 } |
403 |
417 |
404 int JvmtiRawMonitor::raw_notifyAll(Thread * Self) { |
418 int JvmtiRawMonitor::raw_notifyAll(Thread* self) { |
405 if (Self != _owner) { |
419 if (self != _owner) { |
406 return M_ILLEGAL_MONITOR_STATE; |
420 return M_ILLEGAL_MONITOR_STATE; |
407 } |
421 } |
408 SimpleNotify (Self, true) ; |
422 simple_notify(self, true); |
409 return M_OK; |
423 return M_OK; |
410 } |
424 } |