/*
 * Copyright 1998-2010 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_vmThread.cpp.incl"

// Dummy VM operation to act as first element in our circular doubly-linked list
class VM_Dummy: public VM_Operation {
  VMOp_Type type() const { return VMOp_Dummy; }
  void doit() {};
};

VMOperationQueue::VMOperationQueue() {
  // The queue is a circular doubly-linked list, which always contains
  // one element (i.e., one element means empty).
  for(int i = 0; i < nof_priorities; i++) {
    _queue_length[i] = 0;
    _queue_counter = 0;
    _queue[i] = new VM_Dummy();
    _queue[i]->set_next(_queue[i]);
    _queue[i]->set_prev(_queue[i]);
  }
  _drain_list = NULL;
}


bool VMOperationQueue::queue_empty(int prio) {
  // It is empty if there is exactly one element
  bool empty = (_queue[prio] == _queue[prio]->next());
  assert( (_queue_length[prio] == 0 && empty) ||
          (_queue_length[prio] > 0  && !empty), "sanity check");
  return _queue_length[prio] == 0;
}

// Inserts an element to the right of the q element
void VMOperationQueue::insert(VM_Operation* q, VM_Operation* n) {
  assert(q->next()->prev() == q && q->prev()->next() == q, "sanity check");
  n->set_prev(q);
  n->set_next(q->next());
  q->next()->set_prev(n);
  q->set_next(n);
}

void VMOperationQueue::queue_add_front(int prio, VM_Operation *op) {
  _queue_length[prio]++;
  insert(_queue[prio]->next(), op);
}

void VMOperationQueue::queue_add_back(int prio, VM_Operation *op) {
  _queue_length[prio]++;
  insert(_queue[prio]->prev(), op);
}


void VMOperationQueue::unlink(VM_Operation* q) {
  assert(q->next()->prev() == q && q->prev()->next() == q, "sanity check");
  q->prev()->set_next(q->next());
  q->next()->set_prev(q->prev());
}

VM_Operation* VMOperationQueue::queue_remove_front(int prio) {
  if (queue_empty(prio)) return NULL;
  assert(_queue_length[prio] >= 0, "sanity check");
  _queue_length[prio]--;
  VM_Operation* r = _queue[prio]->next();
  assert(r != _queue[prio], "cannot remove base element");
  unlink(r);
  return r;
}
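
// Detach the entire list for the given priority in one step.  The returned
// chain is reachable via next() and NULL-terminated, while the sentinel is
// relinked to itself so the queue is empty afterwards.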
VM_Operation* VMOperationQueue::queue_drain(int prio) {
  if (queue_empty(prio)) return NULL;
  DEBUG_ONLY(int length = _queue_length[prio];);
  assert(length >= 0, "sanity check");
  _queue_length[prio] = 0;
  VM_Operation* r = _queue[prio]->next();
  assert(r != _queue[prio], "cannot remove base element");
  // remove links to base element from head and tail
  r->set_prev(NULL);
  _queue[prio]->prev()->set_next(NULL);
  // restore queue to empty state
  _queue[prio]->set_next(_queue[prio]);
  _queue[prio]->set_prev(_queue[prio]);
  assert(queue_empty(prio), "drain corrupted queue");
#ifdef ASSERT
  int len = 0;
  VM_Operation* cur;
  for(cur = r; cur != NULL; cur=cur->next()) len++;
  assert(len == length, "drain lost some ops");
#endif
  return r;
}
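
// The following walks visit oops held by pending operations: those still in
// the priority queues and those already detached onto the drain list (see
// set_drain_list() in loop()), which GC must still scan.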
void VMOperationQueue::queue_oops_do(int queue, OopClosure* f) {
  VM_Operation* cur = _queue[queue];
  cur = cur->next();
  while (cur != _queue[queue]) {
    cur->oops_do(f);
    cur = cur->next();
  }
}

void VMOperationQueue::drain_list_oops_do(OopClosure* f) {
  VM_Operation* cur = _drain_list;
  while (cur != NULL) {
    cur->oops_do(f);
    cur = cur->next();
  }
}

//-----------------------------------------------------------------
// High-level interface
bool VMOperationQueue::add(VM_Operation *op) {
  // Encapsulates VM queue policy. Currently, that
  // only involves putting them on the right list
  if (op->evaluate_at_safepoint()) {
    queue_add_back(SafepointPriority, op);
    return true;
  }

  queue_add_back(MediumPriority, op);
  return true;
}
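
// Pick the next operation to evaluate.  Safepoint-priority operations are
// normally preferred, but after ten consecutive picks the medium-priority
// queue is tried first so that it cannot be starved (see 4390175 below).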
VM_Operation* VMOperationQueue::remove_next() {
  // Assuming VMOperation queue is two-level priority queue. If there are
  // more than two priorities, we need a different scheduling algorithm.
  assert(SafepointPriority == 0 && MediumPriority == 1 && nof_priorities == 2,
         "current algorithm does not work");

  // simple counter based scheduling to prevent starvation of lower priority
  // queue. -- see 4390175
  int high_prio, low_prio;
  if (_queue_counter++ < 10) {
    high_prio = SafepointPriority;
    low_prio  = MediumPriority;
  } else {
    _queue_counter = 0;
    high_prio = MediumPriority;
    low_prio  = SafepointPriority;
  }

  return queue_remove_front(queue_empty(high_prio) ? low_prio : high_prio);
}

void VMOperationQueue::oops_do(OopClosure* f) {
  for(int i = 0; i < nof_priorities; i++) {
    queue_oops_do(i, f);
  }
  drain_list_oops_do(f);
}


//------------------------------------------------------------------------------------------------------------------
// Implementation of VMThread stuff

bool              VMThread::_should_terminate   = false;
bool              VMThread::_terminated         = false;
Monitor*          VMThread::_terminate_lock     = NULL;
VMThread*         VMThread::_vm_thread          = NULL;
VM_Operation*     VMThread::_cur_vm_operation   = NULL;
VMOperationQueue* VMThread::_vm_queue           = NULL;
PerfCounter*      VMThread::_perf_accumulated_vm_operation_time = NULL;


void VMThread::create() {
  assert(vm_thread() == NULL, "we can only allocate one VMThread");
  _vm_thread = new VMThread();

  // Create VM operation queue
  _vm_queue = new VMOperationQueue();
  guarantee(_vm_queue != NULL, "just checking");

  _terminate_lock = new Monitor(Mutex::safepoint, "VMThread::_terminate_lock", true);

  if (UsePerfData) {
    // jvmstat performance counters
    Thread* THREAD = Thread::current();
    _perf_accumulated_vm_operation_time =
        PerfDataManager::create_counter(SUN_THREADS, "vmOperationTime",
                                        PerfData::U_Ticks, CHECK);
  }
}


VMThread::VMThread() : NamedThread() {
  set_name("VM Thread");
}

void VMThread::destroy() {
  if (_vm_thread != NULL) {
    delete _vm_thread;
    _vm_thread = NULL;      // VM thread is gone
  }
}
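
// Entry point of the VM thread: set up thread-local state and native priority,
// then serve VM operations in loop() until termination is requested, and
// finally shut down at a last safepoint and notify wait_for_vm_thread_exit().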
void VMThread::run() {
  assert(this == vm_thread(), "check");

  this->initialize_thread_local_storage();
  this->record_stack_base_and_size();
  // Notify_lock wait checks on active_handles() to rewait in
  // case of spurious wakeup, it should wait on the last
  // value set prior to the notify
  this->set_active_handles(JNIHandleBlock::allocate_block());

  {
    MutexLocker ml(Notify_lock);
    Notify_lock->notify();
  }
  // Notify_lock is destroyed by Threads::create_vm()

  int prio = (VMThreadPriority == -1)
    ? os::java_to_os_priority[NearMaxPriority]
    : VMThreadPriority;
  // Note that I cannot call os::set_priority because it expects Java
  // priorities and I am *explicitly* using OS priorities so that it's
  // possible to set the VM thread priority higher than any Java thread.
  os::set_native_priority( this, prio );

  // Wait for VM_Operations until termination
  this->loop();

  // Note the intention to exit before safepointing.
  // 6295565  This has the effect of waiting for any large tty
  // outputs to finish.
  if (xtty != NULL) {
    ttyLocker ttyl;
    xtty->begin_elem("destroy_vm");
    xtty->stamp();
    xtty->end_elem();
    assert(should_terminate(), "termination flag must be set");
  }

  // 4526887 let VM thread exit at Safepoint
  SafepointSynchronize::begin();

  if (VerifyBeforeExit) {
    HandleMark hm(VMThread::vm_thread());
    // Among other things, this ensures that Eden top is correct.
    Universe::heap()->prepare_for_verify();
    os::check_heap();
    Universe::verify(true, true); // Silent verification so as not to pollute normal output
  }

  CompileBroker::set_should_block();

  // wait for threads (compiler threads or daemon threads) in the
  // _thread_in_native state to block.
  VM_Exit::wait_for_threads_in_native_to_block();

  // signal other threads that VM process is gone
  {
    // Note: we must have the _no_safepoint_check_flag. Mutex::lock() allows
    // the VM thread to enter any lock at Safepoint as long as its _owner is NULL.
    // If that happens after _terminate_lock->wait() has unset _owner
    // but before it actually drops the lock and waits, the notification below
    // may get lost and we will have a hang. To avoid this, we need to use
    // Mutex::lock_without_safepoint_check().
    MutexLockerEx ml(_terminate_lock, Mutex::_no_safepoint_check_flag);
    _terminated = true;
    _terminate_lock->notify();
  }

  // Deletion must be done synchronously by the JNI DestroyJavaVM thread
  // so that the VMThread deletion completes before the main thread frees
  // up the CodeHeap.

}


// Notify the VMThread that the last non-daemon JavaThread has terminated,
// and wait until operation is performed.
void VMThread::wait_for_vm_thread_exit() {
  { MutexLocker mu(VMOperationQueue_lock);
    _should_terminate = true;
    VMOperationQueue_lock->notify();
  }

  // Note: VM thread leaves at Safepoint. We are not stopped by Safepoint
  // because this thread has been removed from the threads list. But anything
  // that could get blocked by Safepoint should not be used after this point,
  // otherwise we will hang, since no one is left to end the safepoint.

  // Wait until VM thread is terminated
  // Note: it should be OK to use Terminator_lock here. But this is called
  // at a very delicate time (VM shutdown) and we are operating in a non-VM
  // thread at a Safepoint. It's safer not to share a lock with other threads.
  { MutexLockerEx ml(_terminate_lock, Mutex::_no_safepoint_check_flag);
    while(!VMThread::is_terminated()) {
      _terminate_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
}

void VMThread::print_on(outputStream* st) const {
  st->print("\"%s\" ", name());
  Thread::print_on(st);
  st->cr();
}
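
// Evaluate a single operation on the VM thread.  Once the requesting thread's
// completed count has been incremented it may deallocate a stack-allocated
// operation, so afterwards the only cleanup allowed here is deleting an
// operation that was allocated on the C heap.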
void VMThread::evaluate_operation(VM_Operation* op) {
  ResourceMark rm;

  {
    PerfTraceTime vm_op_timer(perf_accumulated_vm_operation_time());
    op->evaluate();
  }

  // Last access of info in _cur_vm_operation!
  bool c_heap_allocated = op->is_cheap_allocated();

  // Mark as completed
  if (!op->evaluate_concurrently()) {
    op->calling_thread()->increment_vm_operation_completed_count();
  }
  // It is unsafe to access _cur_vm_operation after the
  // 'increment_vm_operation_completed_count' call, since if it is stack
  // allocated the calling thread might already have deallocated it.
  if (c_heap_allocated) {
    delete _cur_vm_operation;
  }
}
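

// The VM thread's main loop: wait for the next VM operation, timing out so
// that a safepoint can be forced when cleanup is due; evaluate the operation
// inside a safepoint if required, coalescing any further queued safepoint
// operations; then notify waiting requestors and repeat until termination.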
void VMThread::loop() {
  assert(_cur_vm_operation == NULL, "no current one should be executing");

  while(true) {
    VM_Operation* safepoint_ops = NULL;
    //
    // Wait for VM operation
    //
    // use no_safepoint_check to get lock without attempting to "sneak"
    { MutexLockerEx mu_queue(VMOperationQueue_lock,
                             Mutex::_no_safepoint_check_flag);

      // Look for new operation
      assert(_cur_vm_operation == NULL, "no current one should be executing");
      _cur_vm_operation = _vm_queue->remove_next();

      // Stall time tracking code
      if (PrintVMQWaitTime && _cur_vm_operation != NULL &&
          !_cur_vm_operation->evaluate_concurrently()) {
        long stall = os::javaTimeMillis() - _cur_vm_operation->timestamp();
        if (stall > 0)
          tty->print_cr("%s stall: %Ld", _cur_vm_operation->name(), stall);
      }

      while (!should_terminate() && _cur_vm_operation == NULL) {
        // wait with a timeout to guarantee safepoints at regular intervals
        bool timedout =
          VMOperationQueue_lock->wait(Mutex::_no_safepoint_check_flag,
                                      GuaranteedSafepointInterval);

        // Support for self destruction
        if ((SelfDestructTimer != 0) && !is_error_reported() &&
            (os::elapsedTime() > SelfDestructTimer * 60)) {
          tty->print_cr("VM self-destructed");
          exit(-1);
        }

        if (timedout && (SafepointALot ||
                         SafepointSynchronize::is_cleanup_needed())) {
          MutexUnlockerEx mul(VMOperationQueue_lock,
                              Mutex::_no_safepoint_check_flag);
          // Force a safepoint since we have not had one for at least
          // 'GuaranteedSafepointInterval' milliseconds.  This will run all
          // the clean-up processing that needs to be done regularly at a
          // safepoint
          SafepointSynchronize::begin();
          #ifdef ASSERT
            if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot();
          #endif
          SafepointSynchronize::end();
        }
        _cur_vm_operation = _vm_queue->remove_next();

        // If we are at a safepoint we will evaluate all the operations that
        // follow that also require a safepoint
        if (_cur_vm_operation != NULL &&
            _cur_vm_operation->evaluate_at_safepoint()) {
          safepoint_ops = _vm_queue->drain_at_safepoint_priority();
        }
      }

      if (should_terminate()) break;
    } // Release mu_queue_lock

    //
    // Execute VM operation
    //
    { HandleMark hm(VMThread::vm_thread());

      EventMark em("Executing VM operation: %s", vm_operation()->name());
      assert(_cur_vm_operation != NULL, "we should have found an operation to execute");

      // Give the VM thread an extra quantum.  Jobs tend to be bursty and this
      // helps the VM thread to finish up the job.
      // FIXME: When this is enabled and there are many threads, this can degrade
      // performance significantly.
      if( VMThreadHintNoPreempt )
        os::hint_no_preempt();

      // If we are at a safepoint we will evaluate all the operations that
      // follow that also require a safepoint
      if (_cur_vm_operation->evaluate_at_safepoint()) {

        _vm_queue->set_drain_list(safepoint_ops); // ensure ops can be scanned

        SafepointSynchronize::begin();
        evaluate_operation(_cur_vm_operation);
        // now process all queued safepoint ops, iteratively draining
        // the queue until there are none left
        do {
          _cur_vm_operation = safepoint_ops;
          if (_cur_vm_operation != NULL) {
            do {
              // evaluate_operation deletes the op object so we have
              // to grab the next op now
              VM_Operation* next = _cur_vm_operation->next();
              _vm_queue->set_drain_list(next);
              evaluate_operation(_cur_vm_operation);
              _cur_vm_operation = next;
              if (PrintSafepointStatistics) {
                SafepointSynchronize::inc_vmop_coalesced_count();
              }
            } while (_cur_vm_operation != NULL);
          }
          // There is a chance that a thread enqueued a safepoint op
          // since we released the op-queue lock and initiated the safepoint.
          // So we drain the queue again if there is anything there, as an
          // optimization to try and reduce the number of safepoints.
          // As the safepoint synchronizes us with JavaThreads we will see
          // any enqueue made by a JavaThread, but the peek will not
          // necessarily detect a concurrent enqueue by a GC thread, but
          // that simply means the op will wait for the next major cycle of the
          // VMThread - just as it would if the GC thread lost the race for
          // the lock.
          if (_vm_queue->peek_at_safepoint_priority()) {
            // must hold lock while draining queue
            MutexLockerEx mu_queue(VMOperationQueue_lock,
                                   Mutex::_no_safepoint_check_flag);
            safepoint_ops = _vm_queue->drain_at_safepoint_priority();
          } else {
            safepoint_ops = NULL;
          }
        } while(safepoint_ops != NULL);

        _vm_queue->set_drain_list(NULL);

        // Complete safepoint synchronization
        SafepointSynchronize::end();

      } else {  // not a safepoint operation
        if (TraceLongCompiles) {
          elapsedTimer t;
          t.start();
          evaluate_operation(_cur_vm_operation);
          t.stop();
          double secs = t.seconds();
          if (secs * 1e3 > LongCompileThreshold) {
            // XXX - _cur_vm_operation should not be accessed after
            // the completed count has been incremented; the waiting
            // thread may have already freed this memory.
            tty->print_cr("vm %s: %3.7f secs]", _cur_vm_operation->name(), secs);
          }
        } else {
          evaluate_operation(_cur_vm_operation);
        }

        _cur_vm_operation = NULL;
      }
    }

    //
    //  Notify (potential) waiting Java thread(s) - lock without safepoint
    //  check so that sneaking is not possible
    { MutexLockerEx mu(VMOperationRequest_lock,
                       Mutex::_no_safepoint_check_flag);
      VMOperationRequest_lock->notify_all();
    }

    //
    // We want to make sure that we get to a safepoint regularly.
    //
    if (SafepointALot || SafepointSynchronize::is_cleanup_needed()) {
      long interval          = SafepointSynchronize::last_non_safepoint_interval();
      bool max_time_exceeded = GuaranteedSafepointInterval != 0 && (interval > GuaranteedSafepointInterval);
      if (SafepointALot || max_time_exceeded) {
        HandleMark hm(VMThread::vm_thread());
        SafepointSynchronize::begin();
        SafepointSynchronize::end();
      }
    }
  }
}
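
// Submit 'op' for evaluation by the VM thread.  A request from a JavaThread or
// the WatcherThread is queued and, unless the operation is concurrent, waited
// on via a ticket on VMOperationRequest_lock; a request made on the VM thread
// itself is evaluated directly as a (possibly nested) operation.
//
// Illustrative use from a JavaThread, assuming a stack-allocated,
// non-concurrent operation such as VM_ForceSafepoint:
//   VM_ForceSafepoint op;
//   VMThread::execute(&op);   // returns after the op has run at a safepoint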
void VMThread::execute(VM_Operation* op) {
  Thread* t = Thread::current();

  if (!t->is_VM_thread()) {
    SkipGCALot sgcalot(t);    // avoid re-entrant attempts to gc-a-lot
    // JavaThread or WatcherThread
    t->check_for_valid_safepoint_state(true);

    // New request from Java thread, evaluate prologue
    if (!op->doit_prologue()) {
      return;   // op was cancelled
    }

    // Setup VM_operations for execution
    op->set_calling_thread(t, Thread::get_priority(t));

    // It does not make sense to execute the epilogue, if the VM operation object is getting
    // deallocated by the VM thread.
    bool concurrent     = op->evaluate_concurrently();
    bool execute_epilog = !op->is_cheap_allocated();
    assert(!concurrent || op->is_cheap_allocated(), "concurrent => cheap_allocated");

    // Get ticket number for non-concurrent VM operations
    int ticket = 0;
    if (!concurrent) {
      ticket = t->vm_operation_ticket();
    }

    // Add VM operation to list of waiting threads. We are guaranteed not to block while holding the
    // VMOperationQueue_lock, so we can block without a safepoint check. This allows vm operation requests
    // to be queued up during a safepoint synchronization.
    {
      VMOperationQueue_lock->lock_without_safepoint_check();
      bool ok = _vm_queue->add(op);
      op->set_timestamp(os::javaTimeMillis());
      VMOperationQueue_lock->notify();
      VMOperationQueue_lock->unlock();
      // VM_Operation got skipped
      if (!ok) {
        assert(concurrent, "can only skip concurrent tasks");
        if (op->is_cheap_allocated()) delete op;
        return;
      }
    }

    if (!concurrent) {
      // Wait for completion of request (non-concurrent)
      // Note: only a JavaThread triggers the safepoint check when locking
      MutexLocker mu(VMOperationRequest_lock);
      while(t->vm_operation_completed_count() < ticket) {
        VMOperationRequest_lock->wait(!t->is_Java_thread());
      }
    }

    if (execute_epilog) {
      op->doit_epilogue();
    }
  } else {
    // invoked by VM thread; usually nested VM operation
    assert(t->is_VM_thread(), "must be a VM thread");
    VM_Operation* prev_vm_operation = vm_operation();
    if (prev_vm_operation != NULL) {
      // Check that the VM operation allows nested VM operations. This is normally not the
      // case, e.g., the compiler does not allow nested scavenges or compiles.
      if (!prev_vm_operation->allow_nested_vm_operations()) {
        fatal2("Nested VM operation %s requested by operation %s", op->name(), vm_operation()->name());
      }
      op->set_calling_thread(prev_vm_operation->calling_thread(), prev_vm_operation->priority());
    }

    EventMark em("Executing %s VM operation: %s", prev_vm_operation ? "nested" : "", op->name());

    // Release all internal handles after operation is evaluated
    HandleMark hm(t);
    _cur_vm_operation = op;

    if (op->evaluate_at_safepoint() && !SafepointSynchronize::is_at_safepoint()) {
      SafepointSynchronize::begin();
      op->evaluate();
      SafepointSynchronize::end();
    } else {
      op->evaluate();
    }

    // Free memory if needed
    if (op->is_cheap_allocated()) delete op;

    _cur_vm_operation = prev_vm_operation;
  }
}


void VMThread::oops_do(OopClosure* f, CodeBlobClosure* cf) {
  Thread::oops_do(f, cf);
  _vm_queue->oops_do(f);
}

//------------------------------------------------------------------------------------------------------------------
#ifndef PRODUCT

void VMOperationQueue::verify_queue(int prio) {
  // Check that the list is correctly linked
  int length = _queue_length[prio];
  VM_Operation *cur = _queue[prio];
  int i;

  // Check forward links
  for(i = 0; i < length; i++) {
    cur = cur->next();
    assert(cur != _queue[prio], "list too short (forward)");
  }
  assert(cur->next() == _queue[prio], "list too long (forward)");

  // Check backwards links
  cur = _queue[prio];
  for(i = 0; i < length; i++) {
    cur = cur->prev();
    assert(cur != _queue[prio], "list too short (backwards)");
  }
  assert(cur->prev() == _queue[prio], "list too long (backwards)");
}

#endif

void VMThread::verify() {
  oops_do(&VerifyOopClosure::verify_oop, NULL);
}