author       twisti
date         Fri, 02 Sep 2011 00:36:18 -0700
changeset    10510:ab626d1bdf53
parent       9342:456b8d0486b5
child        10565:dc90c239f4ec
permissions  -rw-r--r--

/*
 * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "compiler/compileBroker.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/resourceArea.hpp"
#include "oops/methodOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vm_operations.hpp"
#include "services/runtimeService.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/xmlstream.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "thread_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "thread_windows.inline.hpp"
#endif

HS_DTRACE_PROBE_DECL3(hotspot, vmops__request, char *, uintptr_t, int);
HS_DTRACE_PROBE_DECL3(hotspot, vmops__begin, char *, uintptr_t, int);
HS_DTRACE_PROBE_DECL3(hotspot, vmops__end, char *, uintptr_t, int);
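// These probes are fired with the operation's name, the length of that name,
// and the operation's evaluation mode; see the HS_DTRACE_PROBE3 call sites
// in VMOperationQueue::add() and VMThread::evaluate_operation() below.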

// Dummy VM operation to act as first element in our circular doubly-linked list
class VM_Dummy: public VM_Operation {
  VMOp_Type type() const { return VMOp_Dummy; }
  void doit() {};
};

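// Each priority level owns one VM_Dummy sentinel: an empty queue is a
// sentinel whose next() and prev() point back at itself, which lets
// insert() and unlink() below run without any NULL checks.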
VMOperationQueue::VMOperationQueue() {
  // The queue is a circular doubly-linked list, which always contains
  // one element (i.e., one element means empty).
  for(int i = 0; i < nof_priorities; i++) {
    _queue_length[i] = 0;
    _queue_counter = 0;
    _queue[i] = new VM_Dummy();
    _queue[i]->set_next(_queue[i]);
    _queue[i]->set_prev(_queue[i]);
  }
  _drain_list = NULL;
}


bool VMOperationQueue::queue_empty(int prio) {
  // It is empty if there is exactly one element
  bool empty = (_queue[prio] == _queue[prio]->next());
  assert( (_queue_length[prio] == 0 && empty) ||
          (_queue_length[prio] > 0  && !empty), "sanity check");
  return _queue_length[prio] == 0;
}

// Inserts an element to the right of the q element
void VMOperationQueue::insert(VM_Operation* q, VM_Operation* n) {
  assert(q->next()->prev() == q && q->prev()->next() == q, "sanity check");
  n->set_prev(q);
  n->set_next(q->next());
  q->next()->set_prev(n);
  q->set_next(n);
}

void VMOperationQueue::queue_add_front(int prio, VM_Operation *op) {
  _queue_length[prio]++;
  insert(_queue[prio]->next(), op);
}

void VMOperationQueue::queue_add_back(int prio, VM_Operation *op) {
  _queue_length[prio]++;
  insert(_queue[prio]->prev(), op);
}


void VMOperationQueue::unlink(VM_Operation* q) {
  assert(q->next()->prev() == q && q->prev()->next() == q, "sanity check");
  q->prev()->set_next(q->next());
  q->next()->set_prev(q->prev());
}

VM_Operation* VMOperationQueue::queue_remove_front(int prio) {
  if (queue_empty(prio)) return NULL;
  assert(_queue_length[prio] >= 0, "sanity check");
  _queue_length[prio]--;
  VM_Operation* r = _queue[prio]->next();
  assert(r != _queue[prio], "cannot remove base element");
  unlink(r);
  return r;
}

VM_Operation* VMOperationQueue::queue_drain(int prio) {
  if (queue_empty(prio)) return NULL;
  DEBUG_ONLY(int length = _queue_length[prio];);
  assert(length >= 0, "sanity check");
  _queue_length[prio] = 0;
  VM_Operation* r = _queue[prio]->next();
  assert(r != _queue[prio], "cannot remove base element");
  // remove links to base element from head and tail
  r->set_prev(NULL);
  _queue[prio]->prev()->set_next(NULL);
  // restore queue to empty state
  _queue[prio]->set_next(_queue[prio]);
  _queue[prio]->set_prev(_queue[prio]);
  assert(queue_empty(prio), "drain corrupted queue");
#ifdef DEBUG
  int len = 0;
  VM_Operation* cur;
  for(cur = r; cur != NULL; cur=cur->next()) len++;
  assert(len == length, "drain lost some ops");
#endif
  return r;
}
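
// Note: the chain returned by queue_drain() is NULL-terminated - the head's
// prev and the tail's next are cleared above - so callers simply follow
// next() until NULL (see drain_list_oops_do() and the drain loop in
// VMThread::loop()).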

void VMOperationQueue::queue_oops_do(int queue, OopClosure* f) {
  VM_Operation* cur = _queue[queue];
  cur = cur->next();
  while (cur != _queue[queue]) {
    cur->oops_do(f);
    cur = cur->next();
  }
}

void VMOperationQueue::drain_list_oops_do(OopClosure* f) {
  VM_Operation* cur = _drain_list;
  while (cur != NULL) {
    cur->oops_do(f);
    cur = cur->next();
  }
}

//-----------------------------------------------------------------
// High-level interface
bool VMOperationQueue::add(VM_Operation *op) {

  HS_DTRACE_PROBE3(hotspot, vmops__request, op->name(), strlen(op->name()),
                   op->evaluation_mode());

  // Encapsulates VM queue policy. Currently, that
  // only involves putting them on the right list
  if (op->evaluate_at_safepoint()) {
    queue_add_back(SafepointPriority, op);
    return true;
  }

  queue_add_back(MediumPriority, op);
  return true;
}
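
// Note: both branches above currently return true; VMThread::execute() still
// checks the result and quietly drops the operation if add() ever reports
// failure (asserting that only concurrent operations may be skipped).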

VM_Operation* VMOperationQueue::remove_next() {
  // Assuming the VMOperation queue is a two-level priority queue. If there are
  // more than two priorities, we need a different scheduling algorithm.
  assert(SafepointPriority == 0 && MediumPriority == 1 && nof_priorities == 2,
         "current algorithm does not work");

  // simple counter based scheduling to prevent starvation of lower priority
  // queue. -- see 4390175
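  // (With the counter below, roughly one out of every eleven scheduling
  //  decisions prefers the MediumPriority queue, so medium priority
  //  operations cannot be starved by a stream of safepoint operations.)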
  int high_prio, low_prio;
  if (_queue_counter++ < 10) {
    high_prio = SafepointPriority;
    low_prio  = MediumPriority;
  } else {
    _queue_counter = 0;
    high_prio = MediumPriority;
    low_prio  = SafepointPriority;
  }

  return queue_remove_front(queue_empty(high_prio) ? low_prio : high_prio);
}

void VMOperationQueue::oops_do(OopClosure* f) {
  for(int i = 0; i < nof_priorities; i++) {
    queue_oops_do(i, f);
  }
  drain_list_oops_do(f);
}


//------------------------------------------------------------------------------------------------------------------
// Implementation of VMThread stuff

bool VMThread::_should_terminate = false;
bool VMThread::_terminated = false;
Monitor* VMThread::_terminate_lock = NULL;
VMThread* VMThread::_vm_thread = NULL;
VM_Operation* VMThread::_cur_vm_operation = NULL;
VMOperationQueue* VMThread::_vm_queue = NULL;
PerfCounter* VMThread::_perf_accumulated_vm_operation_time = NULL;


void VMThread::create() {
  assert(vm_thread() == NULL, "we can only allocate one VMThread");
  _vm_thread = new VMThread();

  // Create VM operation queue
  _vm_queue = new VMOperationQueue();
  guarantee(_vm_queue != NULL, "just checking");

  _terminate_lock = new Monitor(Mutex::safepoint, "VMThread::_terminate_lock", true);

  if (UsePerfData) {
    // jvmstat performance counters
    Thread* THREAD = Thread::current();
    _perf_accumulated_vm_operation_time =
                 PerfDataManager::create_counter(SUN_THREADS, "vmOperationTime",
                                                 PerfData::U_Ticks, CHECK);
  }
}


VMThread::VMThread() : NamedThread() {
  set_name("VM Thread");
}

void VMThread::destroy() {
  if (_vm_thread != NULL) {
    delete _vm_thread;
    _vm_thread = NULL;      // VM thread is gone
  }
}

void VMThread::run() {
  assert(this == vm_thread(), "check");

  this->initialize_thread_local_storage();
  this->record_stack_base_and_size();
  // Notify_lock wait checks on active_handles() to rewait in
  // case of spurious wakeup; it should wait on the last
  // value set prior to the notify
  this->set_active_handles(JNIHandleBlock::allocate_block());

  {
    MutexLocker ml(Notify_lock);
    Notify_lock->notify();
  }
  // Notify_lock is destroyed by Threads::create_vm()

  int prio = (VMThreadPriority == -1)
    ? os::java_to_os_priority[NearMaxPriority]
    : VMThreadPriority;
  // Note that I cannot call os::set_priority because it expects Java
  // priorities and I am *explicitly* using OS priorities so that it's
  // possible to set the VM thread priority higher than any Java thread.
  os::set_native_priority( this, prio );

  // Wait for VM_Operations until termination
  this->loop();

  // Note the intention to exit before safepointing.
  // 6295565  This has the effect of waiting for any large tty
  // outputs to finish.
  if (xtty != NULL) {
    ttyLocker ttyl;
    xtty->begin_elem("destroy_vm");
    xtty->stamp();
    xtty->end_elem();
    assert(should_terminate(), "termination flag must be set");
  }

  // 4526887 let VM thread exit at Safepoint
  SafepointSynchronize::begin();

  if (VerifyBeforeExit) {
    HandleMark hm(VMThread::vm_thread());
    // Among other things, this ensures that Eden top is correct.
    Universe::heap()->prepare_for_verify();
    os::check_heap();
    // Silent verification so as not to pollute normal output,
    // unless we really asked for it.
    Universe::verify(true, !(PrintGCDetails || Verbose));
  }

  CompileBroker::set_should_block();

  // wait for threads (compiler threads or daemon threads) in the
  // _thread_in_native state to block.
  VM_Exit::wait_for_threads_in_native_to_block();

  // signal other threads that VM process is gone
  {
    // Note: we must have the _no_safepoint_check_flag. Mutex::lock() allows
    // VM thread to enter any lock at Safepoint as long as its _owner is NULL.
    // If that happens after _terminate_lock->wait() has unset _owner
    // but before it actually drops the lock and waits, the notification below
    // may get lost and we will have a hang. To avoid this, we need to use
    // Mutex::lock_without_safepoint_check().
    MutexLockerEx ml(_terminate_lock, Mutex::_no_safepoint_check_flag);
    _terminated = true;
    _terminate_lock->notify();
  }

  // Deletion must be done synchronously by the JNI DestroyJavaVM thread
  // so that the VMThread deletion completes before the main thread frees
  // up the CodeHeap.

}


// Notify the VMThread that the last non-daemon JavaThread has terminated,
// and wait until operation is performed.
void VMThread::wait_for_vm_thread_exit() {
  { MutexLocker mu(VMOperationQueue_lock);
    _should_terminate = true;
    VMOperationQueue_lock->notify();
  }

  // Note: VM thread leaves at Safepoint. We are not stopped by Safepoint
  // because this thread has been removed from the threads list. But anything
  // that could get blocked by Safepoint should not be used after this point,
  // otherwise we will hang, since there is no one who can end the safepoint.

  // Wait until VM thread is terminated
  // Note: it should be OK to use Terminator_lock here. But this is called
  // at a very delicate time (VM shutdown) and we are operating in a non-VM
  // thread at Safepoint. It's safer not to share the lock with other threads.
  { MutexLockerEx ml(_terminate_lock, Mutex::_no_safepoint_check_flag);
    while(!VMThread::is_terminated()) {
      _terminate_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
}

void VMThread::print_on(outputStream* st) const {
  st->print("\"%s\" ", name());
  Thread::print_on(st);
  st->cr();
}

void VMThread::evaluate_operation(VM_Operation* op) {
  ResourceMark rm;

  {
    PerfTraceTime vm_op_timer(perf_accumulated_vm_operation_time());
    HS_DTRACE_PROBE3(hotspot, vmops__begin, op->name(), strlen(op->name()),
                     op->evaluation_mode());
    op->evaluate();
    HS_DTRACE_PROBE3(hotspot, vmops__end, op->name(), strlen(op->name()),
                     op->evaluation_mode());
  }

  // Last access of info in _cur_vm_operation!
  bool c_heap_allocated = op->is_cheap_allocated();

  // Mark as completed
  if (!op->evaluate_concurrently()) {
    op->calling_thread()->increment_vm_operation_completed_count();
  }
  // It is unsafe to access _cur_vm_operation after the
  // 'increment_vm_operation_completed_count' call, since if it is stack
  // allocated the calling thread might already have deallocated it.
  if (c_heap_allocated) {
    delete _cur_vm_operation;
  }
}


void VMThread::loop() {
  assert(_cur_vm_operation == NULL, "no current one should be executing");
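
  // Each pass below waits for an operation (forcing a cleanup safepoint when
  // the wait times out and one is needed), evaluates it, and - for safepoint
  // operations - drains and evaluates any other queued safepoint operations
  // inside the same safepoint before notifying the waiting threads.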

  while(true) {
    VM_Operation* safepoint_ops = NULL;
    //
    // Wait for VM operation
    //
    // use no_safepoint_check to get lock without attempting to "sneak"
    { MutexLockerEx mu_queue(VMOperationQueue_lock,
                             Mutex::_no_safepoint_check_flag);

      // Look for new operation
      assert(_cur_vm_operation == NULL, "no current one should be executing");
      _cur_vm_operation = _vm_queue->remove_next();

      // Stall time tracking code
      if (PrintVMQWaitTime && _cur_vm_operation != NULL &&
          !_cur_vm_operation->evaluate_concurrently()) {
        long stall = os::javaTimeMillis() - _cur_vm_operation->timestamp();
        if (stall > 0)
          tty->print_cr("%s stall: %Ld", _cur_vm_operation->name(), stall);
      }

      while (!should_terminate() && _cur_vm_operation == NULL) {
        // wait with a timeout to guarantee safepoints at regular intervals
        bool timedout =
          VMOperationQueue_lock->wait(Mutex::_no_safepoint_check_flag,
                                      GuaranteedSafepointInterval);

        // Support for self destruction
        if ((SelfDestructTimer != 0) && !is_error_reported() &&
            (os::elapsedTime() > SelfDestructTimer * 60)) {
          tty->print_cr("VM self-destructed");
          exit(-1);
        }

        if (timedout && (SafepointALot ||
                         SafepointSynchronize::is_cleanup_needed())) {
          MutexUnlockerEx mul(VMOperationQueue_lock,
                              Mutex::_no_safepoint_check_flag);
          // Force a safepoint since we have not had one for at least
          // 'GuaranteedSafepointInterval' milliseconds.  This will run all
          // the clean-up processing that needs to be done regularly at a
          // safepoint
          SafepointSynchronize::begin();
          #ifdef ASSERT
            if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot();
          #endif
          SafepointSynchronize::end();
        }
        _cur_vm_operation = _vm_queue->remove_next();

        // If we are at a safepoint we will evaluate all the operations that
        // follow that also require a safepoint
        if (_cur_vm_operation != NULL &&
            _cur_vm_operation->evaluate_at_safepoint()) {
          safepoint_ops = _vm_queue->drain_at_safepoint_priority();
        }
      }

      if (should_terminate()) break;
    } // Release mu_queue_lock

    //
    // Execute VM operation
    //
    { HandleMark hm(VMThread::vm_thread());

      EventMark em("Executing VM operation: %s", vm_operation()->name());
      assert(_cur_vm_operation != NULL, "we should have found an operation to execute");

      // Give the VM thread an extra quantum.  Jobs tend to be bursty and this
      // helps the VM thread to finish up the job.
      // FIXME: When this is enabled and there are many threads, this can degrade
      // performance significantly.
      if( VMThreadHintNoPreempt )
        os::hint_no_preempt();

      // If we are at a safepoint we will evaluate all the operations that
      // follow that also require a safepoint
      if (_cur_vm_operation->evaluate_at_safepoint()) {

        _vm_queue->set_drain_list(safepoint_ops); // ensure ops can be scanned

        SafepointSynchronize::begin();
        evaluate_operation(_cur_vm_operation);
        // now process all queued safepoint ops, iteratively draining
        // the queue until there are none left
        do {
          _cur_vm_operation = safepoint_ops;
          if (_cur_vm_operation != NULL) {
            do {
              // evaluate_operation deletes the op object so we have
              // to grab the next op now
              VM_Operation* next = _cur_vm_operation->next();
              _vm_queue->set_drain_list(next);
              evaluate_operation(_cur_vm_operation);
              _cur_vm_operation = next;
              if (PrintSafepointStatistics) {
                SafepointSynchronize::inc_vmop_coalesced_count();
              }
            } while (_cur_vm_operation != NULL);
          }
          // There is a chance that a thread enqueued a safepoint op
          // since we released the op-queue lock and initiated the safepoint.
          // So we drain the queue again if there is anything there, as an
          // optimization to try and reduce the number of safepoints.
          // As the safepoint synchronizes us with JavaThreads we will see
          // any enqueue made by a JavaThread, but the peek will not
          // necessarily detect a concurrent enqueue by a GC thread, but
          // that simply means the op will wait for the next major cycle of the
          // VMThread - just as it would if the GC thread lost the race for
          // the lock.
          if (_vm_queue->peek_at_safepoint_priority()) {
            // must hold lock while draining queue
            MutexLockerEx mu_queue(VMOperationQueue_lock,
                                   Mutex::_no_safepoint_check_flag);
            safepoint_ops = _vm_queue->drain_at_safepoint_priority();
          } else {
            safepoint_ops = NULL;
          }
        } while(safepoint_ops != NULL);

        _vm_queue->set_drain_list(NULL);

        // Complete safepoint synchronization
        SafepointSynchronize::end();

      } else {  // not a safepoint operation
        if (TraceLongCompiles) {
          elapsedTimer t;
          t.start();
          evaluate_operation(_cur_vm_operation);
          t.stop();
          double secs = t.seconds();
          if (secs * 1e3 > LongCompileThreshold) {
            // XXX - _cur_vm_operation should not be accessed after
            // the completed count has been incremented; the waiting
            // thread may have already freed this memory.
            tty->print_cr("vm %s: %3.7f secs]", _cur_vm_operation->name(), secs);
          }
        } else {
          evaluate_operation(_cur_vm_operation);
        }

        _cur_vm_operation = NULL;
      }
    }

    //
    //  Notify (potential) waiting Java thread(s) - lock without safepoint
    //  check so that sneaking is not possible
    { MutexLockerEx mu(VMOperationRequest_lock,
                       Mutex::_no_safepoint_check_flag);
      VMOperationRequest_lock->notify_all();
    }

    //
    // We want to make sure that we get to a safepoint regularly.
    //
    if (SafepointALot || SafepointSynchronize::is_cleanup_needed()) {
      long interval          = SafepointSynchronize::last_non_safepoint_interval();
      bool max_time_exceeded = GuaranteedSafepointInterval != 0 && (interval > GuaranteedSafepointInterval);
      if (SafepointALot || max_time_exceeded) {
        HandleMark hm(VMThread::vm_thread());
        SafepointSynchronize::begin();
        SafepointSynchronize::end();
      }
    }
  }
}

void VMThread::execute(VM_Operation* op) {
  Thread* t = Thread::current();
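
  // Two cases below: any other thread enqueues the operation and, unless it
  // is a concurrent operation, waits for its completion ticket, while the
  // VM thread itself evaluates the operation directly, possibly as a nested
  // VM operation.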

  if (!t->is_VM_thread()) {
    SkipGCALot sgcalot(t);    // avoid re-entrant attempts to gc-a-lot
    // JavaThread or WatcherThread
    t->check_for_valid_safepoint_state(true);

    // New request from Java thread, evaluate prologue
    if (!op->doit_prologue()) {
      return;   // op was cancelled
    }

    // Setup VM_operations for execution
    op->set_calling_thread(t, Thread::get_priority(t));

    // It does not make sense to execute the epilogue if the VM operation
    // object is getting deallocated by the VM thread.
    bool concurrent     = op->evaluate_concurrently();
    bool execute_epilog = !op->is_cheap_allocated();
    assert(!concurrent || op->is_cheap_allocated(), "concurrent => cheap_allocated");

    // Get ticket number for non-concurrent VM operations
    int ticket = 0;
    if (!concurrent) {
      ticket = t->vm_operation_ticket();
    }

    // Add VM operation to list of waiting threads. We are guaranteed not to block while holding the
    // VMOperationQueue_lock, so we can block without a safepoint check. This allows vm operation requests
    // to be queued up during a safepoint synchronization.
    {
      VMOperationQueue_lock->lock_without_safepoint_check();
      bool ok = _vm_queue->add(op);
      op->set_timestamp(os::javaTimeMillis());
      VMOperationQueue_lock->notify();
      VMOperationQueue_lock->unlock();
      // VM_Operation got skipped
      if (!ok) {
        assert(concurrent, "can only skip concurrent tasks");
        if (op->is_cheap_allocated()) delete op;
        return;
      }
    }

    if (!concurrent) {
      // Wait for completion of request (non-concurrent)
      // Note: only a JavaThread triggers the safepoint check when locking
      MutexLocker mu(VMOperationRequest_lock);
      while(t->vm_operation_completed_count() < ticket) {
        VMOperationRequest_lock->wait(!t->is_Java_thread());
      }
    }

    if (execute_epilog) {
      op->doit_epilogue();
    }
  } else {
    // invoked by VM thread; usually nested VM operation
    assert(t->is_VM_thread(), "must be a VM thread");
    VM_Operation* prev_vm_operation = vm_operation();
    if (prev_vm_operation != NULL) {
      // Check that the VM operation allows nested VM operations. This is normally
      // not the case, e.g., the compiler does not allow nested scavenges or compiles.
      if (!prev_vm_operation->allow_nested_vm_operations()) {
        fatal(err_msg("Nested VM operation %s requested by operation %s",
                      op->name(), vm_operation()->name()));
      }
      op->set_calling_thread(prev_vm_operation->calling_thread(), prev_vm_operation->priority());
    }

    EventMark em("Executing %s VM operation: %s", prev_vm_operation ? "nested" : "", op->name());

    // Release all internal handles after operation is evaluated
    HandleMark hm(t);
    _cur_vm_operation = op;

    if (op->evaluate_at_safepoint() && !SafepointSynchronize::is_at_safepoint()) {
      SafepointSynchronize::begin();
      op->evaluate();
      SafepointSynchronize::end();
    } else {
      op->evaluate();
    }

    // Free memory if needed
    if (op->is_cheap_allocated()) delete op;

    _cur_vm_operation = prev_vm_operation;
  }
}


void VMThread::oops_do(OopClosure* f, CodeBlobClosure* cf) {
  Thread::oops_do(f, cf);
  _vm_queue->oops_do(f);
}

//------------------------------------------------------------------------------------------------------------------
#ifndef PRODUCT

void VMOperationQueue::verify_queue(int prio) {
  // Check that list is correctly linked
  int length = _queue_length[prio];
  VM_Operation *cur = _queue[prio];
  int i;

  // Check forward links
  for(i = 0; i < length; i++) {
    cur = cur->next();
    assert(cur != _queue[prio], "list too short (forward)");
  }
  assert(cur->next() == _queue[prio], "list too long (forward)");

  // Check backwards links
  cur = _queue[prio];
  for(i = 0; i < length; i++) {
    cur = cur->prev();
    assert(cur != _queue[prio], "list too short (backwards)");
  }
  assert(cur->prev() == _queue[prio], "list too long (backwards)");
}

#endif

void VMThread::verify() {
  oops_do(&VerifyOopClosure::verify_oop, NULL);
}