/*
 * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "compiler/compileBroker.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "oops/verifyOopClosure.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vm_operations.hpp"
#include "services/runtimeService.hpp"
#include "trace/tracing.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/xmlstream.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

// Dummy VM operation to act as first element in our circular double-linked list
class VM_Dummy: public VM_Operation {
  VMOp_Type type() const { return VMOp_Dummy; }
  void doit() {};
};

VMOperationQueue::VMOperationQueue() {
  // The queue is a circular doubly-linked list, which always contains
  // one element (i.e., one element means empty).
  for(int i = 0; i < nof_priorities; i++) {
    _queue_length[i] = 0;
    _queue_counter = 0;
    _queue[i] = new VM_Dummy();
    _queue[i]->set_next(_queue[i]);
    _queue[i]->set_prev(_queue[i]);
  }
  _drain_list = NULL;
}


bool VMOperationQueue::queue_empty(int prio) {
  // It is empty if there is exactly one element
  bool empty = (_queue[prio] == _queue[prio]->next());
  assert( (_queue_length[prio] == 0 && empty) ||
          (_queue_length[prio] > 0  && !empty), "sanity check");
  return _queue_length[prio] == 0;
}

// Inserts an element to the right of the q element
void VMOperationQueue::insert(VM_Operation* q, VM_Operation* n) {
  assert(q->next()->prev() == q && q->prev()->next() == q, "sanity check");
  n->set_prev(q);
  n->set_next(q->next());
  q->next()->set_prev(n);
  q->set_next(n);
}

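// queue_add_front links a new operation directly after the dummy head
// element of the circular list; queue_add_back links it directly before the
// head, i.e. at the tail.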
void VMOperationQueue::queue_add_front(int prio, VM_Operation *op) {
  _queue_length[prio]++;
  insert(_queue[prio]->next(), op);
}

void VMOperationQueue::queue_add_back(int prio, VM_Operation *op) {
  _queue_length[prio]++;
  insert(_queue[prio]->prev(), op);
}


void VMOperationQueue::unlink(VM_Operation* q) {
  assert(q->next()->prev() == q && q->prev()->next() == q, "sanity check");
  q->prev()->set_next(q->next());
  q->next()->set_prev(q->prev());
}

VM_Operation* VMOperationQueue::queue_remove_front(int prio) {
  if (queue_empty(prio)) return NULL;
  assert(_queue_length[prio] >= 0, "sanity check");
  _queue_length[prio]--;
  VM_Operation* r = _queue[prio]->next();
  assert(r != _queue[prio], "cannot remove base element");
  unlink(r);
  return r;
}

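// queue_drain detaches every queued operation at the given priority in one
// step: it NULL-terminates both ends of the detached chain, resets the dummy
// head so the queue reads as empty again, and returns the first detached
// operation.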
VM_Operation* VMOperationQueue::queue_drain(int prio) {
  if (queue_empty(prio)) return NULL;
  DEBUG_ONLY(int length = _queue_length[prio];);
  assert(length >= 0, "sanity check");
  _queue_length[prio] = 0;
  VM_Operation* r = _queue[prio]->next();
  assert(r != _queue[prio], "cannot remove base element");
  // remove links to base element from head and tail
  r->set_prev(NULL);
  _queue[prio]->prev()->set_next(NULL);
  // restore queue to empty state
  _queue[prio]->set_next(_queue[prio]);
  _queue[prio]->set_prev(_queue[prio]);
  assert(queue_empty(prio), "drain corrupted queue");
#ifdef ASSERT
  int len = 0;
  VM_Operation* cur;
  for(cur = r; cur != NULL; cur=cur->next()) len++;
  assert(len == length, "drain lost some ops");
#endif
  return r;
}

void VMOperationQueue::queue_oops_do(int queue, OopClosure* f) {
  VM_Operation* cur = _queue[queue];
  cur = cur->next();
  while (cur != _queue[queue]) {
    cur->oops_do(f);
    cur = cur->next();
  }
}

void VMOperationQueue::drain_list_oops_do(OopClosure* f) {
  VM_Operation* cur = _drain_list;
  while (cur != NULL) {
    cur->oops_do(f);
    cur = cur->next();
  }
}

//-----------------------------------------------------------------
// High-level interface
bool VMOperationQueue::add(VM_Operation *op) {

  HOTSPOT_VMOPS_REQUEST(
                   (char *) op->name(), strlen(op->name()),
                   op->evaluation_mode());

  // Encapsulates the VM queue policy. Currently, that only involves
  // putting the operation on the right list.
  if (op->evaluate_at_safepoint()) {
    queue_add_back(SafepointPriority, op);
    return true;
  }

  queue_add_back(MediumPriority, op);
  return true;
}

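// remove_next favors the safepoint-priority queue for ten consecutive
// removals before letting the medium-priority queue be checked first once,
// which keeps the lower-priority queue from starving (see 4390175).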
VM_Operation* VMOperationQueue::remove_next() {
  // Assuming VMOperation queue is two-level priority queue. If there are
  // more than two priorities, we need a different scheduling algorithm.
  assert(SafepointPriority == 0 && MediumPriority == 1 && nof_priorities == 2,
         "current algorithm does not work");

  // simple counter based scheduling to prevent starvation of lower priority
  // queue. -- see 4390175
  int high_prio, low_prio;
  if (_queue_counter++ < 10) {
      high_prio = SafepointPriority;
      low_prio  = MediumPriority;
  } else {
      _queue_counter = 0;
      high_prio = MediumPriority;
      low_prio  = SafepointPriority;
  }

  return queue_remove_front(queue_empty(high_prio) ? low_prio : high_prio);
}

void VMOperationQueue::oops_do(OopClosure* f) {
  for(int i = 0; i < nof_priorities; i++) {
    queue_oops_do(i, f);
  }
  drain_list_oops_do(f);
}


//------------------------------------------------------------------------------------------------------------------
// Implementation of VMThread stuff

bool              VMThread::_should_terminate   = false;
bool              VMThread::_terminated         = false;
Monitor*          VMThread::_terminate_lock     = NULL;
VMThread*         VMThread::_vm_thread          = NULL;
VM_Operation*     VMThread::_cur_vm_operation   = NULL;
VMOperationQueue* VMThread::_vm_queue           = NULL;
PerfCounter*      VMThread::_perf_accumulated_vm_operation_time = NULL;


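// create() allocates the singleton VMThread and its operation queue, sets up
// the termination monitor, and (when UsePerfData is on) registers the jvmstat
// counter that accumulates VM operation time.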
void VMThread::create() {
  assert(vm_thread() == NULL, "we can only allocate one VMThread");
  _vm_thread = new VMThread();

  // Create VM operation queue
  _vm_queue = new VMOperationQueue();
  guarantee(_vm_queue != NULL, "just checking");

  _terminate_lock = new Monitor(Mutex::safepoint, "VMThread::_terminate_lock", true,
                                Monitor::_safepoint_check_never);

  if (UsePerfData) {
    // jvmstat performance counters
    Thread* THREAD = Thread::current();
    _perf_accumulated_vm_operation_time =
                 PerfDataManager::create_counter(SUN_THREADS, "vmOperationTime",
                                                 PerfData::U_Ticks, CHECK);
  }
}


VMThread::VMThread() : NamedThread() {
  set_name("VM Thread");
}

void VMThread::destroy() {
  if (_vm_thread != NULL) {
    delete _vm_thread;
    _vm_thread = NULL;      // VM thread is gone
  }
}

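// run() is the VM thread's entry point: it sets up thread state and native
// priority, hands control to loop() until termination is requested, and then
// forces a final safepoint so the VM can shut down safely.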
void VMThread::run() {
  assert(this == vm_thread(), "check");

  this->initialize_thread_local_storage();
  this->initialize_named_thread();
  this->record_stack_base_and_size();
  // Notify_lock wait checks on active_handles() to rewait in
  // case of spurious wakeup, it should wait on the last
  // value set prior to the notify
  this->set_active_handles(JNIHandleBlock::allocate_block());

  {
    MutexLocker ml(Notify_lock);
    Notify_lock->notify();
  }
  // Notify_lock is destroyed by Threads::create_vm()

  int prio = (VMThreadPriority == -1)
    ? os::java_to_os_priority[NearMaxPriority]
    : VMThreadPriority;
  // Note that I cannot call os::set_priority because it expects Java
  // priorities and I am *explicitly* using OS priorities so that it's
  // possible to set the VM thread priority higher than any Java thread.
  os::set_native_priority( this, prio );

  // Wait for VM_Operations until termination
  this->loop();

  // Note the intention to exit before safepointing.
  // 6295565 This has the effect of waiting for any large tty
  // outputs to finish.
  if (xtty != NULL) {
    ttyLocker ttyl;
    xtty->begin_elem("destroy_vm");
    xtty->stamp();
    xtty->end_elem();
    assert(should_terminate(), "termination flag must be set");
  }

  // 4526887 let VM thread exit at Safepoint
  SafepointSynchronize::begin();

  if (VerifyBeforeExit) {
    HandleMark hm(VMThread::vm_thread());
    // Among other things, this ensures that Eden top is correct.
    Universe::heap()->prepare_for_verify();
    os::check_heap();
    // Silent verification so as not to pollute normal output,
    // unless we really asked for it.
    Universe::verify(!(PrintGCDetails || Verbose) || VerifySilently);
  }

  CompileBroker::set_should_block();

  // wait for threads (compiler threads or daemon threads) in the
  // _thread_in_native state to block.
  VM_Exit::wait_for_threads_in_native_to_block();

  // signal other threads that VM process is gone
  {
    // Note: we must have the _no_safepoint_check_flag. Mutex::lock() allows
    // VM thread to enter any lock at Safepoint as long as its _owner is NULL.
    // If that happens after _terminate_lock->wait() has unset _owner
    // but before it actually drops the lock and waits, the notification below
    // may get lost and we will have a hang. To avoid this, we need to use
    // Mutex::lock_without_safepoint_check().
    MutexLockerEx ml(_terminate_lock, Mutex::_no_safepoint_check_flag);
    _terminated = true;
    _terminate_lock->notify();
  }

  // Thread destructor usually does this.
  ThreadLocalStorage::set_thread(NULL);

  // Deletion must be done synchronously by the JNI DestroyJavaVM thread
  // so that the VMThread deletion completes before the main thread frees
  // up the CodeHeap.

}


// Notify the VMThread that the last non-daemon JavaThread has terminated,
// and wait until operation is performed.
void VMThread::wait_for_vm_thread_exit() {
  { MutexLocker mu(VMOperationQueue_lock);
    _should_terminate = true;
    VMOperationQueue_lock->notify();
  }

  // Note: VM thread leaves at Safepoint. We are not stopped by Safepoint
  // because this thread has been removed from the threads list. But anything
  // that could get blocked by Safepoint should not be used after this point,
  // otherwise we will hang, since there is no one who can end the safepoint.

  // Wait until VM thread is terminated
  // Note: it should be OK to use Terminator_lock here. But this is called
  // at a very delicate time (VM shutdown) and we are operating in a non-VM
  // thread at Safepoint. It's safer not to share a lock with other threads.
  { MutexLockerEx ml(_terminate_lock, Mutex::_no_safepoint_check_flag);
    while(!VMThread::is_terminated()) {
      _terminate_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
}

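// evaluate_operation runs a single VM operation on the VM thread: it times
// the evaluation, posts DTrace probes and a trace event, bumps the caller's
// completed-operation count for blocking operations, and deletes the
// operation afterwards if it was C-heap allocated.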
void VMThread::evaluate_operation(VM_Operation* op) {
  ResourceMark rm;

  {
    PerfTraceTime vm_op_timer(perf_accumulated_vm_operation_time());
    HOTSPOT_VMOPS_BEGIN(
                     (char *) op->name(), strlen(op->name()),
                     op->evaluation_mode());

    EventExecuteVMOperation event;

    op->evaluate();

    if (event.should_commit()) {
      bool is_concurrent = op->evaluate_concurrently();
      event.set_operation(op->type());
      event.set_safepoint(op->evaluate_at_safepoint());
      event.set_blocking(!is_concurrent);
      // Only write caller thread information for non-concurrent vm operations.
      // For concurrent vm operations, the thread id is set to 0 indicating thread is unknown.
      // This is because the caller thread could have exited already.
      event.set_caller(is_concurrent ? 0 : op->calling_thread()->osthread()->thread_id());
      event.commit();
    }

    HOTSPOT_VMOPS_END(
                     (char *) op->name(), strlen(op->name()),
                     op->evaluation_mode());
  }

  // Last access of info in _cur_vm_operation!
  bool c_heap_allocated = op->is_cheap_allocated();

  // Mark as completed
  if (!op->evaluate_concurrently()) {
    op->calling_thread()->increment_vm_operation_completed_count();
  }
  // It is unsafe to access _cur_vm_operation after the
  // 'increment_vm_operation_completed_count' call, since if it is stack
  // allocated the calling thread might already have deallocated it.
  if (c_heap_allocated) {
    delete _cur_vm_operation;
  }
}


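// loop() is the VM thread's main dispatch loop: it waits (with a timeout, so
// safepoints still occur at least every GuaranteedSafepointInterval ms) for
// the next operation, evaluates it, coalesces any further safepoint-priority
// operations while a safepoint is already active, and then notifies waiting
// Java threads.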
void VMThread::loop() {
  assert(_cur_vm_operation == NULL, "no current one should be executing");

  while(true) {
    VM_Operation* safepoint_ops = NULL;
    //
    // Wait for VM operation
    //
    // use no_safepoint_check to get lock without attempting to "sneak"
    { MutexLockerEx mu_queue(VMOperationQueue_lock,
                             Mutex::_no_safepoint_check_flag);

      // Look for new operation
      assert(_cur_vm_operation == NULL, "no current one should be executing");
      _cur_vm_operation = _vm_queue->remove_next();

      // Stall time tracking code
      if (PrintVMQWaitTime && _cur_vm_operation != NULL &&
          !_cur_vm_operation->evaluate_concurrently()) {
        long stall = os::javaTimeMillis() - _cur_vm_operation->timestamp();
        if (stall > 0)
          tty->print_cr("%s stall: %Ld", _cur_vm_operation->name(), stall);
      }

      while (!should_terminate() && _cur_vm_operation == NULL) {
        // wait with a timeout to guarantee safepoints at regular intervals
        bool timedout =
          VMOperationQueue_lock->wait(Mutex::_no_safepoint_check_flag,
                                      GuaranteedSafepointInterval);

        // Support for self destruction
        if ((SelfDestructTimer != 0) && !is_error_reported() &&
            (os::elapsedTime() > SelfDestructTimer * 60)) {
          tty->print_cr("VM self-destructed");
          exit(-1);
        }

        if (timedout && (SafepointALot ||
                         SafepointSynchronize::is_cleanup_needed())) {
          MutexUnlockerEx mul(VMOperationQueue_lock,
                              Mutex::_no_safepoint_check_flag);
          // Force a safepoint since we have not had one for at least
          // 'GuaranteedSafepointInterval' milliseconds. This will run all
          // the clean-up processing that needs to be done regularly at a
          // safepoint
          SafepointSynchronize::begin();
          #ifdef ASSERT
            if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot();
          #endif
          SafepointSynchronize::end();
        }
        _cur_vm_operation = _vm_queue->remove_next();

        // If we are at a safepoint we will evaluate all the operations that
        // follow that also require a safepoint
        if (_cur_vm_operation != NULL &&
            _cur_vm_operation->evaluate_at_safepoint()) {
          safepoint_ops = _vm_queue->drain_at_safepoint_priority();
        }
      }

      if (should_terminate()) break;
    } // Release mu_queue_lock

    //
    // Execute VM operation
    //
    { HandleMark hm(VMThread::vm_thread());

      EventMark em("Executing VM operation: %s", vm_operation()->name());
      assert(_cur_vm_operation != NULL, "we should have found an operation to execute");

      // Give the VM thread an extra quantum. Jobs tend to be bursty and this
      // helps the VM thread to finish up the job.
      // FIXME: When this is enabled and there are many threads, this can degrade
      // performance significantly.
      if( VMThreadHintNoPreempt )
        os::hint_no_preempt();

      // If we are at a safepoint we will evaluate all the operations that
      // follow that also require a safepoint
      if (_cur_vm_operation->evaluate_at_safepoint()) {

        _vm_queue->set_drain_list(safepoint_ops); // ensure ops can be scanned

        SafepointSynchronize::begin();
        evaluate_operation(_cur_vm_operation);
        // now process all queued safepoint ops, iteratively draining
        // the queue until there are none left
        do {
          _cur_vm_operation = safepoint_ops;
          if (_cur_vm_operation != NULL) {
            do {
              // evaluate_operation deletes the op object so we have
              // to grab the next op now
              VM_Operation* next = _cur_vm_operation->next();
              _vm_queue->set_drain_list(next);
              evaluate_operation(_cur_vm_operation);
              _cur_vm_operation = next;
              if (PrintSafepointStatistics) {
                SafepointSynchronize::inc_vmop_coalesced_count();
              }
            } while (_cur_vm_operation != NULL);
          }
          // There is a chance that a thread enqueued a safepoint op
          // since we released the op-queue lock and initiated the safepoint.
          // So we drain the queue again if there is anything there, as an
          // optimization to try and reduce the number of safepoints.
          // As the safepoint synchronizes us with JavaThreads we will see
          // any enqueue made by a JavaThread, but the peek will not
          // necessarily detect a concurrent enqueue by a GC thread, but
          // that simply means the op will wait for the next major cycle of the
          // VMThread - just as it would if the GC thread lost the race for
          // the lock.
          if (_vm_queue->peek_at_safepoint_priority()) {
            // must hold lock while draining queue
            MutexLockerEx mu_queue(VMOperationQueue_lock,
                                   Mutex::_no_safepoint_check_flag);
            safepoint_ops = _vm_queue->drain_at_safepoint_priority();
          } else {
            safepoint_ops = NULL;
          }
        } while(safepoint_ops != NULL);

        _vm_queue->set_drain_list(NULL);

        // Complete safepoint synchronization
        SafepointSynchronize::end();

      } else {  // not a safepoint operation
        if (TraceLongCompiles) {
          elapsedTimer t;
          t.start();
          evaluate_operation(_cur_vm_operation);
          t.stop();
          double secs = t.seconds();
          if (secs * 1e3 > LongCompileThreshold) {
            // XXX - _cur_vm_operation should not be accessed after
            // the completed count has been incremented; the waiting
            // thread may have already freed this memory.
            tty->print_cr("vm %s: %3.7f secs]", _cur_vm_operation->name(), secs);
          }
        } else {
          evaluate_operation(_cur_vm_operation);
        }

        _cur_vm_operation = NULL;
      }
    }

    //
    // Notify (potential) waiting Java thread(s) - lock without safepoint
    // check so that sneaking is not possible
    { MutexLockerEx mu(VMOperationRequest_lock,
                       Mutex::_no_safepoint_check_flag);
      VMOperationRequest_lock->notify_all();
    }

    //
    // We want to make sure that we get to a safepoint regularly.
    //
    if (SafepointALot || SafepointSynchronize::is_cleanup_needed()) {
      long interval          = SafepointSynchronize::last_non_safepoint_interval();
      bool max_time_exceeded = GuaranteedSafepointInterval != 0 && (interval > GuaranteedSafepointInterval);
      if (SafepointALot || max_time_exceeded) {
        HandleMark hm(VMThread::vm_thread());
        SafepointSynchronize::begin();
        SafepointSynchronize::end();
      }
    }
  }
}

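// execute() is the client entry point. A JavaThread (or WatcherThread)
// enqueues the operation and, unless it is a concurrent operation, blocks on
// VMOperationRequest_lock until its ticket is completed. When the caller is
// the VM thread itself, the operation is evaluated directly, nesting inside
// the current operation if that operation allows it.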
void VMThread::execute(VM_Operation* op) {
  Thread* t = Thread::current();

  if (!t->is_VM_thread()) {
    SkipGCALot sgcalot(t);    // avoid re-entrant attempts to gc-a-lot
    // JavaThread or WatcherThread
    bool concurrent = op->evaluate_concurrently();
    // only blocking VM operations need to verify the caller's safepoint state:
    if (!concurrent) {
      t->check_for_valid_safepoint_state(true);
    }

    // New request from Java thread, evaluate prologue
    if (!op->doit_prologue()) {
      return;   // op was cancelled
    }

    // Setup VM_operations for execution
    op->set_calling_thread(t, Thread::get_priority(t));

    // It does not make sense to execute the epilogue, if the VM operation object is getting
    // deallocated by the VM thread.
    bool execute_epilog = !op->is_cheap_allocated();
    assert(!concurrent || op->is_cheap_allocated(), "concurrent => cheap_allocated");

    // Get ticket number for non-concurrent VM operations
    int ticket = 0;
    if (!concurrent) {
      ticket = t->vm_operation_ticket();
    }

    // Add VM operation to list of waiting threads. We are guaranteed not to block while holding the
    // VMOperationQueue_lock, so we can block without a safepoint check. This allows vm operation requests
    // to be queued up during a safepoint synchronization.
    {
      VMOperationQueue_lock->lock_without_safepoint_check();
      bool ok = _vm_queue->add(op);
      op->set_timestamp(os::javaTimeMillis());
      VMOperationQueue_lock->notify();
      VMOperationQueue_lock->unlock();
      // VM_Operation got skipped
      if (!ok) {
        assert(concurrent, "can only skip concurrent tasks");
        if (op->is_cheap_allocated()) delete op;
        return;
      }
    }

    if (!concurrent) {
      // Wait for completion of request (non-concurrent)
      // Note: only a JavaThread triggers the safepoint check when locking
      MutexLocker mu(VMOperationRequest_lock);
      while(t->vm_operation_completed_count() < ticket) {
        VMOperationRequest_lock->wait(!t->is_Java_thread());
      }
    }

    if (execute_epilog) {
      op->doit_epilogue();
    }
  } else {
    // invoked by VM thread; usually nested VM operation
    assert(t->is_VM_thread(), "must be a VM thread");
    VM_Operation* prev_vm_operation = vm_operation();
    if (prev_vm_operation != NULL) {
      // Check that the VM operation allows nested VM operations. This is normally
      // not the case, e.g., the compiler does not allow nested scavenges or compiles.
      if (!prev_vm_operation->allow_nested_vm_operations()) {
        fatal(err_msg("Nested VM operation %s requested by operation %s",
                      op->name(), vm_operation()->name()));
      }
      op->set_calling_thread(prev_vm_operation->calling_thread(), prev_vm_operation->priority());
    }

    EventMark em("Executing %s VM operation: %s", prev_vm_operation ? "nested" : "", op->name());

    // Release all internal handles after operation is evaluated
    HandleMark hm(t);
    _cur_vm_operation = op;

    if (op->evaluate_at_safepoint() && !SafepointSynchronize::is_at_safepoint()) {
      SafepointSynchronize::begin();
      op->evaluate();
      SafepointSynchronize::end();
    } else {
      op->evaluate();
    }

    // Free memory if needed
    if (op->is_cheap_allocated()) delete op;

    _cur_vm_operation = prev_vm_operation;
  }
}


e2a6bf7f343a
8035393: Use CLDClosure instead of CLDToOopClosure in frame::oops_interpreted_do
stefank
parents:
18025
diff
changeset
|
661 |
void VMThread::oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) { |
14582
490bb6c0df7c
8003720: NPG: Method in interpreter stack frame can be deallocated
stefank
parents:
13728
diff
changeset
|
662 |
Thread::oops_do(f, cld_f, cf); |
1 | 663 |
_vm_queue->oops_do(f); |
664 |
} |
|
665 |
||
//------------------------------------------------------------------------------------------------------------------
#ifndef PRODUCT

void VMOperationQueue::verify_queue(int prio) {
  // Check that list is correctly linked
  int length = _queue_length[prio];
  VM_Operation *cur = _queue[prio];
  int i;

  // Check forward links
  for(i = 0; i < length; i++) {
    cur = cur->next();
    assert(cur != _queue[prio], "list too short (forward)");
  }
  assert(cur->next() == _queue[prio], "list too long (forward)");

  // Check backwards links
  cur = _queue[prio];
  for(i = 0; i < length; i++) {
    cur = cur->prev();
    assert(cur != _queue[prio], "list too short (backwards)");
  }
  assert(cur->prev() == _queue[prio], "list too long (backwards)");
}

#endif

void VMThread::verify() {
  oops_do(&VerifyOopClosure::verify_oop, NULL, NULL);
}