/*
 * Copyright 1999-2007 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// do not include precompiled header file
# include "incls/_os_linux_x86.cpp.incl"

// put OS-includes here
# include <sys/types.h>
# include <sys/mman.h>
# include <pthread.h>
# include <signal.h>
# include <errno.h>
# include <dlfcn.h>
# include <stdlib.h>
# include <stdio.h>
# include <unistd.h>
# include <sys/resource.h>
# include <pthread.h>
# include <sys/stat.h>
# include <sys/time.h>
# include <sys/utsname.h>
# include <sys/socket.h>
# include <sys/wait.h>
# include <pwd.h>
# include <poll.h>
# include <ucontext.h>
# include <fpu_control.h>

#ifdef AMD64
#define REG_SP REG_RSP
#define REG_PC REG_RIP
#define REG_FP REG_RBP
#define SPELL_REG_SP "rsp"
#define SPELL_REG_FP "rbp"
#else
#define REG_SP REG_UESP
#define REG_PC REG_EIP
#define REG_FP REG_EBP
#define SPELL_REG_SP "esp"
#define SPELL_REG_FP "ebp"
#endif // AMD64

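// Return the current thread's stack pointer by reading the SP register
// directly. With gcc the local variable is simply bound to the register;
// the Sun Studio (SPARC_WORKS) build uses an explicit mov instead, and its
// sizeof(long)*2 adjustment appears intended to skip this function's own
// frame (an inference from the code below, not documented upstream).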
address os::current_stack_pointer() {
#ifdef SPARC_WORKS
  register void *esp;
  __asm__("mov %%"SPELL_REG_SP", %0":"=r"(esp));
  return (address) ((char*)esp + sizeof(long)*2);
#else
  register void *esp __asm__ (SPELL_REG_SP);
  return (address) esp;
#endif
}

char* os::non_memory_address_word() {
  // Must never look like an address returned by reserve_memory,
  // even in its subfields (as defined by the CPU immediate fields,
  // if the CPU splits constants across multiple instructions).

  return (char*) -1;
}

void os::initialize_thread() {
  // Nothing to do.
}

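// Accessors for the register state saved in a signal's ucontext. REG_PC,
// REG_SP and REG_FP map to the platform-specific gregs indices defined at
// the top of this file.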
address os::Linux::ucontext_get_pc(ucontext_t * uc) {
  return (address)uc->uc_mcontext.gregs[REG_PC];
}

intptr_t* os::Linux::ucontext_get_sp(ucontext_t * uc) {
  return (intptr_t*)uc->uc_mcontext.gregs[REG_SP];
}

intptr_t* os::Linux::ucontext_get_fp(ucontext_t * uc) {
  return (intptr_t*)uc->uc_mcontext.gregs[REG_FP];
}

// For Forte Analyzer AsyncGetCallTrace profiling support - thread
// is currently interrupted by SIGPROF.
// os::Solaris::fetch_frame_from_ucontext() tries to skip nested signal
// frames. Currently we don't do that on Linux, so it's the same as
// os::fetch_frame_from_context().
ExtendedPC os::Linux::fetch_frame_from_ucontext(Thread* thread,
  ucontext_t* uc, intptr_t** ret_sp, intptr_t** ret_fp) {

  assert(thread != NULL, "just checking");
  assert(ret_sp != NULL, "just checking");
  assert(ret_fp != NULL, "just checking");

  return os::fetch_frame_from_context(uc, ret_sp, ret_fp);
}

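// Extract pc, sp and fp from a ucontext. If the context is missing, an
// empty ExtendedPC and NULL sp/fp are returned so callers can detect the
// failure by checking pc().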
ExtendedPC os::fetch_frame_from_context(void* ucVoid,
                    intptr_t** ret_sp, intptr_t** ret_fp) {

  ExtendedPC  epc;
  ucontext_t* uc = (ucontext_t*)ucVoid;

  if (uc != NULL) {
    epc = ExtendedPC(os::Linux::ucontext_get_pc(uc));
    if (ret_sp) *ret_sp = os::Linux::ucontext_get_sp(uc);
    if (ret_fp) *ret_fp = os::Linux::ucontext_get_fp(uc);
  } else {
    // construct empty ExtendedPC for return value checking
    epc = ExtendedPC(NULL);
    if (ret_sp) *ret_sp = (intptr_t *)NULL;
    if (ret_fp) *ret_fp = (intptr_t *)NULL;
  }

  return epc;
}

frame os::fetch_frame_from_context(void* ucVoid) {
  intptr_t* sp;
  intptr_t* fp;
  ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp);
  return frame(sp, fp, epc.pc());
}

// By default, gcc always saves the frame pointer (%ebp/%rbp) on the stack.
// It may get turned off by -fomit-frame-pointer.
frame os::get_sender_for_C_frame(frame* fr) {
  return frame(fr->sender_sp(), fr->link(), fr->sender_pc());
}

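// Read the frame pointer register and dereference it to obtain the caller's
// saved frame pointer; used by os::current_frame() below. As above, the
// Sun Studio build needs an explicit mov because it cannot bind a variable
// to a specific register.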
intptr_t* _get_previous_fp() {
#ifdef SPARC_WORKS
  register intptr_t **ebp;
  __asm__("mov %%"SPELL_REG_FP", %0":"=r"(ebp));
#else
  register intptr_t **ebp __asm__ (SPELL_REG_FP);
#endif
  return (intptr_t*) *ebp;   // we want what it points to.
}

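// Construct a frame for os::current_frame() itself and return its sender,
// i.e. the C frame of the caller; a NULL frame is returned if the stack is
// not walkable from here.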
frame os::current_frame() {
  intptr_t* fp = _get_previous_fp();
  frame myframe((intptr_t*)os::current_stack_pointer(),
                (intptr_t*)fp,
                CAST_FROM_FN_PTR(address, os::current_frame));
  if (os::is_first_C_frame(&myframe)) {
    // stack is not walkable
    return frame(NULL, NULL, NULL);
  } else {
    return os::get_sender_for_C_frame(&myframe);
  }
}

// Utility functions

// From IA32 System Programming Guide
enum {
  trap_page_fault = 0xE
};

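// Fetch32PFI/FetchNPFI mark the load instruction inside the SafeFetch32 /
// SafeFetchN stubs (defined in the corresponding assembly files); if the
// signal handler sees a fault at that pc it redirects execution to the
// matching *Resume label so the fetch fails gracefully instead of crashing
// the VM.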
extern "C" void Fetch32PFI () ;
extern "C" void Fetch32Resume () ;
#ifdef AMD64
extern "C" void FetchNPFI () ;
extern "C" void FetchNResume () ;
#endif // AMD64

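// Signal handling entry point for the VM on linux-x86, invoked from the
// installed signal handlers. It triages SIGSEGV, SIGBUS, SIGFPE, SIGPIPE
// and SIGXFSZ: implicit null checks, stack overflow, divide-by-zero,
// safepoint polling page traps and unsafe-access faults are converted into
// continuation stubs; anything unrecognized is passed to chained handlers
// or, if abort_if_unrecognized is set, reported as a fatal error. Returns
// true if the signal was handled.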
extern "C" int
JVM_handle_linux_signal(int sig,
                        siginfo_t* info,
                        void* ucVoid,
                        int abort_if_unrecognized) {
  ucontext_t* uc = (ucontext_t*) ucVoid;

  Thread* t = ThreadLocalStorage::get_thread_slow();

  SignalHandlerMark shm(t);

  // Note: it's not uncommon that JNI code uses signal/sigset to install and
  // then restore certain signal handlers (e.g. to temporarily block SIGPIPE,
  // or have a SIGILL handler when detecting CPU type). When that happens,
  // JVM_handle_linux_signal() might be invoked with junk info/ucVoid. To
  // avoid an unnecessary crash when libjsig is not preloaded, try to handle
  // signals that do not require siginfo/ucontext first.

  if (sig == SIGPIPE || sig == SIGXFSZ) {
    // allow chained handler to go first
    if (os::Linux::chained_handler(sig, info, ucVoid)) {
      return true;
    } else {
      if (PrintMiscellaneous && (WizardMode || Verbose)) {
        char buf[64];
        warning("Ignoring %s - see bugs 4229104 or 646499219",
                os::exception_name(sig, buf, sizeof(buf)));
      }
      return true;
    }
  }

JavaThread* thread = NULL; |
|
218 |
VMThread* vmthread = NULL; |
|
219 |
if (os::Linux::signal_handlers_are_installed) { |
|
220 |
if (t != NULL ){ |
|
221 |
if(t->is_Java_thread()) { |
|
222 |
thread = (JavaThread*)t; |
|
223 |
} |
|
224 |
else if(t->is_VM_thread()){ |
|
225 |
vmthread = (VMThread *)t; |
|
226 |
} |
|
227 |
} |
|
228 |
} |
|
229 |
/* |
|
230 |
NOTE: does not seem to work on linux. |
|
231 |
if (info == NULL || info->si_code <= 0 || info->si_code == SI_NOINFO) { |
|
232 |
// can't decode this kind of signal |
|
233 |
info = NULL; |
|
234 |
} else { |
|
235 |
assert(sig == info->si_signo, "bad siginfo"); |
|
236 |
} |
|
237 |
*/ |
|
  // decide if this trap can be handled by a stub
  address stub = NULL;

  address pc = NULL;

  //%note os_trap_1
  if (info != NULL && uc != NULL && thread != NULL) {
    pc = (address) os::Linux::ucontext_get_pc(uc);

    if (pc == (address) Fetch32PFI) {
      uc->uc_mcontext.gregs[REG_PC] = intptr_t(Fetch32Resume) ;
      return 1 ;
    }
#ifdef AMD64
    if (pc == (address) FetchNPFI) {
      uc->uc_mcontext.gregs[REG_PC] = intptr_t (FetchNResume) ;
      return 1 ;
    }
#endif // AMD64

    // Handle ALL stack overflow variations here
    if (sig == SIGSEGV) {
      address addr = (address) info->si_addr;

      // check if fault address is within thread stack
      if (addr < thread->stack_base() &&
          addr >= thread->stack_base() - thread->stack_size()) {
        // stack overflow
        if (thread->in_stack_yellow_zone(addr)) {
          thread->disable_stack_yellow_zone();
          if (thread->thread_state() == _thread_in_Java) {
            // Throw a stack overflow exception. Guard pages will be reenabled
            // while unwinding the stack.
            stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
          } else {
            // Thread was in the vm or native code. Return and try to finish.
            return 1;
          }
        } else if (thread->in_stack_red_zone(addr)) {
          // Fatal red zone violation. Disable the guard pages and fall through
          // to handle_unexpected_exception way down below.
          thread->disable_stack_red_zone();
          tty->print_raw_cr("An irrecoverable stack overflow has occurred.");
        } else {
          // Accessing stack address below sp may cause SEGV if current
          // thread has MAP_GROWSDOWN stack. This should only happen when
          // current thread was created by user code with MAP_GROWSDOWN flag
          // and then attached to VM. See notes in os_linux.cpp.
          if (thread->osthread()->expanding_stack() == 0) {
            thread->osthread()->set_expanding_stack();
            if (os::Linux::manually_expand_stack(thread, addr)) {
              thread->osthread()->clear_expanding_stack();
              return 1;
            }
            thread->osthread()->clear_expanding_stack();
          } else {
            fatal("recursive segv. expanding stack.");
          }
        }
      }
    }

    if (thread->thread_state() == _thread_in_Java) {
      // Java thread running in Java code => find exception handler if any
      // a fault inside compiled code, the interpreter, or a stub

      if (sig == SIGSEGV && os::is_poll_address((address)info->si_addr)) {
        stub = SharedRuntime::get_poll_stub(pc);
      } else if (sig == SIGBUS /* && info->si_code == BUS_OBJERR */) {
        // BugId 4454115: A read from a MappedByteBuffer can fault
        // here if the underlying file has been truncated.
        // Do not crash the VM in such a case.
        CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
        nmethod* nm = cb->is_nmethod() ? (nmethod*)cb : NULL;
        if (nm != NULL && nm->has_unsafe_access()) {
          stub = StubRoutines::handler_for_unsafe_access();
        }
      }
      else

#ifdef AMD64
      if (sig == SIGFPE &&
          (info->si_code == FPE_INTDIV || info->si_code == FPE_FLTDIV)) {
        stub =
          SharedRuntime::
          continuation_for_implicit_exception(thread,
                                              pc,
                                              SharedRuntime::
                                              IMPLICIT_DIVIDE_BY_ZERO);
#else
      if (sig == SIGFPE /* && info->si_code == FPE_INTDIV */) {
        // HACK: si_code does not work on linux 2.2.12-20!!!
        int op = pc[0];
        if (op == 0xDB) {
          // FIST
          // TODO: The encoding of D2I in i486.ad can cause an exception
          // prior to the fist instruction if there was an invalid operation
          // pending. We want to dismiss that exception. From the win_32
          // side it also seems that if it really was the fist causing
          // the exception that we do the d2i by hand with different
          // rounding. Seems kind of weird.
          // NOTE: that we take the exception at the NEXT floating point instruction.
          assert(pc[0] == 0xDB, "not a FIST opcode");
          assert(pc[1] == 0x14, "not a FIST opcode");
          assert(pc[2] == 0x24, "not a FIST opcode");
          return true;
        } else if (op == 0xF7) {
          // IDIV
          stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO);
        } else {
          // TODO: handle more cases if we are using other x86 instructions
          // that can generate SIGFPE signal on linux.
          tty->print_cr("unknown opcode 0x%X with SIGFPE.", op);
          fatal("please update this code.");
        }
#endif // AMD64
      } else if (sig == SIGSEGV &&
                 !MacroAssembler::needs_explicit_null_check((intptr_t)info->si_addr)) {
        // Determination of interpreter/vtable stub/compiled code null exception
        stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
      }
    } else if (thread->thread_state() == _thread_in_vm &&
               sig == SIGBUS && /* info->si_code == BUS_OBJERR && */
               thread->doing_unsafe_access()) {
      stub = StubRoutines::handler_for_unsafe_access();
    }

    // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in
    // and the heap gets shrunk before the field access.
    if ((sig == SIGSEGV) || (sig == SIGBUS)) {
      address addr = JNI_FastGetField::find_slowcase_pc(pc);
      if (addr != (address)-1) {
        stub = addr;
      }
    }

    // Check to see if we caught the safepoint code in the
    // process of write protecting the memory serialization page.
    // It write enables the page immediately after protecting it
    // so we can just return to retry the write.
    if ((sig == SIGSEGV) &&
        os::is_memory_serialize_page(thread, (address) info->si_addr)) {
      // Block current thread until the memory serialize page permission is restored.
      os::block_on_serialize_page_trap();
      return true;
    }
  }

#ifndef AMD64
  // Execution protection violation
  //
  // This should be kept as the last step in the triage. We don't
  // have a dedicated trap number for a no-execute fault, so be
  // conservative and allow other handlers the first shot.
  //
  // Note: We don't test that info->si_code == SEGV_ACCERR here.
  // this si_code is so generic that it is almost meaningless; and
  // the si_code for this condition may change in the future.
  // Furthermore, a false-positive should be harmless.
  if (UnguardOnExecutionViolation > 0 &&
      (sig == SIGSEGV || sig == SIGBUS) &&
      uc->uc_mcontext.gregs[REG_TRAPNO] == trap_page_fault) {
    int page_size = os::vm_page_size();
    address addr = (address) info->si_addr;
    address pc = os::Linux::ucontext_get_pc(uc);
    // Make sure the pc and the faulting address are sane.
    //
    // If an instruction spans a page boundary, and the page containing
    // the beginning of the instruction is executable but the following
    // page is not, the pc and the faulting address might be slightly
    // different - we still want to unguard the 2nd page in this case.
    //
    // 15 bytes seems to be a (very) safe value for max instruction size.
    bool pc_is_near_addr =
      (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
    bool instr_spans_page_boundary =
      (align_size_down((intptr_t) pc ^ (intptr_t) addr,
                       (intptr_t) page_size) > 0);

    if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
      static volatile address last_addr =
        (address) os::non_memory_address_word();

      // In conservative mode, don't unguard unless the address is in the VM
      if (addr != last_addr &&
          (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {

        // Unguard and retry
        address page_start =
          (address) align_size_down((intptr_t) addr, (intptr_t) page_size);
        bool res = os::unguard_memory((char*) page_start, page_size);

        if (PrintMiscellaneous && Verbose) {
          char buf[256];
          jio_snprintf(buf, sizeof(buf), "Execution protection violation "
                       "at " INTPTR_FORMAT
                       ", unguarding " INTPTR_FORMAT ": %s, errno=%d", addr,
                       page_start, (res ? "success" : "failed"), errno);
          tty->print_raw_cr(buf);
        }
        stub = pc;

        // Set last_addr so if we fault again at the same address, we don't end
        // up in an endless loop.
        //
        // There are two potential complications here. Two threads trapping at
        // the same address at the same time could cause one of the threads to
        // think it already unguarded, and abort the VM. Likely very rare.
        //
        // The other race involves two threads alternately trapping at
        // different addresses and failing to unguard the page, resulting in
        // an endless loop. This condition is probably even more unlikely than
        // the first.
        //
        // Although both cases could be avoided by using locks or thread local
        // last_addr, these solutions are unnecessary complication: this
        // handler is a best-effort safety net, not a complete solution. It is
        // disabled by default and should only be used as a workaround in case
        // we missed any no-execute-unsafe VM code.

        last_addr = addr;
      }
    }
  }
#endif // !AMD64

  if (stub != NULL) {
    // save all thread context in case we need to restore it
    if (thread != NULL) thread->set_saved_exception_pc(pc);

    uc->uc_mcontext.gregs[REG_PC] = (greg_t)stub;
    return true;
  }

  // signal-chaining
  if (os::Linux::chained_handler(sig, info, ucVoid)) {
    return true;
  }

  if (!abort_if_unrecognized) {
    // caller wants another chance, so give it to him
    return false;
  }

  if (pc == NULL && uc != NULL) {
    pc = os::Linux::ucontext_get_pc(uc);
  }

  // unmask current signal
  sigset_t newset;
  sigemptyset(&newset);
  sigaddset(&newset, sig);
  sigprocmask(SIG_UNBLOCK, &newset, NULL);

  VMError err(t, sig, pc, info, ucVoid);
  err.report_and_die();

  ShouldNotReachHere();
}

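// On IA32 the x87 FPU starts out in 80-bit extended precision; Java requires
// double arithmetic to round as if performed in 64-bit IEEE format, so each
// thread's control word is set to 53-bit (double) precision here. On AMD64
// SSE is used for floating point and no adjustment is needed.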
void os::Linux::init_thread_fpu_state(void) {
#ifndef AMD64
  // set fpu to 53 bit precision
  set_fpu_control_word(0x27f);
#endif // !AMD64
}

int os::Linux::get_fpu_control_word(void) {
#ifdef AMD64
  return 0;
#else
  int fpu_control;
  _FPU_GETCW(fpu_control);
  return fpu_control & 0xffff;
#endif // AMD64
}

void os::Linux::set_fpu_control_word(int fpu_control) {
#ifndef AMD64
  _FPU_SETCW(fpu_control);
#endif // !AMD64
}

// Check that the linux kernel version is 2.4 or higher since earlier
// versions do not support SSE without patches.
bool os::supports_sse() {
#ifdef AMD64
  return true;
#else
  struct utsname uts;
  if( uname(&uts) != 0 ) return false; // uname fails?
  char *minor_string;
  int major = strtol(uts.release,&minor_string,10);
  int minor = strtol(minor_string+1,NULL,10);
  bool result = (major > 2 || (major==2 && minor >= 4));
#ifndef PRODUCT
  if (PrintMiscellaneous && Verbose) {
    tty->print("OS version is %d.%d, which %s support SSE/SSE2\n",
               major,minor, result ? "DOES" : "does NOT");
  }
#endif
  return result;
#endif // AMD64
}

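// On 32-bit Linux a request of 2GB or more may not fit in a single
// contiguous mapping, so probe by reserving the memory and immediately
// releasing it. On AMD64 the address space is large enough that no check
// is needed.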
bool os::is_allocatable(size_t bytes) {
#ifdef AMD64
  // unused on amd64?
  return true;
#else

  if (bytes < 2 * G) {
    return true;
  }

  char* addr = reserve_memory(bytes, NULL);

  if (addr != NULL) {
    release_memory(addr, bytes);
  }

  return addr != NULL;
#endif // AMD64
}

////////////////////////////////////////////////////////////////////////////////
// thread stack

#ifdef AMD64
size_t os::Linux::min_stack_allowed = 64 * K;

// amd64: pthread on amd64 is always in floating stack mode
bool os::Linux::supports_variable_stack_size() { return true; }
#else
size_t os::Linux::min_stack_allowed = (48 DEBUG_ONLY(+4))*K;

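// GET_GS() reads the %gs segment selector (low 16 bits); it is used below to
// distinguish floating-stack LinuxThreads/NPTL, which use %gs for thread
// specific data, from fixed-stack LinuxThreads, which leave it at 0.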
#ifdef __GNUC__
#define GET_GS() ({int gs; __asm__ volatile("movw %%gs, %w0":"=q"(gs)); gs&0xffff;})
#endif

// Test if pthread library can support variable thread stack size. LinuxThreads
// in fixed stack mode allocates a fixed 2M slot for each thread. LinuxThreads
// in floating stack mode and NPTL support variable stack size.
bool os::Linux::supports_variable_stack_size() {
  if (os::Linux::is_NPTL()) {
    // NPTL, yes
    return true;

  } else {
    // Note: We can't control default stack size when creating a thread.
    // If we use non-default stack size (pthread_attr_setstacksize), both
    // floating stack and non-floating stack LinuxThreads will return the
    // same value. This makes it impossible to implement this function by
    // detecting thread stack size directly.
    //
    // An alternative approach is to check %gs. Fixed-stack LinuxThreads
    // do not use %gs, so its value is 0. Floating-stack LinuxThreads use
    // %gs (either as LDT selector or GDT selector, depending on kernel)
    // to access thread specific data.
    //
    // Note that %gs is a reserved glibc register since early 2001, so
    // applications are not allowed to change its value (Ulrich Drepper from
    // Red Hat confirmed that all known offenders have been modified to use
    // either %fs or TSD). In the worst case scenario, when the VM is embedded
    // in a native application that plays with %gs, we might see a non-zero
    // %gs even if LinuxThreads is running in fixed stack mode. As a result,
    // we'll return true and skip _thread_safety_check(), so we may not be
    // able to detect stack-heap collisions. But otherwise it's harmless.
    //
#ifdef __GNUC__
    return (GET_GS() != 0);
#else
    return false;
#endif
  }
}
#endif // AMD64

// return default stack size for thr_type
size_t os::Linux::default_stack_size(os::ThreadType thr_type) {
  // default stack size (compiler thread needs larger stack)
#ifdef AMD64
  size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M);
#else
  size_t s = (thr_type == os::compiler_thread ? 2 * M : 512 * K);
#endif // AMD64
  return s;
}

size_t os::Linux::default_guard_size(os::ThreadType thr_type) {
  // Creating a guard page is very expensive. Java threads have HotSpot
  // guard pages, so only enable the glibc guard page for non-Java threads.
  return (thr_type == java_thread ? 0 : page_size());
}

// Java thread:
//
//   Low memory addresses
//    +------------------------+
//    |                        |\  JavaThread created by VM does not have glibc
//    |    glibc guard page    | - guard, attached Java thread usually has
//    |                        |/  1 page glibc guard.
// P1 +------------------------+ Thread::stack_base() - Thread::stack_size()
//    |                        |\
//    |  HotSpot Guard Pages   | - red and yellow pages
//    |                        |/
//    +------------------------+ JavaThread::stack_yellow_zone_base()
//    |                        |\
//    |      Normal Stack      | -
//    |                        |/
// P2 +------------------------+ Thread::stack_base()
//
// Non-Java thread:
//
//   Low memory addresses
//    +------------------------+
//    |                        |\
//    |    glibc guard page    | - usually 1 page
//    |                        |/
// P1 +------------------------+ Thread::stack_base() - Thread::stack_size()
//    |                        |\
//    |      Normal Stack      | -
//    |                        |/
// P2 +------------------------+ Thread::stack_base()
//
// ** P1 (aka bottom) and size (P2 = P1 + size) are the address and stack size
//    returned from pthread_attr_getstack()

static void current_stack_region(address * bottom, size_t * size) {
  if (os::Linux::is_initial_thread()) {
    // initial thread needs special handling because pthread_getattr_np()
    // may return bogus value.
    *bottom = os::Linux::initial_thread_stack_bottom();
    *size   = os::Linux::initial_thread_stack_size();
  } else {
    pthread_attr_t attr;

    int rslt = pthread_getattr_np(pthread_self(), &attr);

    // JVM needs to know exact stack location, abort if it fails
    if (rslt != 0) {
      if (rslt == ENOMEM) {
        vm_exit_out_of_memory(0, "pthread_getattr_np");
      } else {
        fatal1("pthread_getattr_np failed with errno = %d", rslt);
      }
    }

    if (pthread_attr_getstack(&attr, (void **)bottom, size) != 0) {
      fatal("Can not locate current stack attributes!");
    }

    pthread_attr_destroy(&attr);

  }
  assert(os::current_stack_pointer() >= *bottom &&
         os::current_stack_pointer() < *bottom + *size, "just checking");
}

address os::current_stack_base() {
  address bottom;
  size_t size;
  current_stack_region(&bottom, &size);
  return (bottom + size);
}

size_t os::current_stack_size() {
  // stack size includes normal stack and HotSpot guard pages
  address bottom;
  size_t size;
  current_stack_region(&bottom, &size);
  return size;
}

/////////////////////////////////////////////////////////////////////////////
// helper functions for fatal error handler

void os::print_context(outputStream *st, void *context) {
  if (context == NULL) return;

  ucontext_t *uc = (ucontext_t*)context;
  st->print_cr("Registers:");
#ifdef AMD64
  st->print(  "RAX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RAX]);
  st->print(", RBX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RBX]);
  st->print(", RCX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RCX]);
  st->print(", RDX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RDX]);
  st->cr();
  st->print(  "RSP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RSP]);
  st->print(", RBP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RBP]);
  st->print(", RSI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RSI]);
  st->print(", RDI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RDI]);
  st->cr();
  st->print(  "R8 =" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R8]);
  st->print(", R9 =" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R9]);
  st->print(", R10=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R10]);
  st->print(", R11=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R11]);
  st->cr();
  st->print(  "R12=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R12]);
  st->print(", R13=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R13]);
  st->print(", R14=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R14]);
  st->print(", R15=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R15]);
  st->cr();
  st->print(  "RIP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RIP]);
  st->print(", EFL=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EFL]);
  st->print(", CSGSFS=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_CSGSFS]);
  st->print(", ERR=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_ERR]);
  st->cr();
  st->print("  TRAPNO=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_TRAPNO]);
#else
  st->print(  "EAX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EAX]);
  st->print(", EBX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EBX]);
  st->print(", ECX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_ECX]);
  st->print(", EDX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EDX]);
  st->cr();
  st->print(  "ESP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_UESP]);
  st->print(", EBP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EBP]);
  st->print(", ESI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_ESI]);
  st->print(", EDI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EDI]);
  st->cr();
  st->print(  "EIP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EIP]);
  st->print(", CR2=" INTPTR_FORMAT, uc->uc_mcontext.cr2);
  st->print(", EFLAGS=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EFL]);
#endif // AMD64
  st->cr();
  st->cr();

  intptr_t *sp = (intptr_t *)os::Linux::ucontext_get_sp(uc);
  st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", sp);
  print_hex_dump(st, (address)sp, (address)(sp + 8*sizeof(intptr_t)), sizeof(intptr_t));
  st->cr();

  // Note: it may be unsafe to inspect memory near pc. For example, pc may
  // point to garbage if entry point in an nmethod is corrupted. Leave
  // this at the end, and hope for the best.
  address pc = os::Linux::ucontext_get_pc(uc);
  st->print_cr("Instructions: (pc=" PTR_FORMAT ")", pc);
  print_hex_dump(st, pc - 16, pc + 16, sizeof(char));
}

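// Load the VM's standard x87 control word from
// StubRoutines::addr_fpu_cntrl_wrd_std() for the current thread. Not needed
// on AMD64, where SSE registers are used for floating point.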
void os::setup_fpu() {
#ifndef AMD64
  address fpu_cntrl = StubRoutines::addr_fpu_cntrl_wrd_std();
  __asm__ volatile (  "fldcw (%0)" :
                      : "r" (fpu_cntrl) : "memory");
#endif // !AMD64
}