author       twisti
date         Mon, 23 Feb 2009 12:02:30 -0800
changeset    2111:dab8a43dd738
parent       1664:fc9ed50498fb
child        2131:98f9cef66a34
permissions  -rw-r--r--
/*
 * Copyright 1999-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// do not include precompiled header file
# include "incls/_os_solaris_x86.cpp.incl"

// put OS-includes here
# include <sys/types.h>
# include <sys/mman.h>
# include <pthread.h>
# include <signal.h>
# include <setjmp.h>
# include <errno.h>
# include <dlfcn.h>
# include <stdio.h>
# include <unistd.h>
# include <sys/resource.h>
# include <thread.h>
# include <sys/stat.h>
# include <sys/time.h>
# include <sys/filio.h>
# include <sys/utsname.h>
# include <sys/systeminfo.h>
# include <sys/socket.h>
# include <sys/trap.h>
# include <sys/lwp.h>
# include <pwd.h>
# include <poll.h>
# include <sys/lwp.h>
# include <procfs.h>     // see comment in <sys/procfs.h>

#ifndef AMD64
// QQQ seems useless at this point
# define _STRUCTURED_PROC 1  // this gets us the new structured proc interfaces of 5.6 & later
#endif // AMD64
# include <sys/procfs.h>     // see comment in <sys/procfs.h>


#define MAX_PATH (2 * K)

// Minimum stack size for the VM.  It's easier to document a constant value
// but it's different for x86 and sparc because the page sizes are different.
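// The REG_SP/REG_PC/REG_FP names used throughout this file map onto the
// Solaris gregset_t indices for the current architecture: REG_RSP/REG_RIP/
// REG_RBP on amd64, and UESP/EIP/EBP on 32-bit x86.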
#ifdef AMD64
size_t os::Solaris::min_stack_allowed = 224*K;
#define REG_SP REG_RSP
#define REG_PC REG_RIP
#define REG_FP REG_RBP
#else
size_t os::Solaris::min_stack_allowed = 64*K;
#define REG_SP UESP
#define REG_PC EIP
#define REG_FP EBP
// 4900493 counter to prevent runaway LDTR refresh attempt

static volatile int ldtr_refresh = 0;
// the libthread instruction that faults because of the stale LDTR

static const unsigned char movlfs[] = { 0x8e, 0xe0    // movl %eax,%fs
};
#endif // AMD64

char* os::non_memory_address_word() {
  // Must never look like an address returned by reserve_memory,
  // even in its subfields (as defined by the CPU immediate fields,
  // if the CPU splits constants across multiple instructions).
  return (char*) -1;
}

//
// Validate a ucontext retrieved from walking a uc_link of a ucontext.
// There are issues with libthread giving out uc_links for different threads
// on the same uc_link chain and bad or circular links.
//
bool os::Solaris::valid_ucontext(Thread* thread, ucontext_t* valid, ucontext_t* suspect) {
  if (valid >= suspect ||
      valid->uc_stack.ss_flags != suspect->uc_stack.ss_flags ||
      valid->uc_stack.ss_sp    != suspect->uc_stack.ss_sp    ||
      valid->uc_stack.ss_size  != suspect->uc_stack.ss_size) {
    DEBUG_ONLY(tty->print_cr("valid_ucontext: failed test 1");)
    return false;
  }

  if (thread->is_Java_thread()) {
    if (!valid_stack_address(thread, (address)suspect)) {
      DEBUG_ONLY(tty->print_cr("valid_ucontext: uc_link not in thread stack");)
      return false;
    }
    if (!valid_stack_address(thread, (address) suspect->uc_mcontext.gregs[REG_SP])) {
      DEBUG_ONLY(tty->print_cr("valid_ucontext: stackpointer not in thread stack");)
      return false;
    }
  }
  return true;
}

// We will only follow one level of uc_link since there are libthread
// issues with ucontext linking and it is better to be safe and just
// let caller retry later.
ucontext_t* os::Solaris::get_valid_uc_in_signal_handler(Thread *thread,
  ucontext_t *uc) {

  ucontext_t *retuc = NULL;

  if (uc != NULL) {
    if (uc->uc_link == NULL) {
      // cannot validate without uc_link so accept current ucontext
      retuc = uc;
    } else if (os::Solaris::valid_ucontext(thread, uc, uc->uc_link)) {
      // first ucontext is valid so try the next one
      uc = uc->uc_link;
      if (uc->uc_link == NULL) {
        // cannot validate without uc_link so accept current ucontext
        retuc = uc;
      } else if (os::Solaris::valid_ucontext(thread, uc, uc->uc_link)) {
        // the ucontext one level down is also valid so return it
        retuc = uc;
      }
    }
  }
  return retuc;
}

// Assumes ucontext is valid
ExtendedPC os::Solaris::ucontext_get_ExtendedPC(ucontext_t *uc) {
  return ExtendedPC((address)uc->uc_mcontext.gregs[REG_PC]);
}

// Assumes ucontext is valid
intptr_t* os::Solaris::ucontext_get_sp(ucontext_t *uc) {
  return (intptr_t*)uc->uc_mcontext.gregs[REG_SP];
}

// Assumes ucontext is valid
intptr_t* os::Solaris::ucontext_get_fp(ucontext_t *uc) {
  return (intptr_t*)uc->uc_mcontext.gregs[REG_FP];
}

// For Forte Analyzer AsyncGetCallTrace profiling support - thread
// is currently interrupted by SIGPROF.
//
// The difference between this and os::fetch_frame_from_context() is that
// here we try to skip nested signal frames.
ExtendedPC os::Solaris::fetch_frame_from_ucontext(Thread* thread,
  ucontext_t* uc, intptr_t** ret_sp, intptr_t** ret_fp) {

  assert(thread != NULL, "just checking");
  assert(ret_sp != NULL, "just checking");
  assert(ret_fp != NULL, "just checking");

  ucontext_t *luc = os::Solaris::get_valid_uc_in_signal_handler(thread, uc);
  return os::fetch_frame_from_context(luc, ret_sp, ret_fp);
}

ExtendedPC os::fetch_frame_from_context(void* ucVoid,
                    intptr_t** ret_sp, intptr_t** ret_fp) {

  ExtendedPC  epc;
  ucontext_t *uc = (ucontext_t*)ucVoid;

  if (uc != NULL) {
    epc = os::Solaris::ucontext_get_ExtendedPC(uc);
    if (ret_sp) *ret_sp = os::Solaris::ucontext_get_sp(uc);
    if (ret_fp) *ret_fp = os::Solaris::ucontext_get_fp(uc);
  } else {
    // construct empty ExtendedPC for return value checking
    epc = ExtendedPC(NULL);
    if (ret_sp) *ret_sp = (intptr_t *)NULL;
    if (ret_fp) *ret_fp = (intptr_t *)NULL;
  }

  return epc;
}

frame os::fetch_frame_from_context(void* ucVoid) {
  intptr_t* sp;
  intptr_t* fp;
  ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp);
  return frame(sp, fp, epc.pc());
}

frame os::get_sender_for_C_frame(frame* fr) {
  return frame(fr->sender_sp(), fr->link(), fr->sender_pc());
}

extern "C" intptr_t *_get_current_fp();  // in .il file

frame os::current_frame() {
  intptr_t* fp = _get_current_fp();  // it's inlined so want current fp
  frame myframe((intptr_t*)os::current_stack_pointer(),
                (intptr_t*)fp,
                CAST_FROM_FN_PTR(address, os::current_frame));
  if (os::is_first_C_frame(&myframe)) {
    // stack is not walkable
    frame ret; // This will be a null useless frame
    return ret;
  } else {
    return os::get_sender_for_C_frame(&myframe);
  }
}

// This is a simple callback that just fetches a PC for an interrupted thread.
// The thread need not be suspended and the fetched PC is just a hint.
// This one is currently used for profiling the VMThread ONLY!

// Must be synchronous
void GetThreadPC_Callback::execute(OSThread::InterruptArguments *args) {
  Thread*     thread = args->thread();
  ucontext_t* uc     = args->ucontext();
  intptr_t* sp;

  assert(ProfileVM && thread->is_VM_thread(), "just checking");

  ExtendedPC new_addr((address)uc->uc_mcontext.gregs[REG_PC]);
  _addr = new_addr;
}

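// Fetch the register state of another thread: thr_getstate() answers directly
// for a bound thread; when it reports TRS_LWPID the state is read instead from
// the LWP's /proc/<pid>/lwp/<lwpid>/lwpstatus file.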
static int threadgetstate(thread_t tid, int *flags, lwpid_t *lwp, stack_t *ss, gregset_t rs, lwpstatus_t *lwpstatus) {
  char lwpstatusfile[PROCFILE_LENGTH];
  int lwpfd, err;

  if (err = os::Solaris::thr_getstate(tid, flags, lwp, ss, rs))
    return (err);
  if (*flags == TRS_LWPID) {
    sprintf(lwpstatusfile, "/proc/%d/lwp/%d/lwpstatus", getpid(),
            *lwp);
    if ((lwpfd = open(lwpstatusfile, O_RDONLY)) < 0) {
      perror("thr_mutator_status: open lwpstatus");
      return (EINVAL);
    }
    if (pread(lwpfd, lwpstatus, sizeof (lwpstatus_t), (off_t)0) !=
        sizeof (lwpstatus_t)) {
      perror("thr_mutator_status: read lwpstatus");
      (void) close(lwpfd);
      return (EINVAL);
    }
    (void) close(lwpfd);
  }
  return (0);
}

#ifndef AMD64

// Detecting SSE support by OS
// From solaris_i486.s
extern "C" bool sse_check();
extern "C" bool sse_unavailable();

enum { SSE_UNKNOWN, SSE_NOT_SUPPORTED, SSE_SUPPORTED};
static int sse_status = SSE_UNKNOWN;


static void check_for_sse_support() {
  if (!VM_Version::supports_sse()) {
    sse_status = SSE_NOT_SUPPORTED;
    return;
  }
  // looking for _sse_hw in libc.so, if it does not exist or
  // the value (int) is 0, OS has no support for SSE
  int *sse_hwp;
  void *h;

  if ((h=dlopen("/usr/lib/libc.so", RTLD_LAZY)) == NULL) {
    //open failed, presume no support for SSE
    sse_status = SSE_NOT_SUPPORTED;
    return;
  }
  if ((sse_hwp = (int *)dlsym(h, "_sse_hw")) == NULL) {
    sse_status = SSE_NOT_SUPPORTED;
  } else if (*sse_hwp == 0) {
    sse_status = SSE_NOT_SUPPORTED;
  }
  dlclose(h);

  if (sse_status == SSE_UNKNOWN) {
    bool (*try_sse)() = (bool (*)())sse_check;
    sse_status = (*try_sse)() ? SSE_SUPPORTED : SSE_NOT_SUPPORTED;
  }

}

#endif // AMD64

bool os::supports_sse() {
#ifdef AMD64
  return true;
#else
  if (sse_status == SSE_UNKNOWN)
    check_for_sse_support();
  return sse_status == SSE_SUPPORTED;
#endif // AMD64
}

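// On 64-bit Solaris any size is considered allocatable. On 32-bit builds a
// request below 2G is accepted outright; larger requests are probed by
// attempting a reservation and immediately releasing it if it succeeds.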
bool os::is_allocatable(size_t bytes) {
#ifdef AMD64
  return true;
#else

  if (bytes < 2 * G) {
    return true;
  }

  char* addr = reserve_memory(bytes, NULL);

  if (addr != NULL) {
    release_memory(addr, bytes);
  }

  return addr != NULL;
#endif // AMD64

}

extern "C" int JVM_handle_solaris_signal(int signo, siginfo_t* siginfo, void* ucontext, int abort_if_unrecognized);

extern "C" void Fetch32PFI () ;
extern "C" void Fetch32Resume () ;
#ifdef AMD64
extern "C" void FetchNPFI () ;
extern "C" void FetchNResume () ;
#endif // AMD64

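// Central signal handler for the VM on Solaris/x86. Triage order: VM-internal
// signals (SIGPIPE/SIGXFSZ and the async signal) are consumed first, then the
// fault is matched against a continuation stub (SafeFetch, stack overflow,
// safepoint poll, implicit null/divide, unsafe access), then chained handlers
// get a chance, and anything left unrecognized ends in a fatal error report.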
int JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid, int abort_if_unrecognized) {
  ucontext_t* uc = (ucontext_t*) ucVoid;

#ifndef AMD64
  if (sig == SIGILL && info->si_addr == (caddr_t)sse_check) {
    // the SSE instruction faulted. supports_sse() needs to return false.
    uc->uc_mcontext.gregs[EIP] = (greg_t)sse_unavailable;
    return true;
  }
#endif // !AMD64

  Thread* t = ThreadLocalStorage::get_thread_slow();  // slow & steady

  SignalHandlerMark shm(t);

  if(sig == SIGPIPE || sig == SIGXFSZ) {
    if (os::Solaris::chained_handler(sig, info, ucVoid)) {
      return true;
    } else {
      if (PrintMiscellaneous && (WizardMode || Verbose)) {
        char buf[64];
        warning("Ignoring %s - see 4229104 or 6499219",
                os::exception_name(sig, buf, sizeof(buf)));

      }
      return true;
    }
  }

  JavaThread* thread = NULL;
  VMThread* vmthread = NULL;

  if (os::Solaris::signal_handlers_are_installed) {
    if (t != NULL ){
      if(t->is_Java_thread()) {
        thread = (JavaThread*)t;
      }
      else if(t->is_VM_thread()){
        vmthread = (VMThread *)t;
      }
    }
  }

  guarantee(sig != os::Solaris::SIGinterrupt(), "Can not chain VM interrupt signal, try -XX:+UseAltSigs");

  if (sig == os::Solaris::SIGasync()) {
    if(thread){
      OSThread::InterruptArguments args(thread, uc);
      thread->osthread()->do_interrupt_callbacks_at_interrupt(&args);
      return true;
    }
    else if(vmthread){
      OSThread::InterruptArguments args(vmthread, uc);
      vmthread->osthread()->do_interrupt_callbacks_at_interrupt(&args);
      return true;
    } else if (os::Solaris::chained_handler(sig, info, ucVoid)) {
      return true;
    } else {
      // If os::Solaris::SIGasync not chained, and this is a non-vm and
      // non-java thread
      return true;
    }
  }

  if (info == NULL || info->si_code <= 0 || info->si_code == SI_NOINFO) {
    // can't decode this kind of signal
    info = NULL;
  } else {
    assert(sig == info->si_signo, "bad siginfo");
  }

  // decide if this trap can be handled by a stub
  address stub = NULL;

  address pc          = NULL;

  //%note os_trap_1
  if (info != NULL && uc != NULL && thread != NULL) {
    // factor me: getPCfromContext
    pc = (address) uc->uc_mcontext.gregs[REG_PC];

    // SafeFetch32() support
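    // If the faulting pc is the probe instruction inside SafeFetch32 (or
    // SafeFetchN on amd64), redirect execution to the matching resume stub
    // so the probe returns safely instead of crashing the VM.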
    if (pc == (address) Fetch32PFI) {
      uc->uc_mcontext.gregs[REG_PC] = intptr_t(Fetch32Resume) ;
      return true ;
    }
#ifdef AMD64
    if (pc == (address) FetchNPFI) {
      uc->uc_mcontext.gregs [REG_PC] = intptr_t(FetchNResume) ;
      return true ;
    }
#endif // AMD64

    // Handle ALL stack overflow variations here
    if (sig == SIGSEGV && info->si_code == SEGV_ACCERR) {
      address addr = (address) info->si_addr;
      if (thread->in_stack_yellow_zone(addr)) {
        thread->disable_stack_yellow_zone();
        if (thread->thread_state() == _thread_in_Java) {
          // Throw a stack overflow exception.  Guard pages will be reenabled
          // while unwinding the stack.
          stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
        } else {
          // Thread was in the vm or native code.  Return and try to finish.
          return true;
        }
      } else if (thread->in_stack_red_zone(addr)) {
        // Fatal red zone violation.  Disable the guard pages and fall through
        // to handle_unexpected_exception way down below.
        thread->disable_stack_red_zone();
        tty->print_raw_cr("An irrecoverable stack overflow has occurred.");
      }
    }

    if (thread->thread_state() == _thread_in_vm) {
      if (sig == SIGBUS && info->si_code == BUS_OBJERR && thread->doing_unsafe_access()) {
        stub = StubRoutines::handler_for_unsafe_access();
      }
    }

    if (thread->thread_state() == _thread_in_Java) {
      // Support Safepoint Polling
      if ( sig == SIGSEGV && os::is_poll_address((address)info->si_addr)) {
        stub = SharedRuntime::get_poll_stub(pc);
      }
      else if (sig == SIGBUS && info->si_code == BUS_OBJERR) {
        // BugId 4454115: A read from a MappedByteBuffer can fault
        // here if the underlying file has been truncated.
        // Do not crash the VM in such a case.
        CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
        nmethod* nm = cb->is_nmethod() ? (nmethod*)cb : NULL;
        if (nm != NULL && nm->has_unsafe_access()) {
          stub = StubRoutines::handler_for_unsafe_access();
        }
      }
      else
      if (sig == SIGFPE && info->si_code == FPE_INTDIV) {
        // integer divide by zero
        stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO);
      }
#ifndef AMD64
      else if (sig == SIGFPE && info->si_code == FPE_FLTDIV) {
        // floating-point divide by zero
        stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO);
      }
      else if (sig == SIGFPE && info->si_code == FPE_FLTINV) {
        // The encoding of D2I in i486.ad can cause an exception prior
        // to the fist instruction if there was an invalid operation
        // pending. We want to dismiss that exception. From the win_32
        // side it also seems that if it really was the fist causing
        // the exception that we do the d2i by hand with different
        // rounding. Seems kind of weird. QQQ TODO
        // Note that we take the exception at the NEXT floating point instruction.
        if (pc[0] == 0xDB) {
          assert(pc[0] == 0xDB, "not a FIST opcode");
          assert(pc[1] == 0x14, "not a FIST opcode");
          assert(pc[2] == 0x24, "not a FIST opcode");
          return true;
        } else {
          assert(pc[-3] == 0xDB, "not an flt invalid opcode");
          assert(pc[-2] == 0x14, "not an flt invalid opcode");
          assert(pc[-1] == 0x24, "not an flt invalid opcode");
        }
      }
      else if (sig == SIGFPE ) {
        tty->print_cr("caught SIGFPE, info 0x%x.", info->si_code);
      }
#endif // !AMD64

      // QQQ It doesn't seem that we need to do this on x86 because we should be able
      // to return properly from the handler without this extra stuff on the back side.

      else if (sig == SIGSEGV && info->si_code > 0 && !MacroAssembler::needs_explicit_null_check((intptr_t)info->si_addr)) {
        // Determination of interpreter/vtable stub/compiled code null exception
        stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
      }
    }

    // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in
    // and the heap gets shrunk before the field access.
    if ((sig == SIGSEGV) || (sig == SIGBUS)) {
      address addr = JNI_FastGetField::find_slowcase_pc(pc);
      if (addr != (address)-1) {
        stub = addr;
      }
    }

    // Check to see if we caught the safepoint code in the
    // process of write protecting the memory serialization page.
    // It write enables the page immediately after protecting it
    // so we can just return to retry the write.
    if ((sig == SIGSEGV) &&
        os::is_memory_serialize_page(thread, (address)info->si_addr)) {
      // Block current thread until the memory serialize page permission restored.
      os::block_on_serialize_page_trap();
      return true;
    }
  }

  // Execution protection violation
  //
  // Preventative code for future versions of Solaris which may
  // enable execution protection when running the 32-bit VM on AMD64.
  //
  // This should be kept as the last step in the triage.  We don't
  // have a dedicated trap number for a no-execute fault, so be
  // conservative and allow other handlers the first shot.
  //
  // Note: We don't test that info->si_code == SEGV_ACCERR here.
  // this si_code is so generic that it is almost meaningless; and
  // the si_code for this condition may change in the future.
  // Furthermore, a false-positive should be harmless.
  if (UnguardOnExecutionViolation > 0 &&
      (sig == SIGSEGV || sig == SIGBUS) &&
      uc->uc_mcontext.gregs[TRAPNO] == T_PGFLT) {  // page fault
    int page_size = os::vm_page_size();
    address addr = (address) info->si_addr;
    address pc = (address) uc->uc_mcontext.gregs[REG_PC];
    // Make sure the pc and the faulting address are sane.
    //
    // If an instruction spans a page boundary, and the page containing
    // the beginning of the instruction is executable but the following
    // page is not, the pc and the faulting address might be slightly
    // different - we still want to unguard the 2nd page in this case.
    //
    // 15 bytes seems to be a (very) safe value for max instruction size.
    bool pc_is_near_addr =
      (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
    bool instr_spans_page_boundary =
      (align_size_down((intptr_t) pc ^ (intptr_t) addr,
                       (intptr_t) page_size) > 0);

    if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
      static volatile address last_addr =
        (address) os::non_memory_address_word();

      // In conservative mode, don't unguard unless the address is in the VM
      if (addr != last_addr &&
          (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {

        // Make memory rwx and retry
        address page_start =
          (address) align_size_down((intptr_t) addr, (intptr_t) page_size);
        bool res = os::protect_memory((char*) page_start, page_size,
                                      os::MEM_PROT_RWX);

        if (PrintMiscellaneous && Verbose) {
          char buf[256];
          jio_snprintf(buf, sizeof(buf), "Execution protection violation "
                       "at " INTPTR_FORMAT
                       ", unguarding " INTPTR_FORMAT ": %s, errno=%d", addr,
                       page_start, (res ? "success" : "failed"), errno);
          tty->print_raw_cr(buf);
        }
        stub = pc;

        // Set last_addr so if we fault again at the same address, we don't end
        // up in an endless loop.
        //
        // There are two potential complications here.  Two threads trapping at
        // the same address at the same time could cause one of the threads to
        // think it already unguarded, and abort the VM.  Likely very rare.
        //
        // The other race involves two threads alternately trapping at
        // different addresses and failing to unguard the page, resulting in
        // an endless loop.  This condition is probably even more unlikely than
        // the first.
        //
        // Although both cases could be avoided by using locks or thread local
        // last_addr, these solutions are unnecessary complication: this
        // handler is a best-effort safety net, not a complete solution.  It is
        // disabled by default and should only be used as a workaround in case
        // we missed any no-execute-unsafe VM code.

        last_addr = addr;
      }
    }
  }

  if (stub != NULL) {
    // save all thread context in case we need to restore it

    if (thread != NULL) thread->set_saved_exception_pc(pc);
    // 12/02/99: On Sparc it appears that the full context is also saved
    // but as yet, no one looks at or restores that saved context
    // factor me: setPC
    uc->uc_mcontext.gregs[REG_PC] = (greg_t)stub;
    return true;
  }

  // signal-chaining
  if (os::Solaris::chained_handler(sig, info, ucVoid)) {
    return true;
  }

#ifndef AMD64
  // Workaround (bug 4900493) for Solaris kernel bug 4966651.
  // Handle an undefined selector caused by an attempt to assign
  // fs in libthread getipriptr(). With the current libthread design every 512
  // thread creations the LDT for a private thread data structure is extended
  // and there is a hazard that another thread attempting a thread creation
  // will use a stale LDTR that doesn't reflect the structure's growth,
  // causing a GP fault.
  // Enforce the probable limit of passes through here to guard against an
  // infinite loop if some other move to fs caused the GP fault. Note that
  // this loop counter is ultimately a heuristic as it is possible for
  // more than one thread to generate this fault at a time in an MP system.
  // In the case of the loop count being exceeded or if the poll fails
  // just fall through to a fatal error.
  // If there is some other source of T_GPFLT traps and the text at EIP is
  // unreadable this code will loop infinitely until the stack is exhausted.
  // The key to diagnosis in this case is to look for the bottom signal handler
  // frame.

  if(! IgnoreLibthreadGPFault) {
    if (sig == SIGSEGV && uc->uc_mcontext.gregs[TRAPNO] == T_GPFLT) {
      const unsigned char *p =
                        (unsigned const char *) uc->uc_mcontext.gregs[EIP];

      // Expected instruction?

      if(p[0] == movlfs[0] && p[1] == movlfs[1]) {

        Atomic::inc(&ldtr_refresh);

        // Infinite loop?

        if(ldtr_refresh < ((2 << 16) / PAGESIZE)) {

          // No, force scheduling to get a fresh view of the LDTR

          if(poll(NULL, 0, 10) == 0) {

            // Retry the move

            return false;
          }
        }
      }
    }
  }
#endif // !AMD64

  if (!abort_if_unrecognized) {
    // caller wants another chance, so give it to him
    return false;
  }

  if (!os::Solaris::libjsig_is_loaded) {
    struct sigaction oldAct;
    sigaction(sig, (struct sigaction *)0, &oldAct);
    if (oldAct.sa_sigaction != signalHandler) {
      void* sighand = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
                                          : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
      warning("Unexpected Signal %d occurred under user-defined signal handler %#lx", sig, (long)sighand);
    }
  }

  if (pc == NULL && uc != NULL) {
    pc = (address) uc->uc_mcontext.gregs[REG_PC];
  }

  // unmask current signal
  sigset_t newset;
  sigemptyset(&newset);
  sigaddset(&newset, sig);
  sigprocmask(SIG_UNBLOCK, &newset, NULL);

  VMError err(t, sig, pc, info, ucVoid);
  err.report_and_die();

  ShouldNotReachHere();
}

void os::print_context(outputStream *st, void *context) {
  if (context == NULL) return;

  ucontext_t *uc = (ucontext_t*)context;
  st->print_cr("Registers:");
#ifdef AMD64
  st->print(  "RAX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RAX]);
  st->print(", RBX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RBX]);
  st->print(", RCX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RCX]);
  st->print(", RDX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RDX]);
  st->cr();
  st->print(  "RSP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RSP]);
  st->print(", RBP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RBP]);
  st->print(", RSI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RSI]);
  st->print(", RDI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RDI]);
  st->cr();
  st->print(", R8=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R8]);
  st->print(", R9=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R9]);
  st->print(", R10=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R10]);
  st->print(", R11=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R11]);
  st->print(", R12=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R12]);
  st->print(", R13=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R13]);
  st->print(", R14=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R14]);
  st->print(", R15=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R15]);
  st->cr();
  st->print(  "RIP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RIP]);
  st->print(", RFLAGS=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RFL]);
#else
  st->print(  "EAX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[EAX]);
  st->print(", EBX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[EBX]);
  st->print(", ECX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[ECX]);
  st->print(", EDX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[EDX]);
  st->cr();
  st->print(  "ESP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[UESP]);
  st->print(", EBP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[EBP]);
  st->print(", ESI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[ESI]);
  st->print(", EDI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[EDI]);
  st->cr();
  st->print(  "EIP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[EIP]);
  st->print(", EFLAGS=" INTPTR_FORMAT, uc->uc_mcontext.gregs[EFL]);
#endif // AMD64
  st->cr();
  st->cr();

  intptr_t *sp = (intptr_t *)os::Solaris::ucontext_get_sp(uc);
  st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", sp);
  print_hex_dump(st, (address)sp, (address)(sp + 8*sizeof(intptr_t)), sizeof(intptr_t));
  st->cr();

  // Note: it may be unsafe to inspect memory near pc. For example, pc may
  // point to garbage if entry point in an nmethod is corrupted. Leave
  // this at the end, and hope for the best.
  ExtendedPC epc = os::Solaris::ucontext_get_ExtendedPC(uc);
  address pc = epc.pc();
  st->print_cr("Instructions: (pc=" PTR_FORMAT ")", pc);
  print_hex_dump(st, pc - 16, pc + 16, sizeof(char));
}

#ifdef AMD64
void os::Solaris::init_thread_fpu_state(void) {
  // Nothing to do
}
#else
// From solaris_i486.s
extern "C" void fixcw();

void os::Solaris::init_thread_fpu_state(void) {
  // Set fpu to 53 bit precision. This happens too early to use a stub.
  fixcw();
}

// These routines are the initial value of atomic_xchg_entry(),
// atomic_cmpxchg_entry(), atomic_inc_entry() and fence_entry()
// until initialization is complete.
// TODO - replace with .il implementation when compiler supports it.

typedef jint  xchg_func_t        (jint,  volatile jint*);
typedef jint  cmpxchg_func_t     (jint,  volatile jint*,  jint);
typedef jlong cmpxchg_long_func_t(jlong, volatile jlong*, jlong);
typedef jint  add_func_t         (jint,  volatile jint*);
typedef void  fence_func_t       ();

jint os::atomic_xchg_bootstrap(jint exchange_value, volatile jint* dest) {
  // try to use the stub:
  xchg_func_t* func = CAST_TO_FN_PTR(xchg_func_t*, StubRoutines::atomic_xchg_entry());

  if (func != NULL) {
    os::atomic_xchg_func = func;
    return (*func)(exchange_value, dest);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  jint old_value = *dest;
  *dest = exchange_value;
  return old_value;
}

jint os::atomic_cmpxchg_bootstrap(jint exchange_value, volatile jint* dest, jint compare_value) {
  // try to use the stub:
  cmpxchg_func_t* func = CAST_TO_FN_PTR(cmpxchg_func_t*, StubRoutines::atomic_cmpxchg_entry());

  if (func != NULL) {
    os::atomic_cmpxchg_func = func;
    return (*func)(exchange_value, dest, compare_value);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  jint old_value = *dest;
  if (old_value == compare_value)
    *dest = exchange_value;
  return old_value;
}

jlong os::atomic_cmpxchg_long_bootstrap(jlong exchange_value, volatile jlong* dest, jlong compare_value) {
  // try to use the stub:
  cmpxchg_long_func_t* func = CAST_TO_FN_PTR(cmpxchg_long_func_t*, StubRoutines::atomic_cmpxchg_long_entry());

  if (func != NULL) {
    os::atomic_cmpxchg_long_func = func;
    return (*func)(exchange_value, dest, compare_value);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  jlong old_value = *dest;
  if (old_value == compare_value)
    *dest = exchange_value;
  return old_value;
}

jint os::atomic_add_bootstrap(jint add_value, volatile jint* dest) {
  // try to use the stub:
  add_func_t* func = CAST_TO_FN_PTR(add_func_t*, StubRoutines::atomic_add_entry());

  if (func != NULL) {
    os::atomic_add_func = func;
    return (*func)(add_value, dest);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  return (*dest) += add_value;
}

void os::fence_bootstrap() {
  // try to use the stub:
  fence_func_t* func = CAST_TO_FN_PTR(fence_func_t*, StubRoutines::fence_entry());

  if (func != NULL) {
    os::fence_func = func;
    (*func)();
    return;
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  // don't have to do anything for a single thread
}

xchg_func_t*         os::atomic_xchg_func         = os::atomic_xchg_bootstrap;
cmpxchg_func_t*      os::atomic_cmpxchg_func      = os::atomic_cmpxchg_bootstrap;
cmpxchg_long_func_t* os::atomic_cmpxchg_long_func = os::atomic_cmpxchg_long_bootstrap;
add_func_t*          os::atomic_add_func          = os::atomic_add_bootstrap;
fence_func_t*        os::fence_func               = os::fence_bootstrap;

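// Set up this thread's FPU control word using the raw assembly helper;
// fpu_cntrl points at the VM's standard x87 control word value.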
extern "C" void _solaris_raw_setup_fpu(address ptr);
void os::setup_fpu() {
  address fpu_cntrl = StubRoutines::addr_fpu_cntrl_wrd_std();
  _solaris_raw_setup_fpu(fpu_cntrl);
}
#endif // AMD64