/*
 * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// no precompiled headers
#include "assembler_arm.inline.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm_linux.h"
#include "memory/allocation.inline.hpp"
#include "nativeInst_arm.hpp"
#include "os_share_linux.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/osThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/timer.hpp"
#include "utilities/events.hpp"
#include "utilities/vmError.hpp"

// put OS-includes here
# include <sys/types.h>
# include <sys/mman.h>
# include <pthread.h>
# include <signal.h>
# include <errno.h>
# include <dlfcn.h>
# include <stdlib.h>
# include <stdio.h>
# include <unistd.h>
# include <sys/resource.h>
# include <sys/stat.h>
# include <sys/time.h>
# include <sys/utsname.h>
# include <sys/socket.h>
# include <sys/wait.h>
# include <pwd.h>
# include <poll.h>
# include <ucontext.h>
# include <fpu_control.h>
# include <asm/ptrace.h>

#define SPELL_REG_SP "sp"

// Don't #define SPELL_REG_FP for thumb because it is not safe to use, so this makes sure we never fetch it.
#ifndef __thumb__
#define SPELL_REG_FP AARCH64_ONLY("x29") NOT_AARCH64("fp")
#endif

address os::current_stack_pointer() {
  register address sp __asm__ (SPELL_REG_SP);
  return sp;
}
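
// Note: the definition above relies on the GCC explicit-register-variable
// extension: declaring a local with __asm__ (SPELL_REG_SP) binds it to the
// named machine register, so reading the variable reads the register
// directly. A minimal standalone sketch of the same technique (illustrative
// only, not part of this file's interface):
//
//   address read_sp() {
//     register address sp __asm__ ("sp");  // bound to the stack pointer
//     return sp;                           // yields the current sp value
//   }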

char* os::non_memory_address_word() {
  // Must never look like an address returned by reserve_memory
  return (char*) -1;
}

void os::initialize_thread(Thread* thr) {
  // Nothing to do
}

#ifdef AARCH64

#define arm_pc pc
#define arm_sp sp
#define arm_fp regs[29]
#define arm_r0 regs[0]
#define ARM_REGS_IN_CONTEXT 31

#else

#if NGREG == 16
// These definitions are based on the observation that, until a certain
// version of GCC, mcontext_t was defined as a structure containing a
// gregs[NGREG] array with 16 elements. In later GCC versions mcontext_t
// was redefined as struct sigcontext, and the NGREG constant changed to 18.
#define arm_pc gregs[15]
#define arm_sp gregs[13]
#define arm_fp gregs[11]
#define arm_r0 gregs[0]
#endif

#define ARM_REGS_IN_CONTEXT 16

#endif // AARCH64
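
// With the definitions above, the accessors below can uniformly write
// uc->uc_mcontext.arm_pc: depending on the build, that resolves to the
// AArch64 mcontext_t::pc field, to gregs[15] in the old 16-register 32-bit
// layout, or, when NGREG != 16, to the arm_pc member that the newer 32-bit
// struct sigcontext layout provides under exactly that name (so no macro is
// needed in that case).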

address os::Linux::ucontext_get_pc(const ucontext_t* uc) {
  return (address)uc->uc_mcontext.arm_pc;
}

void os::Linux::ucontext_set_pc(ucontext_t* uc, address pc) {
  uc->uc_mcontext.arm_pc = (uintx)pc;
}

intptr_t* os::Linux::ucontext_get_sp(const ucontext_t* uc) {
  return (intptr_t*)uc->uc_mcontext.arm_sp;
}

intptr_t* os::Linux::ucontext_get_fp(const ucontext_t* uc) {
  return (intptr_t*)uc->uc_mcontext.arm_fp;
}

bool is_safe_for_fp(address pc) {
#ifdef __thumb__
  if (CodeCache::find_blob(pc) != NULL) {
    return true;
  }
  // For thumb C frames, given an fp we have no idea how to access the frame contents.
  return false;
#else
  // Calling os::address_is_in_vm() here leads to a dladdr call. Calling any libc
  // function during os::get_native_stack() can result in a deadlock if JFR is
  // enabled. For now, be more lenient and allow all pc's. There are other
  // frame sanity checks in shared code, and to date they have been sufficient
  // for other platforms.
  //return os::address_is_in_vm(pc);
  return true;
#endif
}

// For Forte Analyzer AsyncGetCallTrace profiling support - thread
// is currently interrupted by SIGPROF.
// os::Solaris::fetch_frame_from_ucontext() tries to skip nested signal
// frames. Currently we don't do that on Linux, so it's the same as
// os::fetch_frame_from_context().
ExtendedPC os::Linux::fetch_frame_from_ucontext(Thread* thread,
  const ucontext_t* uc, intptr_t** ret_sp, intptr_t** ret_fp) {

  assert(thread != NULL, "just checking");
  assert(ret_sp != NULL, "just checking");
  assert(ret_fp != NULL, "just checking");

  return os::fetch_frame_from_context(uc, ret_sp, ret_fp);
}

ExtendedPC os::fetch_frame_from_context(const void* ucVoid,
  intptr_t** ret_sp, intptr_t** ret_fp) {

  ExtendedPC epc;
  const ucontext_t* uc = (const ucontext_t*)ucVoid;

  if (uc != NULL) {
    epc = ExtendedPC(os::Linux::ucontext_get_pc(uc));
    if (ret_sp) *ret_sp = os::Linux::ucontext_get_sp(uc);
    if (ret_fp) {
      intptr_t* fp = os::Linux::ucontext_get_fp(uc);
#ifndef __thumb__
      if (CodeCache::find_blob(epc.pc()) == NULL) {
        // It's a C frame. We need to adjust the fp.
        fp += os::C_frame_offset;
      }
#endif
      // Clear FP when stack walking is dangerous so that
      // the frame created will not be walked.
      // However, ensure FP is set correctly when reliable and
      // potentially necessary.
      if (!is_safe_for_fp(epc.pc())) {
        // FP unreliable
        fp = (intptr_t *)NULL;
      }
      *ret_fp = fp;
    }
  } else {
    // construct empty ExtendedPC for return value checking
    epc = ExtendedPC(NULL);
    if (ret_sp) *ret_sp = (intptr_t *)NULL;
    if (ret_fp) *ret_fp = (intptr_t *)NULL;
  }

  return epc;
}
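
// A typical use (a sketch, assuming the caller sits in a signal handler
// whose third argument is the raw ucontext pointer):
//
//   intptr_t* sp;
//   intptr_t* fp;
//   ExtendedPC epc = os::fetch_frame_from_context(ucVoid, &sp, &fp);
//   if (epc.pc() != NULL) {
//     frame f(sp, fp, epc.pc());  // fp may be NULL if it was unreliable
//     // ... walk or record the frame ...
//   }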

frame os::fetch_frame_from_context(const void* ucVoid) {
  intptr_t* sp;
  intptr_t* fp;
  ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp);
  return frame(sp, fp, epc.pc());
}

frame os::get_sender_for_C_frame(frame* fr) {
#ifdef __thumb__
  // We can't reliably get anything from a thumb C frame.
  return frame();
#else
  address pc = fr->sender_pc();
  if (!is_safe_for_fp(pc)) {
    return frame(fr->sender_sp(), (intptr_t *)NULL, pc);
  } else {
    return frame(fr->sender_sp(), fr->link() + os::C_frame_offset, pc);
  }
#endif
}

//
// This actually returns two frames up. It does not return os::current_frame(),
// which is the actual current frame. Nor does it return os::get_native_stack(),
// which is the caller. It returns whoever called os::get_native_stack(). Not
// very intuitive, but consistent with how this API is implemented on other
// platforms.
//
frame os::current_frame() {
#ifdef __thumb__
  // We can't reliably get anything from a thumb C frame.
  return frame();
#else
  register intptr_t* fp __asm__ (SPELL_REG_FP);
  // fp is for os::current_frame. We want the fp for our caller.
  frame myframe((intptr_t*)os::current_stack_pointer(), fp + os::C_frame_offset,
                CAST_FROM_FN_PTR(address, os::current_frame));
  frame caller_frame = os::get_sender_for_C_frame(&myframe);

  if (os::is_first_C_frame(&caller_frame)) {
    // stack is not walkable
    // Assert below was added because it does not seem like this can ever happen.
    // How can this frame ever be the first C frame since it is called from C code?
    // If it does ever happen, undo the assert and comment here on when/why it happens.
    assert(false, "this should never happen");
    return frame();
  }

  // return frame for our caller's caller
  return os::get_sender_for_C_frame(&caller_frame);
#endif
}
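
// Call-chain illustration for the comment above: with
//   some_caller() -> os::get_native_stack() -> os::current_frame()
// the frame returned describes some_caller(), i.e. two frames above
// os::current_frame() itself.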

#ifndef AARCH64
extern "C" address check_vfp_fault_instr;
extern "C" address check_vfp3_32_fault_instr;

address check_vfp_fault_instr = NULL;
address check_vfp3_32_fault_instr = NULL;
#endif // !AARCH64
extern "C" address check_simd_fault_instr;
address check_simd_fault_instr = NULL;

// Utility functions

extern "C" int JVM_handle_linux_signal(int sig, siginfo_t* info,
                                       void* ucVoid, int abort_if_unrecognized) {
  ucontext_t* uc = (ucontext_t*) ucVoid;

  Thread* t = Thread::current_or_null_safe();

  // Must do this before SignalHandlerMark: if crash protection is installed,
  // we will longjmp away (no destructors can be run).
  os::WatcherThreadCrashProtection::check_crash_protection(sig, t);

  SignalHandlerMark shm(t);

  if (sig == SIGILL &&
      ((info->si_addr == (caddr_t)check_simd_fault_instr)
       NOT_AARCH64(|| info->si_addr == (caddr_t)check_vfp_fault_instr)
       NOT_AARCH64(|| info->si_addr == (caddr_t)check_vfp3_32_fault_instr))) {
    // Skip the faulting instruction and the following instruction that sets
    // the return value to success, then set the return value to failure.
    os::Linux::ucontext_set_pc(uc, (address)info->si_addr + 8);
    uc->uc_mcontext.arm_r0 = 0;
    return true;
  }
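
  // Note on the arithmetic above: each feature-probe stub consists of two
  // 4-byte instructions -- the one that may raise SIGILL, immediately
  // followed by one that records success -- so advancing the pc by 8 bytes
  // resumes execution past both, and zeroing r0 (the integer return
  // register) reports the feature as unavailable.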

  // Note: it's not uncommon that JNI code uses signal/sigset to install,
  // then restore, certain signal handlers (e.g. to temporarily block SIGPIPE,
  // or have a SIGILL handler when detecting CPU type). When that happens,
  // JVM_handle_linux_signal() might be invoked with junk info/ucVoid. To
  // avoid an unnecessary crash when libjsig is not preloaded, try to handle
  // signals that do not require siginfo/ucontext first.

  if (sig == SIGPIPE || sig == SIGXFSZ) {
    // allow chained handler to go first
    if (os::Linux::chained_handler(sig, info, ucVoid)) {
      return true;
    } else {
      // Ignoring SIGPIPE/SIGXFSZ - see bugs 4229104 or 6499219
      return true;
    }
  }

  JavaThread* thread = NULL;
  VMThread* vmthread = NULL;
  if (os::Linux::signal_handlers_are_installed) {
    if (t != NULL) {
      if (t->is_Java_thread()) {
        thread = (JavaThread*)t;
      } else if (t->is_VM_thread()) {
        vmthread = (VMThread*)t;
      }
    }
  }

  address stub = NULL;
  address pc = NULL;
  bool unsafe_access = false;

  if (info != NULL && uc != NULL && thread != NULL) {
    pc = (address) os::Linux::ucontext_get_pc(uc);

    // Handle ALL stack overflow variations here
    if (sig == SIGSEGV) {
      address addr = (address) info->si_addr;

      if (StubRoutines::is_safefetch_fault(pc)) {
        os::Linux::ucontext_set_pc(uc, StubRoutines::continuation_for_safefetch_fault(pc));
        return 1;
      }
      // check if fault address is within thread stack
      if (addr < thread->stack_base() &&
          addr >= thread->stack_base() - thread->stack_size()) {
        // stack overflow
        if (thread->in_stack_yellow_reserved_zone(addr)) {
          thread->disable_stack_yellow_reserved_zone();
          if (thread->thread_state() == _thread_in_Java) {
            // Throw a stack overflow exception. Guard pages will be re-enabled
            // while unwinding the stack.
            stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
          } else {
            // Thread was in the VM or native code. Return and try to finish.
            return 1;
          }
        } else if (thread->in_stack_red_zone(addr)) {
          // Fatal red zone violation. Disable the guard pages and fall through
          // to handle_unexpected_exception way down below.
          thread->disable_stack_red_zone();
          tty->print_raw_cr("An irrecoverable stack overflow has occurred.");
        } else {
          // Accessing a stack address below sp may cause a SEGV if the current
          // thread has a MAP_GROWSDOWN stack. This should only happen when the
          // current thread was created by user code with the MAP_GROWSDOWN flag
          // and then attached to the VM. See notes in os_linux.cpp.
          if (thread->osthread()->expanding_stack() == 0) {
            thread->osthread()->set_expanding_stack();
            if (os::Linux::manually_expand_stack(thread, addr)) {
              thread->osthread()->clear_expanding_stack();
              return 1;
            }
            thread->osthread()->clear_expanding_stack();
          } else {
            fatal("recursive segv. expanding stack.");
          }
        }
      }
    }
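
    // Any SIGSEGV recognized above as a stack-overflow case has by now either
    // installed a continuation stub, returned to retry the access, or fallen
    // through for fatal error reporting; the checks below classify the
    // remaining faults taken while executing Java code.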

    if (thread->thread_state() == _thread_in_Java) {
      // Java thread running in Java code => find exception handler if any
      // a fault inside compiled code, the interpreter, or a stub

      if (sig == SIGSEGV && os::is_poll_address((address)info->si_addr)) {
        stub = SharedRuntime::get_poll_stub(pc);
      } else if (sig == SIGBUS) {
        // BugId 4454115: A read from a MappedByteBuffer can fault
        // here if the underlying file has been truncated.
        // Do not crash the VM in such a case.
        CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
        CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
        if (nm != NULL && nm->has_unsafe_access()) {
          unsafe_access = true;
        }
      } else if (sig == SIGSEGV && !MacroAssembler::needs_explicit_null_check((intptr_t)info->si_addr)) {
        // Determination of interpreter/vtable stub/compiled code null exception
        CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
        if (cb != NULL) {
          stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
        }
      } else if (sig == SIGILL && *(int *)pc == NativeInstruction::zombie_illegal_instruction) {
        // Zombie
        stub = SharedRuntime::get_handle_wrong_method_stub();
      }
    } else if (thread->thread_state() == _thread_in_vm &&
               sig == SIGBUS && thread->doing_unsafe_access()) {
      unsafe_access = true;
    }

    // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in
    // and the heap gets shrunk before the field access.
    if (sig == SIGSEGV || sig == SIGBUS) {
      address addr = JNI_FastGetField::find_slowcase_pc(pc);
      if (addr != (address)-1) {
        stub = addr;
      }
    }

    // Check to see if we caught the safepoint code in the
    // process of write protecting the memory serialization page.
    // It write enables the page immediately after protecting it
    // so we can just return to retry the write.
    if (sig == SIGSEGV && os::is_memory_serialize_page(thread, (address) info->si_addr)) {
      // Block the current thread until permission on the memory serialize
      // page is restored.
      os::block_on_serialize_page_trap();
      return true;
    }
  }

  if (unsafe_access && stub == NULL) {
    // The fault was flagged as a possible unsafe access and no other
    // suitable exception reason was found, so treat it as an unsafe access.
    address next_pc = pc + Assembler::InstructionSize;
#ifdef __thumb__
    if (uc->uc_mcontext.arm_cpsr & PSR_T_BIT) {
      next_pc = (address)((intptr_t)next_pc | 0x1);
    }
#endif

    stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
  }
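
  // (The "| 0x1" above follows the ARM interworking convention: a branch
  // target with bit 0 set executes in Thumb state, so when the faulting code
  // was running in Thumb mode -- PSR_T_BIT set in the CPSR -- the resume
  // address handed to the unsafe-access handler must carry that bit.)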

  if (stub != NULL) {
#ifdef __thumb__
    if (uc->uc_mcontext.arm_cpsr & PSR_T_BIT) {
      intptr_t p = (intptr_t)pc | 0x1;
      pc = (address)p;

      // Clear the Thumb mode bit if we're redirected into ARM ISA based code
      if (((intptr_t)stub & 0x1) == 0) {
        uc->uc_mcontext.arm_cpsr &= ~PSR_T_BIT;
      }
    } else {
      // No Thumb2 compiled stubs are triggered from ARM ISA compiled JIT'd
      // code today. The support needs to be added if that changes.
      assert((((intptr_t)stub & 0x1) == 0), "can't return to Thumb code");
    }
#endif

    // save all thread context in case we need to restore it
    if (thread != NULL) thread->set_saved_exception_pc(pc);

    os::Linux::ucontext_set_pc(uc, stub);
    return true;
  }

  // signal-chaining
  if (os::Linux::chained_handler(sig, info, ucVoid)) {
    return true;
  }

  if (!abort_if_unrecognized) {
    // caller wants another chance, so give it to him
    return false;
  }

  if (pc == NULL && uc != NULL) {
    pc = os::Linux::ucontext_get_pc(uc);
  }

  // unmask current signal
  sigset_t newset;
  sigemptyset(&newset);
  sigaddset(&newset, sig);
  sigprocmask(SIG_UNBLOCK, &newset, NULL);

  VMError::report_and_die(t, sig, pc, info, ucVoid);

  ShouldNotReachHere();
  return false;
}
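
// Overall flow of JVM_handle_linux_signal, for reference: handle the cheap
// cases that need no context first (crash protection, CPU feature probes,
// SIGPIPE/SIGXFSZ), then try to translate the fault into a VM continuation
// stub (stack overflow, safepoint poll, implicit null check, unsafe access),
// then defer to any chained application handler, and only if nothing claims
// the signal fall through to VMError::report_and_die().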

void os::Linux::init_thread_fpu_state(void) {
  os::setup_fpu();
}

int os::Linux::get_fpu_control_word(void) {
  return 0;
}

void os::Linux::set_fpu_control_word(int fpu_control) {
  // Nothing to do
}

void os::setup_fpu() {
#ifdef AARCH64
  __asm__ volatile ("msr fpcr, xzr");
#else
#if !defined(__SOFTFP__) && defined(__VFP_FP__)
  // Turn on IEEE-754 compliant VFP mode
  __asm__ volatile (
    "mov %%r0, #0;"
    "fmxr fpscr, %%r0"
    : /* no output */ : /* no input */ : "r0"
  );
#endif
#endif // AARCH64
}
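
// (In both branches above the FP control register is zeroed: FPCR on AArch64
// via "msr fpcr, xzr", FPSCR on 32-bit VFP via "fmxr fpscr, r0" with r0 == 0.
// An all-zero control word selects round-to-nearest and disables flush-to-zero
// and default-NaN mode, which is the IEEE-754 behavior Java requires.)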

bool os::is_allocatable(size_t bytes) {
  return true;
}

////////////////////////////////////////////////////////////////////////////////
// thread stack

// Minimum usable stack sizes required to get to user code. Space for
// HotSpot guard pages is added later.
size_t os::Posix::_compiler_thread_min_stack_allowed = (32 DEBUG_ONLY(+ 4)) * K;
size_t os::Posix::_java_thread_min_stack_allowed = (32 DEBUG_ONLY(+ 4)) * K;
size_t os::Posix::_vm_internal_thread_min_stack_allowed = (48 DEBUG_ONLY(+ 4)) * K;

// return default stack size for thr_type
size_t os::Posix::default_stack_size(os::ThreadType thr_type) {
  // default stack size (compiler thread needs larger stack)
  size_t s = (thr_type == os::compiler_thread ? 2 * M : 512 * K);
  return s;
}
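
// (The DEBUG_ONLY arithmetic above expands to (32 + 4) * K in debug builds,
// where DEBUG_ONLY(x) emits its argument, and to 32 * K in product builds,
// where it expands to nothing -- i.e. debug builds reserve one extra 4K page
// of minimum stack, presumably for the larger frames of unoptimized code.)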

/////////////////////////////////////////////////////////////////////////////
// helper functions for fatal error handler

void os::print_context(outputStream *st, const void *context) {
  if (context == NULL) return;
  const ucontext_t *uc = (const ucontext_t*)context;

  st->print_cr("Registers:");
  intx* reg_area = (intx*)&uc->uc_mcontext.arm_r0;
  for (int r = 0; r < ARM_REGS_IN_CONTEXT; r++) {
    st->print_cr(" %-3s = " INTPTR_FORMAT, as_Register(r)->name(), reg_area[r]);
  }
#define U64_FORMAT "0x%016llx"
#ifdef AARCH64
  st->print_cr(" %-3s = " U64_FORMAT, "sp", uc->uc_mcontext.sp);
  st->print_cr(" %-3s = " U64_FORMAT, "pc", uc->uc_mcontext.pc);
  st->print_cr(" %-3s = " U64_FORMAT, "pstate", uc->uc_mcontext.pstate);
#else
  // now print flag register
  st->print_cr(" %-4s = 0x%08lx", "cpsr", uc->uc_mcontext.arm_cpsr);
#endif
  st->cr();

  intptr_t *sp = (intptr_t *)os::Linux::ucontext_get_sp(uc);
  st->print_cr("Top of Stack: (sp=" INTPTR_FORMAT ")", p2i(sp));
  print_hex_dump(st, (address)sp, (address)(sp + 8*sizeof(intptr_t)), sizeof(intptr_t));
  st->cr();

  // Note: it may be unsafe to inspect memory near pc. For example, pc may
  // point to garbage if the entry point in an nmethod is corrupted. Leave
  // this at the end, and hope for the best.
  address pc = os::Linux::ucontext_get_pc(uc);
  st->print_cr("Instructions: (pc=" INTPTR_FORMAT ")", p2i(pc));
  print_hex_dump(st, pc - 32, pc + 32, Assembler::InstructionSize);
}

void os::print_register_info(outputStream *st, const void *context) {
  if (context == NULL) return;

  const ucontext_t *uc = (const ucontext_t*)context;
  intx* reg_area = (intx*)&uc->uc_mcontext.arm_r0;

  st->print_cr("Register to memory mapping:");
  st->cr();
  for (int r = 0; r < ARM_REGS_IN_CONTEXT; r++) {
    st->print_cr(" %-3s = " INTPTR_FORMAT, as_Register(r)->name(), reg_area[r]);
    print_location(st, reg_area[r]);
    st->cr();
  }
#ifdef AARCH64
  st->print_cr(" %-3s = " U64_FORMAT, "pc", uc->uc_mcontext.pc);
  print_location(st, uc->uc_mcontext.pc);
  st->cr();
#endif
  st->cr();
}


#ifndef AARCH64

typedef jlong cmpxchg_long_func_t(jlong, jlong, volatile jlong*);

cmpxchg_long_func_t* os::atomic_cmpxchg_long_func = os::atomic_cmpxchg_long_bootstrap;

jlong os::atomic_cmpxchg_long_bootstrap(jlong compare_value, jlong exchange_value, volatile jlong* dest) {
  // try to use the stub:
  cmpxchg_long_func_t* func = CAST_TO_FN_PTR(cmpxchg_long_func_t*, StubRoutines::atomic_cmpxchg_long_entry());

  if (func != NULL) {
    os::atomic_cmpxchg_long_func = func;
    return (*func)(compare_value, exchange_value, dest);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  jlong old_value = *dest;
  if (old_value == compare_value)
    *dest = exchange_value;
  return old_value;
}
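
// The bootstrap pattern above (repeated for the other atomic entry points in
// this block): the os::atomic_*_func pointer starts out aimed at the
// bootstrap routine; the first call checks whether the generated stub is
// ready and, if so, patches the pointer so all later calls go straight to
// the stub. Otherwise it falls back to a plain non-atomic read/compare/write,
// which is safe only because the assert guarantees the VM is still
// single-threaded during bootstrap.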

typedef jlong load_long_func_t(volatile jlong*);

load_long_func_t* os::atomic_load_long_func = os::atomic_load_long_bootstrap;

jlong os::atomic_load_long_bootstrap(volatile jlong* src) {
  // try to use the stub:
  load_long_func_t* func = CAST_TO_FN_PTR(load_long_func_t*, StubRoutines::atomic_load_long_entry());

  if (func != NULL) {
    os::atomic_load_long_func = func;
    return (*func)(src);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  jlong old_value = *src;
  return old_value;
}

typedef void store_long_func_t(jlong, volatile jlong*);

store_long_func_t* os::atomic_store_long_func = os::atomic_store_long_bootstrap;

void os::atomic_store_long_bootstrap(jlong val, volatile jlong* dest) {
  // try to use the stub:
  store_long_func_t* func = CAST_TO_FN_PTR(store_long_func_t*, StubRoutines::atomic_store_long_entry());

  if (func != NULL) {
    os::atomic_store_long_func = func;
    return (*func)(val, dest);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  *dest = val;
}

typedef jint atomic_add_func_t(jint add_value, volatile jint *dest);

atomic_add_func_t * os::atomic_add_func = os::atomic_add_bootstrap;

jint os::atomic_add_bootstrap(jint add_value, volatile jint *dest) {
  atomic_add_func_t * func = CAST_TO_FN_PTR(atomic_add_func_t*,
                                            StubRoutines::atomic_add_entry());
  if (func != NULL) {
    os::atomic_add_func = func;
    return (*func)(add_value, dest);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  jint old_value = *dest;
  *dest = old_value + add_value;
  return (old_value + add_value);
}

typedef jint atomic_xchg_func_t(jint exchange_value, volatile jint *dest);

atomic_xchg_func_t * os::atomic_xchg_func = os::atomic_xchg_bootstrap;

jint os::atomic_xchg_bootstrap(jint exchange_value, volatile jint *dest) {
  atomic_xchg_func_t * func = CAST_TO_FN_PTR(atomic_xchg_func_t*,
                                             StubRoutines::atomic_xchg_entry());
  if (func != NULL) {
    os::atomic_xchg_func = func;
    return (*func)(exchange_value, dest);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  jint old_value = *dest;
  *dest = exchange_value;
  return (old_value);
}

typedef jint cmpxchg_func_t(jint, jint, volatile jint*);

cmpxchg_func_t* os::atomic_cmpxchg_func = os::atomic_cmpxchg_bootstrap;

jint os::atomic_cmpxchg_bootstrap(jint compare_value, jint exchange_value, volatile jint* dest) {
  // try to use the stub:
  cmpxchg_func_t* func = CAST_TO_FN_PTR(cmpxchg_func_t*, StubRoutines::atomic_cmpxchg_entry());

  if (func != NULL) {
    os::atomic_cmpxchg_func = func;
    return (*func)(compare_value, exchange_value, dest);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  jint old_value = *dest;
  if (old_value == compare_value)
    *dest = exchange_value;
  return old_value;
}

#endif // !AARCH64
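
// Callers are not expected to invoke the bootstrap routines above directly:
// the 32-bit ARM Atomic platform layer dispatches through the
// os::atomic_*_func pointers, so after the first successful call each pointer
// refers to the generated assembly stub rather than the bootstrap fallback.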

#ifndef PRODUCT
void os::verify_stack_alignment() {
}
#endif

int os::extra_bang_size_in_bytes() {
  // ARM does not require an additional stack bang.
  return 0;
}