author | phh |
Wed, 01 Apr 2009 16:38:01 -0400 | |
changeset 2358 | 7c8346929fc6 |
parent 2268 | bea8be80ec88 |
child 2751 | 710d33ee5da7 |
permissions | -rw-r--r-- |
1 | 1 |
/* |
2105 | 2 |
* Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved. |
1 | 3 |
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 |
* |
|
5 |
* This code is free software; you can redistribute it and/or modify it |
|
6 |
* under the terms of the GNU General Public License version 2 only, as |
|
7 |
* published by the Free Software Foundation. |
|
8 |
* |
|
9 |
* This code is distributed in the hope that it will be useful, but WITHOUT |
|
10 |
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
|
11 |
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
|
12 |
* version 2 for more details (a copy is included in the LICENSE file that |
|
13 |
* accompanied this code). |
|
14 |
* |
|
15 |
* You should have received a copy of the GNU General Public License version |
|
16 |
* 2 along with this work; if not, write to the Free Software Foundation, |
|
17 |
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
|
18 |
* |
|
19 |
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, |
|
20 |
* CA 95054 USA or visit www.sun.com if you need additional information or |
|
21 |
* have any questions. |
|
22 |
* |
|
23 |
*/ |
|
24 |
||
25 |
// do not include precompiled header file |
|
26 |
# include "incls/_os_linux.cpp.incl" |
|
27 |
||
28 |
// put OS-includes here |
|
29 |
# include <sys/types.h> |
|
30 |
# include <sys/mman.h> |
|
31 |
# include <pthread.h> |
|
32 |
# include <signal.h> |
|
33 |
# include <errno.h> |
|
34 |
# include <dlfcn.h> |
|
35 |
# include <stdio.h> |
|
36 |
# include <unistd.h> |
|
37 |
# include <sys/resource.h> |
|
38 |
# include <pthread.h> |
|
39 |
# include <sys/stat.h> |
|
40 |
# include <sys/time.h> |
|
41 |
# include <sys/times.h> |
|
42 |
# include <sys/utsname.h> |
|
43 |
# include <sys/socket.h> |
|
44 |
# include <sys/wait.h> |
|
45 |
# include <pwd.h> |
|
46 |
# include <poll.h> |
|
47 |
# include <semaphore.h> |
|
48 |
# include <fcntl.h> |
|
49 |
# include <string.h> |
|
50 |
# include <syscall.h> |
|
51 |
# include <sys/sysinfo.h> |
|
52 |
# include <gnu/libc-version.h> |
|
53 |
# include <sys/ipc.h> |
|
54 |
# include <sys/shm.h> |
|
55 |
# include <link.h> |
|
56 |
||
57 |
// Maximum path length used by this file. K is a project-wide size constant
// (presumably 1024 — confirm in globalDefinitions), so this is 2KB.
#define MAX_PATH    (2 * K)

// for timer info max values which include all bits
#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
// Nanoseconds per second, as a 64-bit literal to avoid overflow in products.
#define SEC_IN_NANOSECS  1000000000LL
62 |
||
63 |
//////////////////////////////////////////////////////////////////////////////// |
|
64 |
// global variables |
|
65 |
julong os::Linux::_physical_memory = 0; |
|
66 |
||
67 |
address os::Linux::_initial_thread_stack_bottom = NULL; |
|
68 |
uintptr_t os::Linux::_initial_thread_stack_size = 0; |
|
69 |
||
70 |
int (*os::Linux::_clock_gettime)(clockid_t, struct timespec *) = NULL; |
|
71 |
int (*os::Linux::_pthread_getcpuclockid)(pthread_t, clockid_t *) = NULL; |
|
72 |
Mutex* os::Linux::_createThread_lock = NULL; |
|
73 |
pthread_t os::Linux::_main_thread; |
|
74 |
int os::Linux::_page_size = -1; |
|
75 |
bool os::Linux::_is_floating_stack = false; |
|
76 |
bool os::Linux::_is_NPTL = false; |
|
77 |
bool os::Linux::_supports_fast_thread_cpu_time = false; |
|
745
47129a5cacd3
6681796: hotspot build failure on gcc 4.2.x (ubuntu 8.04) w/ openjdk 6
xlu
parents:
388
diff
changeset
|
78 |
const char * os::Linux::_glibc_version = NULL; |
47129a5cacd3
6681796: hotspot build failure on gcc 4.2.x (ubuntu 8.04) w/ openjdk 6
xlu
parents:
388
diff
changeset
|
79 |
const char * os::Linux::_libpthread_version = NULL; |
1 | 80 |
|
81 |
static jlong initial_time_count=0; |
|
82 |
||
83 |
static int clock_tics_per_sec = 100; |
|
84 |
||
85 |
// For diagnostics to print a message once. see run_periodic_checks |
|
86 |
static sigset_t check_signal_done; |
|
87 |
static bool check_signals = true;; |
|
88 |
||
89 |
static pid_t _initial_pid = 0; |
|
90 |
||
91 |
/* Signal number used to suspend/resume a thread */ |
|
92 |
||
93 |
/* do not use any signal number less than SIGSEGV, see 4355769 */ |
|
94 |
static int SR_signum = SIGUSR2; |
|
95 |
sigset_t SR_sigset; |
|
96 |
||
950 | 97 |
/* Used to protect dlsym() calls */ |
98 |
static pthread_mutex_t dl_mutex; |
|
99 |
||
1 | 100 |
//////////////////////////////////////////////////////////////////////////////// |
101 |
// utility functions |
|
102 |
||
103 |
static int SR_initialize(); |
|
104 |
static int SR_finalize(); |
|
105 |
||
106 |
// Bytes of memory currently available on the system; delegates to the
// Linux-specific implementation below.
julong os::available_memory() {
  const julong avail = Linux::available_memory();
  return avail;
}
|
109 |
||
110 |
// Free RAM in bytes, as reported by sysinfo(2).
julong os::Linux::available_memory() {
  // struct sysinfo reports memory as "unsigned long" counts of mem_unit
  // bytes; widen to julong before multiplying to avoid 32-bit overflow.
  struct sysinfo info;
  sysinfo(&info);

  julong free_bytes = (julong)info.freeram * info.mem_unit;
  return free_bytes;
}
|
117 |
||
118 |
// Total physical memory in bytes; delegates to the cached Linux value.
julong os::physical_memory() {
  const julong phys = Linux::physical_memory();
  return phys;
}
|
121 |
||
193
171c404abf72
6629887: 64-bit windows should not restrict default heap size to 1400m
phh
parents:
1
diff
changeset
|
122 |
// Clamp a requested allocation size to what this platform can actually
// provide in a single reservation.
julong os::allocatable_physical_memory(julong size) {
#ifdef _LP64
  // 64-bit: address space is not the limiting factor.
  return size;
#else
  // 32-bit: cap at 3800M first; if even that is not allocatable, fall back
  // to just under 2G. See comments under solaris for alignment
  // considerations.
  julong capped = MIN2(size, (julong)3800*M);
  if (is_allocatable(capped)) {
    return capped;
  }
  julong reasonable_size = (julong)2*G - 2 * os::vm_page_size();
  return MIN2(size, reasonable_size);
#endif // _LP64
}
171c404abf72
6629887: 64-bit windows should not restrict default heap size to 1400m
phh
parents:
1
diff
changeset
|
135 |
|
1 | 136 |
//////////////////////////////////////////////////////////////////////////////// |
137 |
// environment support |
|
138 |
||
139 |
// Copy the value of environment variable 'name' into 'buf' (capacity 'len').
// Returns true on success; on failure (unset variable or value too long for
// the buffer) stores an empty string and returns false.
bool os::getenv(const char* name, char* buf, int len) {
  bool copied = false;
  const char* value = ::getenv(name);
  if (value != NULL) {
    size_t n = strlen(value);
    if (n < (size_t)len) {          // room for the value plus its NUL
      memcpy(buf, value, n + 1);
      copied = true;
    }
  }
  if (!copied && len > 0) {
    buf[0] = '\0';                  // return a null string
  }
  return copied;
}
|
148 |
||
149 |
||
150 |
// Return true if user is running as root. |
|
151 |
||
152 |
bool os::have_special_privileges() { |
|
153 |
static bool init = false; |
|
154 |
static bool privileges = false; |
|
155 |
if (!init) { |
|
156 |
privileges = (getuid() != geteuid()) || (getgid() != getegid()); |
|
157 |
init = true; |
|
158 |
} |
|
159 |
return privileges; |
|
160 |
} |
|
161 |
||
162 |
||
163 |
// Provide the gettid syscall number on systems whose headers predate it.
// Known values — i386: 224, ia64: 1105, amd64: 186, sparc: 143.
#ifndef SYS_gettid
#ifdef __ia64__
#define SYS_gettid 1105
#elif __i386__
#define SYS_gettid 224
#elif __amd64__
#define SYS_gettid 186
#elif __sparc__
#define SYS_gettid 143
#else
#error define gettid for the arch
#endif
#endif
|
177 |
||
178 |
// Cpu architecture string, used to build <arch> path components (e.g. the
// REG_DIR "/lib/<arch>" library path in init_system_properties_values).
#if defined(IA64)
static char cpu_arch[] = "ia64";
#elif defined(IA32)
static char cpu_arch[] = "i386";
#elif defined(AMD64)
static char cpu_arch[] = "amd64";
#elif defined(SPARC)
#  ifdef _LP64
static char cpu_arch[] = "sparcv9";
#  else
static char cpu_arch[] = "sparc";
#  endif
#else
#error Add appropriate cpu_arch setting
#endif
|
194 |
||
195 |
||
196 |
// pid_t gettid() |
|
197 |
// |
|
198 |
// Returns the kernel thread id of the currently running thread. Kernel |
|
199 |
// thread id is used to access /proc. |
|
200 |
// |
|
201 |
// (Note that getpid() on LinuxThreads returns kernel thread id too; but |
|
202 |
// on NPTL, it returns the same pid for all threads, as required by POSIX.) |
|
203 |
// |
|
204 |
pid_t os::Linux::gettid() { |
|
205 |
int rslt = syscall(SYS_gettid); |
|
206 |
if (rslt == -1) { |
|
207 |
// old kernel, no NPTL support |
|
208 |
return getpid(); |
|
209 |
} else { |
|
210 |
return (pid_t)rslt; |
|
211 |
} |
|
212 |
} |
|
213 |
||
214 |
// Most versions of linux have a bug where the number of processors are
// determined by looking at the /proc file system. In a chroot environment,
// the system call returns 1. This causes the VM to act as if it is
// a single processor and elide locking (see is_MP() call).
static bool unsafe_chroot_detected = false;
// Warning text emitted when the above condition is detected (presumably by
// the periodic checks / error reporting paths — set in
// initialize_system_info() below).
static const char *unstable_chroot_error = "/proc file system not found.\n"
                     "Java may be unstable running multithreaded in a chroot "
                     "environment on Linux when /proc filesystem is not mounted.";
1 | 222 |
|
223 |
void os::Linux::initialize_system_info() { |
|
224 |
_processor_count = sysconf(_SC_NPROCESSORS_CONF); |
|
225 |
if (_processor_count == 1) { |
|
226 |
pid_t pid = os::Linux::gettid(); |
|
227 |
char fname[32]; |
|
228 |
jio_snprintf(fname, sizeof(fname), "/proc/%d", pid); |
|
229 |
FILE *fp = fopen(fname, "r"); |
|
230 |
if (fp == NULL) { |
|
231 |
unsafe_chroot_detected = true; |
|
232 |
} else { |
|
233 |
fclose(fp); |
|
234 |
} |
|
235 |
} |
|
236 |
_physical_memory = (julong)sysconf(_SC_PHYS_PAGES) * (julong)sysconf(_SC_PAGESIZE); |
|
237 |
assert(_processor_count > 0, "linux error"); |
|
238 |
} |
|
239 |
||
240 |
// Compute and publish the path-related system properties: java.home,
// sun.boot.library.path (dll_dir), java.library.path, java.ext.dirs and
// java.endorsed.dirs. All values are derived from the on-disk location of
// libjvm[_g].so as returned by os::jvm_path().
void os::init_system_properties_values() {
  // char arch[12];
  // sysinfo(SI_ARCHITECTURE, arch, sizeof(arch));

  // The next steps are taken in the product version:
  //
  // Obtain the JAVA_HOME value from the location of libjvm[_g].so.
  // This library should be located at:
  // <JAVA_HOME>/jre/lib/<arch>/{client|server}/libjvm[_g].so.
  //
  // If "/jre/lib/" appears at the right place in the path, then we
  // assume libjvm[_g].so is installed in a JDK and we use this path.
  //
  // Otherwise exit with message: "Could not create the Java virtual machine."
  //
  // The following extra steps are taken in the debugging version:
  //
  // If "/jre/lib/" does NOT appear at the right place in the path
  // instead of exit check for $JAVA_HOME environment variable.
  //
  // If it is defined and we are able to locate $JAVA_HOME/jre/lib/<arch>,
  // then we append a fake suffix "hotspot/libjvm[_g].so" to this path so
  // it looks like libjvm[_g].so is installed there
  // <JAVA_HOME>/jre/lib/<arch>/hotspot/libjvm[_g].so.
  //
  // Otherwise exit.
  //
  // Important note: if the location of libjvm.so changes this
  // code needs to be changed accordingly.

  // The next few definitions allow the code to be verbatim:
  // NOTE: malloc is deliberately redefined to allocate from the VM's C heap;
  // the allocations are handed to Arguments::set_* which keeps them alive
  // for the life of the VM, so they are not freed here.
#define malloc(n) (char*)NEW_C_HEAP_ARRAY(char, (n))
#define getenv(n) ::getenv(n)

  /*
   * See ld(1):
   *      The linker uses the following search paths to locate required
   *      shared libraries:
   *        1: ...
   *        ...
   *        7: The default directories, normally /lib and /usr/lib.
   */
#if defined(AMD64) || defined(_LP64) && (defined(SPARC) || defined(PPC) || defined(S390))
#define DEFAULT_LIBPATH "/usr/lib64:/lib64:/lib:/usr/lib"
#else
#define DEFAULT_LIBPATH "/lib:/usr/lib"
#endif

#define EXTENSIONS_DIR  "/lib/ext"
#define ENDORSED_DIR    "/lib/endorsed"
#define REG_DIR         "/usr/java/packages"

  {
    /* sysclasspath, java_home, dll_dir */
    {
        char *home_path;
        char *dll_path;
        char *pslash;
        char buf[MAXPATHLEN];
        os::jvm_path(buf, sizeof(buf));

        // Found the full path to libjvm.so.
        // Now cut the path to <java_home>/jre if we can.
        *(strrchr(buf, '/')) = '\0';  /* get rid of /libjvm.so */
        pslash = strrchr(buf, '/');
        if (pslash != NULL)
            *pslash = '\0';           /* get rid of /{client|server|hotspot} */
        dll_path = malloc(strlen(buf) + 1);
        if (dll_path == NULL)
            return;
        strcpy(dll_path, buf);
        Arguments::set_dll_dir(dll_path);

        if (pslash != NULL) {
            pslash = strrchr(buf, '/');
            if (pslash != NULL) {
                *pslash = '\0';       /* get rid of /<arch> */
                pslash = strrchr(buf, '/');
                if (pslash != NULL)
                    *pslash = '\0';   /* get rid of /lib */
            }
        }

        home_path = malloc(strlen(buf) + 1);
        if (home_path == NULL)
            return;
        strcpy(home_path, buf);
        Arguments::set_java_home(home_path);

        if (!set_boot_path('/', ':'))
            return;
    }

    /*
     * Where to look for native libraries
     *
     * Note: Due to a legacy implementation, most of the library path
     * is set in the launcher.  This was to accomodate linking restrictions
     * on legacy Linux implementations (which are no longer supported).
     * Eventually, all the library path setting will be done here.
     *
     * However, to prevent the proliferation of improperly built native
     * libraries, the new path component /usr/java/packages is added here.
     * Eventually, all the library path setting will be done here.
     */
    {
        char *ld_library_path;

        /*
         * Construct the invariant part of ld_library_path. Note that the
         * space for the colon and the trailing null are provided by the
         * nulls included by the sizeof operator (so actually we allocate
         * a byte more than necessary).
         */
        ld_library_path = (char *) malloc(sizeof(REG_DIR) + sizeof("/lib/") +
            strlen(cpu_arch) + sizeof(DEFAULT_LIBPATH));
        sprintf(ld_library_path, REG_DIR "/lib/%s:" DEFAULT_LIBPATH, cpu_arch);

        /*
         * Get the user setting of LD_LIBRARY_PATH, and prepended it.  It
         * should always exist (until the legacy problem cited above is
         * addressed).
         */
        char *v = getenv("LD_LIBRARY_PATH");
        if (v != NULL) {
            char *t = ld_library_path;
            /* That's +1 for the colon and +1 for the trailing '\0' */
            ld_library_path = (char *) malloc(strlen(v) + 1 + strlen(t) + 1);
            sprintf(ld_library_path, "%s:%s", v, t);
        }
        Arguments::set_library_path(ld_library_path);
    }

    /*
     * Extensions directories.
     *
     * Note that the space for the colon and the trailing null are provided
     * by the nulls included by the sizeof operator (so actually one byte more
     * than necessary is allocated).
     */
    {
        char *buf = malloc(strlen(Arguments::get_java_home()) +
            sizeof(EXTENSIONS_DIR) + sizeof(REG_DIR) + sizeof(EXTENSIONS_DIR));
        sprintf(buf, "%s" EXTENSIONS_DIR ":" REG_DIR EXTENSIONS_DIR,
            Arguments::get_java_home());
        Arguments::set_ext_dirs(buf);
    }

    /* Endorsed standards default directory. */
    {
        char * buf;
        buf = malloc(strlen(Arguments::get_java_home()) + sizeof(ENDORSED_DIR));
        sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
        Arguments::set_endorsed_dirs(buf);
    }
  }

#undef malloc
#undef getenv
#undef EXTENSIONS_DIR
#undef ENDORSED_DIR

  // Done
  return;
}
|
405 |
||
406 |
//////////////////////////////////////////////////////////////////////////////// |
|
407 |
// breakpoint support |
|
408 |
||
409 |
// Trigger a debugger breakpoint via the platform BREAKPOINT macro.
void os::breakpoint() {
  BREAKPOINT;
}
|
412 |
||
413 |
// Empty, C-linkage function kept as a convenient symbol on which to set a
// debugger breakpoint by name.
extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}
|
416 |
||
417 |
//////////////////////////////////////////////////////////////////////////////// |
|
418 |
// signal support |
|
419 |
||
420 |
// Guard flag (debug builds only) asserting signal_sets_init() ran first.
debug_only(static bool signal_sets_initialized = false);
// The three signal sets managed by signal_sets_init() / accessors below.
static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;
422 |
||
423 |
bool os::Linux::is_sig_ignored(int sig) { |
|
424 |
struct sigaction oact; |
|
425 |
sigaction(sig, (struct sigaction*)NULL, &oact); |
|
426 |
void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction) |
|
427 |
: CAST_FROM_FN_PTR(void*, oact.sa_handler); |
|
428 |
if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) |
|
429 |
return true; |
|
430 |
else |
|
431 |
return false; |
|
432 |
} |
|
433 |
||
434 |
void os::Linux::signal_sets_init() { |
|
435 |
// Should also have an assertion stating we are still single-threaded. |
|
436 |
assert(!signal_sets_initialized, "Already initialized"); |
|
437 |
// Fill in signals that are necessarily unblocked for all threads in |
|
438 |
// the VM. Currently, we unblock the following signals: |
|
439 |
// SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden |
|
440 |
// by -Xrs (=ReduceSignalUsage)); |
|
441 |
// BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all |
|
442 |
// other threads. The "ReduceSignalUsage" boolean tells us not to alter |
|
443 |
// the dispositions or masks wrt these signals. |
|
444 |
// Programs embedding the VM that want to use the above signals for their |
|
445 |
// own purposes must, at this time, use the "-Xrs" option to prevent |
|
446 |
// interference with shutdown hooks and BREAK_SIGNAL thread dumping. |
|
447 |
// (See bug 4345157, and other related bugs). |
|
448 |
// In reality, though, unblocking these signals is really a nop, since |
|
449 |
// these signals are not blocked by default. |
|
450 |
sigemptyset(&unblocked_sigs); |
|
451 |
sigemptyset(&allowdebug_blocked_sigs); |
|
452 |
sigaddset(&unblocked_sigs, SIGILL); |
|
453 |
sigaddset(&unblocked_sigs, SIGSEGV); |
|
454 |
sigaddset(&unblocked_sigs, SIGBUS); |
|
455 |
sigaddset(&unblocked_sigs, SIGFPE); |
|
456 |
sigaddset(&unblocked_sigs, SR_signum); |
|
457 |
||
458 |
if (!ReduceSignalUsage) { |
|
459 |
if (!os::Linux::is_sig_ignored(SHUTDOWN1_SIGNAL)) { |
|
460 |
sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL); |
|
461 |
sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL); |
|
462 |
} |
|
463 |
if (!os::Linux::is_sig_ignored(SHUTDOWN2_SIGNAL)) { |
|
464 |
sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL); |
|
465 |
sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL); |
|
466 |
} |
|
467 |
if (!os::Linux::is_sig_ignored(SHUTDOWN3_SIGNAL)) { |
|
468 |
sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL); |
|
469 |
sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL); |
|
470 |
} |
|
471 |
} |
|
472 |
// Fill in signals that are blocked by all but the VM thread. |
|
473 |
sigemptyset(&vm_sigs); |
|
474 |
if (!ReduceSignalUsage) |
|
475 |
sigaddset(&vm_sigs, BREAK_SIGNAL); |
|
476 |
debug_only(signal_sets_initialized = true); |
|
477 |
||
478 |
} |
|
479 |
||
480 |
// These are signals that are unblocked while a thread is running Java.
// (For some reason, they get blocked by default.)
// Accessor for the set built in signal_sets_init(); asserts ordering.
sigset_t* os::Linux::unblocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &unblocked_sigs;
}
|
486 |
||
487 |
// These are the signals that are blocked while a (non-VM) thread is
// running Java. Only the VM thread handles these signals.
sigset_t* os::Linux::vm_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &vm_sigs;
}
|
493 |
||
494 |
// These are signals that are blocked during cond_wait to allow debugger in
sigset_t* os::Linux::allowdebug_blocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &allowdebug_blocked_sigs;
}
|
499 |
||
500 |
// Install the HotSpot signal mask on 'thread', remembering the caller's
// mask on the OSThread so it can be restored later.
void os::Linux::hotspot_sigmask(Thread* thread) {
  // Save caller's signal mask before setting VM signal mask.
  // (SIG_BLOCK with a NULL set only queries the current mask.)
  sigset_t original_mask;
  pthread_sigmask(SIG_BLOCK, NULL, &original_mask);
  thread->osthread()->set_caller_sigmask(original_mask);

  // Signals the VM relies on must be deliverable on every thread.
  pthread_sigmask(SIG_UNBLOCK, os::Linux::unblocked_signals(), NULL);

  if (!ReduceSignalUsage) {
    // Only the VM thread handles BREAK_SIGNAL; all others block it.
    int how = thread->is_VM_thread() ? SIG_UNBLOCK : SIG_BLOCK;
    pthread_sigmask(how, vm_signals(), NULL);
  }
}
|
521 |
||
522 |
////////////////////////////////////////////////////////////////////////////// |
|
523 |
// detecting pthread library |
|
524 |
||
525 |
// Detect the glibc and libpthread flavor (NPTL vs. LinuxThreads) and record
// the results in the os::Linux statics (_glibc_version, _libpthread_version,
// _is_NPTL / _is_LinuxThreads, _is_floating_stack).
void os::Linux::libpthread_init() {
  // Save glibc and pthread version strings. Note that _CS_GNU_LIBC_VERSION
  // and _CS_GNU_LIBPTHREAD_VERSION are supported in glibc >= 2.3.2. Use a
  // generic name for earlier versions.
  // Define macros here so we can build HotSpot on old systems.
# ifndef _CS_GNU_LIBC_VERSION
# define _CS_GNU_LIBC_VERSION 2
# endif
# ifndef _CS_GNU_LIBPTHREAD_VERSION
# define _CS_GNU_LIBPTHREAD_VERSION 3
# endif

  size_t n = confstr(_CS_GNU_LIBC_VERSION, NULL, 0);
  if (n > 0) {
     // NOTE: the malloc'ed string is retained for the life of the VM via
     // set_glibc_version(); it is intentionally never freed.
     char *str = (char *)malloc(n);
     confstr(_CS_GNU_LIBC_VERSION, str, n);
     os::Linux::set_glibc_version(str);
  } else {
     // _CS_GNU_LIBC_VERSION is not supported, try gnu_get_libc_version()
     static char _gnu_libc_version[32];
     jio_snprintf(_gnu_libc_version, sizeof(_gnu_libc_version),
              "glibc %s %s", gnu_get_libc_version(), gnu_get_libc_release());
     os::Linux::set_glibc_version(_gnu_libc_version);
  }

  n = confstr(_CS_GNU_LIBPTHREAD_VERSION, NULL, 0);
  if (n > 0) {
     char *str = (char *)malloc(n);
     confstr(_CS_GNU_LIBPTHREAD_VERSION, str, n);
     // Vanilla RH-9 (glibc 2.3.2) has a bug that confstr() always tells
     // us "NPTL-0.29" even we are running with LinuxThreads. Check if this
     // is the case. LinuxThreads has a hard limit on max number of threads.
     // So sysconf(_SC_THREAD_THREADS_MAX) will return a positive value.
     // On the other hand, NPTL does not have such a limit, sysconf()
     // will return -1 and errno is not changed. Check if it is really NPTL.
     if (strcmp(os::Linux::glibc_version(), "glibc 2.3.2") == 0 &&
         strstr(str, "NPTL") &&
         sysconf(_SC_THREAD_THREADS_MAX) > 0) {
       // Misreported: really LinuxThreads, so discard the confstr buffer.
       free(str);
       os::Linux::set_libpthread_version("linuxthreads");
     } else {
       os::Linux::set_libpthread_version(str);
     }
  } else {
    // glibc before 2.3.2 only has LinuxThreads.
    os::Linux::set_libpthread_version("linuxthreads");
  }

  if (strstr(libpthread_version(), "NPTL")) {
     os::Linux::set_is_NPTL();
  } else {
     os::Linux::set_is_LinuxThreads();
  }

  // LinuxThreads have two flavors: floating-stack mode, which allows variable
  // stack size; and fixed-stack mode. NPTL is always floating-stack.
  if (os::Linux::is_NPTL() || os::Linux::supports_variable_stack_size()) {
     os::Linux::set_is_floating_stack();
  }
}
|
585 |
||
586 |
///////////////////////////////////////////////////////////////////////////// |
|
587 |
// thread stack |
|
588 |
||
589 |
// Force Linux kernel to expand current thread stack. If "bottom" is close
// to the stack guard, caller should block all signals.
//
// MAP_GROWSDOWN:
//   A special mmap() flag that is used to implement thread stacks. It tells
//   kernel that the memory region should extend downwards when needed. This
//   allows early versions of LinuxThreads to only mmap the first few pages
//   when creating a new thread. Linux kernel will automatically expand thread
//   stack as needed (on page faults).
//
//   However, because the memory region of a MAP_GROWSDOWN stack can grow on
//   demand, if a page fault happens outside an already mapped MAP_GROWSDOWN
//   region, it's hard to tell if the fault is due to a legitimate stack
//   access or because of reading/writing non-exist memory (e.g. buffer
//   overrun). As a rule, if the fault happens below current stack pointer,
//   Linux kernel does not expand stack, instead a SIGSEGV is sent to the
//   application (see Linux kernel fault.c).
//
//   This Linux feature can cause SIGSEGV when VM bangs thread stack for
//   stack overflow detection.
//
//   Newer version of LinuxThreads (since glibc-2.2, or, RH-7.x) and NPTL do
//   not use this flag. However, the stack of initial thread is not created
//   by pthread, it is still MAP_GROWSDOWN. Also it's possible (though
//   unlikely) that user code can create a thread with MAP_GROWSDOWN stack
//   and then attach the thread to JVM.
//
// To get around the problem and allow stack banging on Linux, we need to
// manually expand thread stack after receiving the SIGSEGV.
//
// There are two ways to expand thread stack to address "bottom", we used
// both of them in JVM before 1.5:
//   1. adjust stack pointer first so that it is below "bottom", and then
//      touch "bottom"
//   2. mmap() the page in question
//
// Now alternate signal stack is gone, it's harder to use 2. For instance,
// if current sp is already near the lower end of page 101, and we need to
// call mmap() to map page 100, it is possible that part of the mmap() frame
// will be placed in page 100. When page 100 is mapped, it is zero-filled.
// That will destroy the mmap() frame and cause VM to crash.
//
// The following code works by adjusting sp first, then accessing the "bottom"
// page to force a page fault. Linux kernel will then automatically expand the
// stack mapping.
//
// _expand_stack_to() assumes its frame size is less than page size, which
// should always be true if the function is not inlined.

#if __GNUC__ < 3    // gcc 2.x does not support noinline attribute
#define NOINLINE
#else
#define NOINLINE __attribute__ ((noinline))
#endif

// Forward declaration; NOINLINE is essential to the frame-size assumption
// documented above.
static void _expand_stack_to(address bottom) NOINLINE;
|
645 |
||
646 |
// Grow the current MAP_GROWSDOWN stack mapping down to "bottom" by moving
// sp below it with alloca() and then touching the page (see the long
// comment above). Relies on this function's frame being smaller than a
// page — hence NOINLINE.
static void _expand_stack_to(address bottom) {
  address sp;
  size_t size;
  volatile char *p;

  // Adjust bottom to point to the largest address within the same page, it
  // gives us a one-page buffer if alloca() allocates slightly more memory.
  bottom = (address)align_size_down((uintptr_t)bottom, os::Linux::page_size());
  bottom += os::Linux::page_size() - 1;

  // sp might be slightly above current stack pointer; if that's the case, we
  // will alloca() a little more space than necessary, which is OK. Don't use
  // os::current_stack_pointer(), as its result can be slightly below current
  // stack pointer, causing us to not alloca enough to reach "bottom".
  sp = (address)&sp;

  if (sp > bottom) {
    size = sp - bottom;
    p = (volatile char *)alloca(size);
    assert(p != NULL && p <= (volatile char *)bottom, "alloca problem?");
    // The write faults the page in; the kernel then expands the stack.
    p[0] = '\0';
  }
}
|
669 |
||
670 |
// Force the kernel to map thread t's on-demand stack down to "addr".
// Only addresses between the yellow-zone base and the stack base are
// expandable; returns true iff addr was in that range and the expansion
// was performed. All signals are blocked around _expand_stack_to() so a
// signal handler cannot run on the not-yet-mapped portion of the stack.
bool os::Linux::manually_expand_stack(JavaThread * t, address addr) {
  assert(t!=NULL, "just checking");
  assert(t->osthread()->expanding_stack(), "expand should be set");
  assert(t->stack_base() != NULL, "stack_base was not initialized");

  if (addr < t->stack_base() && addr >= t->stack_yellow_zone_base()) {
    sigset_t mask_all, old_sigset;
    sigfillset(&mask_all);
    // Block everything while expanding; restore the caller's mask afterwards.
    pthread_sigmask(SIG_SETMASK, &mask_all, &old_sigset);
    _expand_stack_to(addr);
    pthread_sigmask(SIG_SETMASK, &old_sigset, NULL);
    return true;
  }
  return false;
}
|
685 |
||
686 |
//////////////////////////////////////////////////////////////////////////////
// create new thread

static address highest_vm_reserved_address();

// check if it's safe to start a new thread
// Returns false only for fixed-stack LinuxThreads when the new thread's
// stack would land within ThreadSafetyMargin of the highest JVM mapping.
static bool _thread_safety_check(Thread* thread) {
  if (os::Linux::is_LinuxThreads() && !os::Linux::is_floating_stack()) {
    // Fixed stack LinuxThreads (SuSE Linux/x86, and some versions of Redhat)
    // Heap is mmap'ed at lower end of memory space. Thread stacks are
    // allocated (MAP_FIXED) from high address space. Every thread stack
    // occupies a fixed size slot (usually 2Mbytes, but user can change
    // it to other values if they rebuild LinuxThreads).
    //
    // Problem with MAP_FIXED is that mmap() can still succeed even part of
    // the memory region has already been mmap'ed. That means if we have too
    // many threads and/or very large heap, eventually thread stack will
    // collide with heap.
    //
    // Here we try to prevent heap/stack collision by comparing current
    // stack bottom with the highest address that has been mmap'ed by JVM
    // plus a safety margin for memory maps created by native code.
    //
    // This feature can be disabled by setting ThreadSafetyMargin to 0
    //
    if (ThreadSafetyMargin > 0) {
      address stack_bottom = os::current_stack_base() - os::current_stack_size();

      // not safe if our stack extends below the safety margin
      return stack_bottom - ThreadSafetyMargin >= highest_vm_reserved_address();
    } else {
      return true;
    }
  } else {
    // Floating stack LinuxThreads or NPTL:
    //   Unlike fixed stack LinuxThreads, thread stacks are not MAP_FIXED. When
    //   there's not enough space left, pthread_create() will fail. If we come
    //   here, that means enough space has been reserved for stack.
    return true;
  }
}
|
727 |
||
728 |
// Thread start routine for all newly created threads.
// Performs the child side of the create_thread() handshake: publishes its
// state (ZOMBIE on safety-check failure, INITIALIZED on success), then
// blocks until os::start_thread() moves it out of INITIALIZED before
// running the Thread's entry point.
static void *java_start(Thread *thread) {
  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads of the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.
  static int counter = 0;
  int pid = os::current_process_id();
  alloca(((pid ^ counter++) & 7) * 128);

  ThreadLocalStorage::set_thread(thread);

  OSThread* osthread = thread->osthread();
  Monitor* sync = osthread->startThread_lock();

  // non floating stack LinuxThreads needs extra check, see above
  if (!_thread_safety_check(thread)) {
    // notify parent thread that creation was aborted
    MutexLockerEx ml(sync, Mutex::_no_safepoint_check_flag);
    osthread->set_state(ZOMBIE);
    sync->notify_all();
    return NULL;
  }

  // thread_id is kernel thread id (similar to Solaris LWP id)
  osthread->set_thread_id(os::Linux::gettid());

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }
  // initialize signal mask for this thread
  os::Linux::hotspot_sigmask(thread);

  // initialize floating point control register
  os::Linux::init_thread_fpu_state();

  // handshaking with parent thread
  {
    MutexLockerEx ml(sync, Mutex::_no_safepoint_check_flag);

    // notify parent thread
    osthread->set_state(INITIALIZED);
    sync->notify_all();

    // wait until os::start_thread() changes the state away from INITIALIZED
    while (osthread->get_state() == INITIALIZED) {
      sync->wait(Mutex::_no_safepoint_check_flag);
    }
  }

  // call one more level start routine
  thread->run();

  return 0;
}
|
787 |
||
788 |
// Create a new native thread to run "thread". On success the new thread is
// left suspended in state INITIALIZED (started later via os::start_thread);
// on failure the OSThread is cleaned up and false is returned.
bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
  assert(thread->osthread() == NULL, "caller responsible");

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  // set the correct thread state
  osthread->set_thread_type(thr_type);

  // Initial state is ALLOCATED but not INITIALIZED
  osthread->set_state(ALLOCATED);

  thread->set_osthread(osthread);

  // init thread attributes
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

  // stack size
  if (os::Linux::supports_variable_stack_size()) {
    // calculate stack size if it's not specified by caller
    if (stack_size == 0) {
      stack_size = os::Linux::default_stack_size(thr_type);

      switch (thr_type) {
      case os::java_thread:
        // Java threads use ThreadStackSize which default value can be changed with the flag -Xss
        if (JavaThread::stack_size_at_create() > 0) stack_size = JavaThread::stack_size_at_create();
        break;
      case os::compiler_thread:
        if (CompilerThreadStackSize > 0) {
          stack_size = (size_t)(CompilerThreadStackSize * K);
          break;
        } // else fall through:
          // use VMThreadStackSize if CompilerThreadStackSize is not defined
      case os::vm_thread:
      case os::pgc_thread:
      case os::cgc_thread:
      case os::watcher_thread:
        if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
        break;
      }
    }

    stack_size = MAX2(stack_size, os::Linux::min_stack_allowed);
    pthread_attr_setstacksize(&attr, stack_size);
  } else {
    // let pthread_create() pick the default value.
  }

  // glibc guard page
  pthread_attr_setguardsize(&attr, os::Linux::default_guard_size(thr_type));

  ThreadState state;

  {
    // Serialize thread creation if we are running with fixed stack LinuxThreads
    bool lock = os::Linux::is_LinuxThreads() && !os::Linux::is_floating_stack();
    if (lock) {
      os::Linux::createThread_lock()->lock_without_safepoint_check();
    }

    pthread_t tid;
    int ret = pthread_create(&tid, &attr, (void* (*)(void*)) java_start, thread);

    pthread_attr_destroy(&attr);

    if (ret != 0) {
      if (PrintMiscellaneous && (Verbose || WizardMode)) {
        perror("pthread_create()");
      }
      // Need to clean up stuff we've allocated so far
      thread->set_osthread(NULL);
      delete osthread;
      if (lock) os::Linux::createThread_lock()->unlock();
      return false;
    }

    // Store pthread info into the OSThread
    osthread->set_pthread_id(tid);

    // Wait until child thread is either initialized or aborted
    // (the child moves the state from ALLOCATED in java_start()).
    {
      Monitor* sync_with_child = osthread->startThread_lock();
      MutexLockerEx ml(sync_with_child, Mutex::_no_safepoint_check_flag);
      while ((state = osthread->get_state()) == ALLOCATED) {
        sync_with_child->wait(Mutex::_no_safepoint_check_flag);
      }
    }

    if (lock) {
      os::Linux::createThread_lock()->unlock();
    }
  }

  // Aborted due to thread limit being reached
  if (state == ZOMBIE) {
    thread->set_osthread(NULL);
    delete osthread;
    return false;
  }

  // The thread is returned suspended (in state INITIALIZED),
  // and is started higher up in the call chain
  assert(state == INITIALIZED, "race condition");
  return true;
}
|
899 |
||
900 |
/////////////////////////////////////////////////////////////////////////////
// attach existing thread

// bootstrap the main thread
// Must be called on the primordial thread itself; it is then registered
// like any other externally-created (attached) thread.
bool os::create_main_thread(JavaThread* thread) {
  assert(os::Linux::_main_thread == pthread_self(), "should be called inside main thread");
  return create_attached_thread(thread);
}
|
908 |
||
909 |
// Register the calling, already-running native thread with the VM:
// allocate its OSThread, record its ids, and (for the initial thread)
// pre-expand the on-demand stack mapping before guard pages are enabled.
bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);

  if (osthread == NULL) {
    return false;
  }

  // Store pthread info into the OSThread
  osthread->set_thread_id(os::Linux::gettid());
  osthread->set_pthread_id(::pthread_self());

  // initialize floating point control register
  os::Linux::init_thread_fpu_state();

  // Initial thread state is RUNNABLE
  osthread->set_state(RUNNABLE);

  thread->set_osthread(osthread);

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  if (os::Linux::is_initial_thread()) {
    // If current thread is initial thread, its stack is mapped on demand,
    // see notes about MAP_GROWSDOWN. Here we try to force kernel to map
    // the entire stack region to avoid SEGV in stack banging.
    // It is also useful to get around the heap-stack-gap problem on SuSE
    // kernel (see 4821821 for details). We first expand stack to the top
    // of yellow zone, then enable stack yellow zone (order is significant,
    // enabling yellow zone first will crash JVM on SuSE Linux), so there
    // is no gap between the last two virtual memory regions.

    JavaThread *jt = (JavaThread *)thread;
    address addr = jt->stack_yellow_zone_base();
    assert(addr != NULL, "initialization problem?");
    assert(jt->stack_available(addr) > 0, "stack guard should not be enabled");

    osthread->set_expanding_stack();
    os::Linux::manually_expand_stack(jt, addr);
    osthread->clear_expanding_stack();
  }

  // initialize signal mask for this thread
  // and save the caller's signal mask
  os::Linux::hotspot_sigmask(thread);

  return true;
}
|
966 |
||
967 |
// Release a thread created by os::create_thread(): the child is parked in
// java_start() waiting on its start lock; a single notify lets it proceed.
void os::pd_start_thread(Thread* thread) {
  OSThread* const os_thr = thread->osthread();
  assert(os_thr->get_state() != INITIALIZED, "just checking");
  Monitor* const start_lock = os_thr->startThread_lock();
  MutexLockerEx ml(start_lock, Mutex::_no_safepoint_check_flag);
  start_lock->notify();
}
|
974 |
||
975 |
// Free Linux resources related to the OSThread |
|
976 |
void os::free_thread(OSThread* osthread) { |
|
977 |
assert(osthread != NULL, "osthread not set"); |
|
978 |
||
979 |
if (Thread::current()->osthread() == osthread) { |
|
980 |
// Restore caller's signal mask |
|
981 |
sigset_t sigmask = osthread->caller_sigmask(); |
|
982 |
pthread_sigmask(SIG_SETMASK, &sigmask, NULL); |
|
983 |
} |
|
984 |
||
985 |
delete osthread; |
|
986 |
} |
|
987 |
||
988 |
////////////////////////////////////////////////////////////////////////////// |
|
989 |
// thread local storage |
|
990 |
||
991 |
int os::allocate_thread_local_storage() { |
|
992 |
pthread_key_t key; |
|
993 |
int rslt = pthread_key_create(&key, NULL); |
|
994 |
assert(rslt == 0, "cannot allocate thread local storage"); |
|
995 |
return (int)key; |
|
996 |
} |
|
997 |
||
998 |
// Note: This is currently not used by VM, as we don't destroy TLS key |
|
999 |
// on VM exit. |
|
1000 |
void os::free_thread_local_storage(int index) { |
|
1001 |
int rslt = pthread_key_delete((pthread_key_t)index); |
|
1002 |
assert(rslt == 0, "invalid index"); |
|
1003 |
} |
|
1004 |
||
1005 |
// Store "value" under the TLS key previously returned by
// os::allocate_thread_local_storage().
void os::thread_local_storage_at_put(int index, void* value) {
  const int status = pthread_setspecific((pthread_key_t)index, value);
  assert(status == 0, "pthread_setspecific failed");
}
|
1009 |
||
1010 |
// C-linkage accessor for the current Thread*, used from assembly and
// other code that cannot call into C++ directly.
extern "C" Thread* get_thread() {
  return ThreadLocalStorage::thread();
}
|
1013 |
||
1014 |
////////////////////////////////////////////////////////////////////////////// |
|
1015 |
// initial thread |
|
1016 |
||
1017 |
// Check if current thread is the initial thread, similar to Solaris thr_main. |
|
1018 |
bool os::Linux::is_initial_thread(void) { |
|
1019 |
char dummy; |
|
1020 |
// If called before init complete, thread stack bottom will be null. |
|
1021 |
// Can be called if fatal error occurs before initialization. |
|
1022 |
if (initial_thread_stack_bottom() == NULL) return false; |
|
1023 |
assert(initial_thread_stack_bottom() != NULL && |
|
1024 |
initial_thread_stack_size() != 0, |
|
1025 |
"os::init did not locate initial thread's stack region"); |
|
1026 |
if ((address)&dummy >= initial_thread_stack_bottom() && |
|
1027 |
(address)&dummy < initial_thread_stack_bottom() + initial_thread_stack_size()) |
|
1028 |
return true; |
|
1029 |
else return false; |
|
1030 |
} |
|
1031 |
||
1032 |
// Find the virtual memory area that contains addr |
|
1033 |
static bool find_vma(address addr, address* vma_low, address* vma_high) { |
|
1034 |
FILE *fp = fopen("/proc/self/maps", "r"); |
|
1035 |
if (fp) { |
|
1036 |
address low, high; |
|
1037 |
while (!feof(fp)) { |
|
1038 |
if (fscanf(fp, "%p-%p", &low, &high) == 2) { |
|
1039 |
if (low <= addr && addr < high) { |
|
1040 |
if (vma_low) *vma_low = low; |
|
1041 |
if (vma_high) *vma_high = high; |
|
1042 |
fclose (fp); |
|
1043 |
return true; |
|
1044 |
} |
|
1045 |
} |
|
1046 |
for (;;) { |
|
1047 |
int ch = fgetc(fp); |
|
1048 |
if (ch == EOF || ch == (int)'\n') break; |
|
1049 |
} |
|
1050 |
} |
|
1051 |
fclose(fp); |
|
1052 |
} |
|
1053 |
return false; |
|
1054 |
} |
|
1055 |
||
1056 |
// Locate initial thread stack. This special handling of initial thread stack
// is needed because pthread_getattr_np() on most (all?) Linux distros returns
// bogus value for initial thread.
void os::Linux::capture_initial_stack(size_t max_size) {
  // stack size is the easy part, get it from RLIMIT_STACK
  size_t stack_size;
  struct rlimit rlim;
  getrlimit(RLIMIT_STACK, &rlim);
  stack_size = rlim.rlim_cur;

  // 6308388: a bug in ld.so will relocate its own .data section to the
  //   lower end of primordial stack; reduce ulimit -s value a little bit
  //   so we won't install guard page on ld.so's data section.
  stack_size -= 2 * page_size();

  // 4441425: avoid crash with "unlimited" stack size on SuSE 7.1 or Redhat
  //   7.1, in both cases we will get 2G in return value.
  // 4466587: glibc 2.2.x compiled w/o "--enable-kernel=2.4.0" (RH 7.0,
  //   SuSE 7.2, Debian) can not handle alternate signal stack correctly
  //   for initial thread if its stack size exceeds 6M. Cap it at 2M,
  //   in case other parts in glibc still assumes 2M max stack size.
  // FIXME: alt signal stack is gone, maybe we can relax this constraint?
#ifndef IA64
  if (stack_size > 2 * K * K) stack_size = 2 * K * K;
#else
  // Problem still exists RH7.2 (IA64 anyway) but 2MB is a little small
  if (stack_size > 4 * K * K) stack_size = 4 * K * K;
#endif

  // Try to figure out where the stack base (top) is. This is harder.
  //
  // When an application is started, glibc saves the initial stack pointer in
  // a global variable "__libc_stack_end", which is then used by system
  // libraries. __libc_stack_end should be pretty close to stack top. The
  // variable is available since the very early days. However, because it is
  // a private interface, it could disappear in the future.
  //
  // Linux kernel saves start_stack information in /proc/<pid>/stat. Similar
  // to __libc_stack_end, it is very close to stack top, but isn't the real
  // stack top. Note that /proc may not exist if VM is running as a chroot
  // program, so reading /proc/<pid>/stat could fail. Also the contents of
  // /proc/<pid>/stat could change in the future (though unlikely).
  //
  // We try __libc_stack_end first. If that doesn't work, look for
  // /proc/<pid>/stat. If neither of them works, we use current stack pointer
  // as a hint, which should work well in most cases.

  uintptr_t stack_start;

  // try __libc_stack_end first
  uintptr_t *p = (uintptr_t *)dlsym(RTLD_DEFAULT, "__libc_stack_end");
  if (p && *p) {
    stack_start = *p;
  } else {
    // see if we can get the start_stack field from /proc/self/stat
    FILE *fp;
    int pid;
    char state;
    int ppid;
    int pgrp;
    int session;
    int nr;
    int tpgrp;
    unsigned long flags;
    unsigned long minflt;
    unsigned long cminflt;
    unsigned long majflt;
    unsigned long cmajflt;
    unsigned long utime;
    unsigned long stime;
    long cutime;
    long cstime;
    long prio;
    long nice;
    long junk;
    long it_real;
    uintptr_t start;
    uintptr_t vsize;
    uintptr_t rss;
    unsigned long rsslim;
    uintptr_t scodes;
    uintptr_t ecode;
    int i;

    // Figure what the primordial thread stack base is. Code is inspired
    // by email from Hans Boehm. /proc/self/stat begins with current pid,
    // followed by command name surrounded by parentheses, state, etc.
    char stat[2048];
    int statlen;

    fp = fopen("/proc/self/stat", "r");
    if (fp) {
      statlen = fread(stat, 1, 2047, fp);
      stat[statlen] = '\0';
      fclose(fp);

      // Skip pid and the command string. Note that we could be dealing with
      // weird command names, e.g. user could decide to rename java launcher
      // to "java 1.4.2 :)", then the stat file would look like
      //                1234 (java 1.4.2 :)) R ... ...
      // We don't really need to know the command string, just find the last
      // occurrence of ")" and then start parsing from there. See bug 4726580.
      char * s = strrchr(stat, ')');

      i = 0;
      if (s) {
        // Skip blank chars
        do s++; while (isspace(*s));

        /*                                     1   1   1   1   1   1   1   1   1   1   2   2   2   2   2   2   2   2   2 */
        /*              3  4  5  6  7  8   9   0   1   2   3   4   5   6   7   8   9   0   1   2   3   4   5   6   7   8 */
        i = sscanf(s, "%c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu %ld %ld %ld %ld %ld %ld "
                   UINTX_FORMAT UINTX_FORMAT UINTX_FORMAT
                   " %lu "
                   UINTX_FORMAT UINTX_FORMAT UINTX_FORMAT,
             &state,          /* 3  %c  */
             &ppid,           /* 4  %d  */
             &pgrp,           /* 5  %d  */
             &session,        /* 6  %d  */
             &nr,             /* 7  %d  */
             &tpgrp,          /* 8  %d  */
             &flags,          /* 9  %lu  */
             &minflt,         /* 10 %lu  */
             &cminflt,        /* 11 %lu  */
             &majflt,         /* 12 %lu  */
             &cmajflt,        /* 13 %lu  */
             &utime,          /* 14 %lu  */
             &stime,          /* 15 %lu  */
             &cutime,         /* 16 %ld  */
             &cstime,         /* 17 %ld  */
             &prio,           /* 18 %ld  */
             &nice,           /* 19 %ld  */
             &junk,           /* 20 %ld  */
             &it_real,        /* 21 %ld  */
             &start,          /* 22 UINTX_FORMAT */
             &vsize,          /* 23 UINTX_FORMAT */
             &rss,            /* 24 UINTX_FORMAT */
             &rsslim,         /* 25 %lu  */
             &scodes,         /* 26 UINTX_FORMAT */
             &ecode,          /* 27 UINTX_FORMAT */
             &stack_start);   /* 28 UINTX_FORMAT */
      }

      // 26 conversions expected (fields 3..28)
      if (i != 28 - 2) {
         assert(false, "Bad conversion from /proc/self/stat");
         // product mode - assume we are the initial thread, good luck in the
         // embedded case.
         warning("Can't detect initial thread stack location - bad conversion");
         stack_start = (uintptr_t) &rlim;
      }
    } else {
      // For some reason we can't open /proc/self/stat (for example, running on
      // FreeBSD with a Linux emulator, or inside chroot), this should work for
      // most cases, so don't abort:
      warning("Can't detect initial thread stack location - no /proc/self/stat");
      stack_start = (uintptr_t) &rlim;
    }
  }

  // Now we have a pointer (stack_start) very close to the stack top, the
  // next thing to do is to figure out the exact location of stack top. We
  // can find out the virtual memory area that contains stack_start by
  // reading /proc/self/maps, it should be the last vma in /proc/self/maps,
  // and its upper limit is the real stack top. (again, this would fail if
  // running inside chroot, because /proc may not exist.)

  uintptr_t stack_top;
  address low, high;
  if (find_vma((address)stack_start, &low, &high)) {
    // success, "high" is the true stack top. (ignore "low", because initial
    // thread stack grows on demand, its real bottom is high - RLIMIT_STACK.)
    stack_top = (uintptr_t)high;
  } else {
    // failed, likely because /proc/self/maps does not exist
    warning("Can't detect initial thread stack location - find_vma failed");
    // best effort: stack_start is normally within a few pages below the real
    // stack top, use it as stack top, and reduce stack size so we won't put
    // guard page outside stack.
    stack_top = stack_start;
    stack_size -= 16 * page_size();
  }

  // stack_top could be partially down the page so align it
  stack_top = align_size_up(stack_top, page_size());

  if (max_size && stack_size > max_size) {
     _initial_thread_stack_size = max_size;
  } else {
     _initial_thread_stack_size = stack_size;
  }

  _initial_thread_stack_size = align_size_down(_initial_thread_stack_size, page_size());
  _initial_thread_stack_bottom = (address)stack_top - _initial_thread_stack_size;
}
|
1250 |
||
1251 |
////////////////////////////////////////////////////////////////////////////////
// time support

// Time since start-up in seconds to a fine granularity.
// Used by VMSelfDestructTimer and the MemProfiler.
double os::elapsedTime() {
  // elapsed_counter() is in microseconds; scale to seconds.
  return (double)(os::elapsed_counter()) * 0.000001;
}
|
1260 |
||
1261 |
// Microseconds elapsed since VM start (initial_time_count was sampled
// at startup with the same clock).
jlong os::elapsed_counter() {
  timeval time;
  int status = gettimeofday(&time, NULL);
  // Previously the result was silently ignored (and "status" unused);
  // check it in debug builds, consistent with os::javaTimeMillis().
  assert(status != -1, "linux error");
  return jlong(time.tv_sec) * 1000 * 1000 + jlong(time.tv_usec) - initial_time_count;
}
|
1266 |
||
1267 |
// Ticks per second of the elapsed counter: gettimeofday-based, so 1 MHz.
jlong os::elapsed_frequency() {
  return (1000 * 1000);
}
|
1270 |
||
1374
4c24294029a9
6711316: Open source the Garbage-First garbage collector
ysr
parents:
388
diff
changeset
|
1271 |
// For now, we say that linux does not support vtime. I have no idea
// whether it can actually be made to (DLD, 9/13/05).

bool os::supports_vtime() { return false; }
bool os::enable_vtime()   { return false; }
bool os::vtime_enabled()  { return false; }
// Fallback: report wall-clock elapsed time in place of virtual (CPU) time.
double os::elapsedVTime() {
  // better than nothing, but not much
  return elapsedTime();
}
4c24294029a9
6711316: Open source the Garbage-First garbage collector
ysr
parents:
388
diff
changeset
|
1281 |
|
234 | 1282 |
// Wall-clock time in milliseconds since the Unix epoch, as required by
// System.currentTimeMillis().
jlong os::javaTimeMillis() {
  timeval tv;
  int status = gettimeofday(&tv, NULL);
  assert(status != -1, "linux error");
  const jlong secs  = jlong(tv.tv_sec);
  const jlong msecs = jlong(tv.tv_usec / 1000);
  return secs * 1000 + msecs;
}
|
1288 |
||
1289 |
#ifndef CLOCK_MONOTONIC
#define CLOCK_MONOTONIC (1)
#endif

// Probe librt for a usable CLOCK_MONOTONIC; on success _clock_gettime is
// set and supports_monotonic_clock() becomes true.
void os::Linux::clock_init() {
  // we do dlopen's in this particular order due to bug in linux
  // dynamical loader (see 6348968) leading to crash on exit
  void* handle = dlopen("librt.so.1", RTLD_LAZY);
  if (handle == NULL) {
    handle = dlopen("librt.so", RTLD_LAZY);
  }

  if (handle) {
    int (*clock_getres_func)(clockid_t, struct timespec*) =
           (int(*)(clockid_t, struct timespec*))dlsym(handle, "clock_getres");
    int (*clock_gettime_func)(clockid_t, struct timespec*) =
           (int(*)(clockid_t, struct timespec*))dlsym(handle, "clock_gettime");
    if (clock_getres_func && clock_gettime_func) {
      // See if monotonic clock is supported by the kernel. Note that some
      // early implementations simply return kernel jiffies (updated every
      // 1/100 or 1/1000 second). It would be bad to use such a low res clock
      // for nano time (though the monotonic property is still nice to have).
      // It's fixed in newer kernels, however clock_getres() still returns
      // 1/HZ. We check if clock_getres() works, but will ignore its reported
      // resolution for now. Hopefully as people move to new kernels, this
      // won't be a problem.
      struct timespec res;
      struct timespec tp;
      if (clock_getres_func (CLOCK_MONOTONIC, &res) == 0 &&
          clock_gettime_func(CLOCK_MONOTONIC, &tp)  == 0) {
        // yes, monotonic clock is supported
        _clock_gettime = clock_gettime_func;
      } else {
        // close librt if there is no monotonic clock
        dlclose(handle);
      }
    }
  }
}
|
1328 |
||
1329 |
#ifndef SYS_clock_getres

#if defined(IA32) || defined(AMD64)
// Raw syscall numbers for clock_getres on ia32/amd64 when the libc
// headers do not define SYS_clock_getres.
#define SYS_clock_getres IA32_ONLY(266)  AMD64_ONLY(229)
#else
#error Value of SYS_clock_getres not known on this platform
#endif

#endif

// Call clock_getres directly via syscall, bypassing librt.
#define sys_clock_getres(x,y)  ::syscall(SYS_clock_getres, x, y)

// Decide whether per-thread CPU-time clocks (pthread_getcpuclockid) can be
// used for fast thread CPU time; sets _supports_fast_thread_cpu_time and
// caches the pthread_getcpuclockid function pointer on success.
void os::Linux::fast_thread_clock_init() {
  if (!UseLinuxPosixThreadCPUClocks) {
    return;
  }
  clockid_t clockid;
  struct timespec tp;
  int (*pthread_getcpuclockid_func)(pthread_t, clockid_t *) =
      (int(*)(pthread_t, clockid_t *)) dlsym(RTLD_DEFAULT, "pthread_getcpuclockid");

  // Switch to using fast clocks for thread cpu time if
  // the sys_clock_getres() returns 0 error code.
  // Note, that some kernels may support the current thread
  // clock (CLOCK_THREAD_CPUTIME_ID) but not the clocks
  // returned by the pthread_getcpuclockid().
  // If the fast Posix clocks are supported then the sys_clock_getres()
  // must return at least tp.tv_sec == 0 which means a resolution
  // better than 1 sec. This is extra check for reliability.

  if(pthread_getcpuclockid_func &&
     pthread_getcpuclockid_func(_main_thread, &clockid) == 0 &&
     sys_clock_getres(clockid, &tp) == 0 && tp.tv_sec == 0) {

    _supports_fast_thread_cpu_time = true;
    _pthread_getcpuclockid = pthread_getcpuclockid_func;
  }
}
|
1367 |
||
1368 |
// Nanosecond timer for System.nanoTime(): CLOCK_MONOTONIC when available,
// otherwise gettimeofday scaled to nanoseconds (microsecond resolution).
jlong os::javaTimeNanos() {
  if (Linux::supports_monotonic_clock()) {
    struct timespec tp;
    int status = Linux::clock_gettime(CLOCK_MONOTONIC, &tp);
    assert(status == 0, "gettime error");
    return jlong(tp.tv_sec) * (1000 * 1000 * 1000) + jlong(tp.tv_nsec);
  }

  // Fallback: real-time clock, so the result may jump with wall-clock
  // adjustments.
  timeval tv;
  int status = gettimeofday(&tv, NULL);
  assert(status != -1, "linux error");
  const jlong usecs = jlong(tv.tv_sec) * (1000 * 1000) + jlong(tv.tv_usec);
  return 1000 * usecs;
}
|
1383 |
||
1384 |
// Fill in JVMTI timer characteristics for the javaTimeNanos() clock,
// mirroring which underlying clock javaTimeNanos() will actually use.
void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
  if (Linux::supports_monotonic_clock()) {
    info_ptr->max_value = ALL_64_BITS;

    // CLOCK_MONOTONIC - amount of time since some arbitrary point in the past
    info_ptr->may_skip_backward = false;      // not subject to resetting or drifting
    info_ptr->may_skip_forward = false;       // not subject to resetting or drifting
  } else {
    // gettimeofday - based on time in seconds since the Epoch thus does not wrap
    info_ptr->max_value = ALL_64_BITS;

    // gettimeofday is a real time clock so it skips
    info_ptr->may_skip_backward = true;
    info_ptr->may_skip_forward = true;
  }

  info_ptr->kind = JVMTI_TIMER_ELAPSED;                // elapsed not CPU time
}
|
1402 |
||
1403 |
// Return the real, user, and system times in seconds from an
// arbitrary fixed point in the past.
// Returns false if times(2) failed; on success the three out-params are
// filled in (tick counts converted via the cached clock_tics_per_sec).
bool os::getTimesSecs(double* process_real_time,
                      double* process_user_time,
                      double* process_system_time) {
  struct tms ticks;
  clock_t real_ticks = times(&ticks);

  if (real_ticks == (clock_t) (-1)) {
    return false;
  } else {
    double ticks_per_second = (double) clock_tics_per_sec;
    *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
    *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
    *process_real_time = ((double) real_ticks) / ticks_per_second;

    return true;
  }
}
|
1422 |
||
1423 |
||
1424 |
// Format the current local time as "YYYY-MM-DD HH:MM:SS" into 'buf'
// (at most 'buflen' bytes, NUL-terminated by jio_snprintf) and return buf.
// Uses localtime_r, so it is safe to call from multiple threads.
char * os::local_time_string(char *buf, size_t buflen) {
  struct tm t;
  time_t long_time;
  time(&long_time);
  localtime_r(&long_time, &t);
  // tm_year is years since 1900, tm_mon is 0-based.
  jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
               t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
               t.tm_hour, t.tm_min, t.tm_sec);
  return buf;
}
|
1434 |
||
2012
041fbc6030dd
6800586: -XX:+PrintGCDateStamps is using mt-unsafe localtime function
ysr
parents:
1892
diff
changeset
|
1435 |
// Thread-safe platform wrapper for localtime: delegates to localtime_r
// writing into caller-supplied storage (introduced for 6800586, where
// the MT-unsafe localtime() was used for -XX:+PrintGCDateStamps).
struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
  return localtime_r(clock, res);
}
041fbc6030dd
6800586: -XX:+PrintGCDateStamps is using mt-unsafe localtime function
ysr
parents:
1892
diff
changeset
|
1438 |
|
1 | 1439 |
//////////////////////////////////////////////////////////////////////////////// |
1440 |
// runtime exit support |
|
1441 |
||
1442 |
// Note: os::shutdown() might be called very early during initialization, or
// called from signal handler. Before adding something to os::shutdown(), make
// sure it is async-safe and can handle partially initialized VM.
void os::shutdown() {

  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // needs to remove object in file system
  AttachListener::abort();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }

}
|
1463 |
||
1464 |
// Note: os::abort() might be called very early during initialization, or |
|
1465 |
// called from signal handler. Before adding something to os::abort(), make |
|
1466 |
// sure it is async-safe and can handle partially initialized VM. |
|
1467 |
void os::abort(bool dump_core) { |
|
1468 |
os::shutdown(); |
|
1469 |
if (dump_core) { |
|
1470 |
#ifndef PRODUCT |
|
1471 |
fdStream out(defaultStream::output_fd()); |
|
1472 |
out.print_raw("Current thread is "); |
|
1473 |
char buf[16]; |
|
1474 |
jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id()); |
|
1475 |
out.print_raw_cr(buf); |
|
1476 |
out.print_raw_cr("Dumping core ..."); |
|
1477 |
#endif |
|
1478 |
::abort(); // dump core |
|
1479 |
} |
|
1480 |
||
1481 |
::exit(1); |
|
1482 |
} |
|
1483 |
||
1484 |
// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  // _exit() on LinuxThreads only kills current thread
  ::abort();
}
|
1489 |
||
1490 |
// unused on linux for now.
// Intentionally empty: the error-file hook has no Linux implementation yet.
void os::set_error_file(const char *logfile) {}
|
1492 |
||
1493 |
// Identify the current thread by its pthread handle (opaque, not the kernel tid).
intx os::current_thread_id() { return (intx)pthread_self(); }
|
1494 |
// Return a pid that is stable across all VM threads: the pid recorded for
// the launcher thread at startup (_initial_pid) when available, otherwise
// the result of getpid().
int os::current_process_id() {

  // Under the old linux thread library, linux gives each thread
  // its own process id. Because of this each thread will return
  // a different pid if this method were to return the result
  // of getpid(2). Linux provides no api that returns the pid
  // of the launcher thread for the vm. This implementation
  // returns a unique pid, the pid of the launcher thread
  // that starts the vm 'process'.

  // Under the NPTL, getpid() returns the same pid as the
  // launcher thread rather than a unique pid per thread.
  // Use gettid() if you want the old pre NPTL behaviour.

  // if you are looking for the result of a call to getpid() that
  // returns a unique pid for the calling thread, then look at the
  // OSThread::thread_id() method in osThread_linux.hpp file

  return (int)(_initial_pid ? _initial_pid : getpid());
}
|
1514 |
||
1515 |
// DLL functions |
|
1516 |
||
1517 |
// Shared-library filename suffix on Linux.
const char* os::dll_file_extension() { return ".so"; }
|
1518 |
||
1519 |
// Directory for VM scratch files; note the trailing slash is included.
const char* os::get_temp_directory() { return "/tmp/"; }
|
1520 |
||
2358 | 1521 |
static bool file_exists(const char* filename) { |
1522 |
struct stat statbuf; |
|
1523 |
if (filename == NULL || strlen(filename) == 0) { |
|
1524 |
return false; |
|
1525 |
} |
|
1526 |
return os::stat(filename, &statbuf) == 0; |
|
1527 |
} |
|
1528 |
||
1529 |
// Build a candidate path for shared library 'fname' into 'buffer':
//   - empty pname:                      "lib<fname>.so"
//   - pname is a search path list:      first "<dir>/lib<fname>.so" that exists
//   - pname is a single directory:      "<pname>/lib<fname>.so"
// NOTE(review): in the search-path branch, if no candidate exists the buffer
// is left holding the last candidate tried (or untouched when the list is
// empty) -- confirm callers tolerate that.
void os::dll_build_name(char* buffer, size_t buflen,
                        const char* pname, const char* fname) {
  // Copied from libhpi
  const size_t pnamelen = pname ? strlen(pname) : 0;

  // Quietly truncate on buffer overflow.  Should be an error.
  // (+10 covers the "lib", ".so", '/' and NUL decorations added below.)
  if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
    *buffer = '\0';
    return;
  }

  if (pnamelen == 0) {
    snprintf(buffer, buflen, "lib%s.so", fname);
  } else if (strchr(pname, *os::path_separator()) != NULL) {
    int n;
    char** pelements = split_path(pname, &n);
    for (int i = 0 ; i < n ; i++) {
      // Really shouldn't be NULL, but check can't hurt
      if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
        continue; // skip the empty path values
      }
      snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
      if (file_exists(buffer)) {
        break;  // keep the first existing candidate
      }
    }
    // release the storage allocated by split_path()
    for (int i = 0 ; i < n ; i++) {
      if (pelements[i] != NULL) {
        FREE_C_HEAP_ARRAY(char, pelements[i]);
      }
    }
    if (pelements != NULL) {
      FREE_C_HEAP_ARRAY(char*, pelements);
    }
  } else {
    snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
  }
}
|
1568 |
||
1 | 1569 |
// Thin wrapper over getcwd(3): fills 'buf' with the current working
// directory and returns it, or NULL on failure (e.g. buffer too small).
const char* os::get_current_directory(char *buf, int buflen) {
  return getcwd(buf, buflen);
}
|
1572 |
||
1573 |
// check if addr is inside libjvm[_g].so
// Works by comparing the base address dladdr reports for 'addr' with the
// (lazily cached) base address of the library containing this function.
bool os::address_is_in_vm(address addr) {
  static address libjvm_base_addr;
  Dl_info dlinfo;

  if (libjvm_base_addr == NULL) {
    // NOTE(review): the return value of this dladdr call is not checked
    // before reading dlinfo; the assert below only catches failure in
    // debug builds -- confirm dladdr on a libjvm symbol cannot fail here.
    dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo);
    libjvm_base_addr = (address)dlinfo.dli_fbase;
    assert(libjvm_base_addr !=NULL, "Cannot obtain base address for libjvm");
  }

  if (dladdr((void *)addr, &dlinfo)) {
    if (libjvm_base_addr == (address)dlinfo.dli_fbase) return true;
  }

  return false;
}
|
1590 |
||
1591 |
// Resolve 'addr' to the nearest exported symbol via dladdr.
// On success writes the symbol name into 'buf' (if non-NULL) and the byte
// offset from the symbol start into '*offset' (if non-NULL), returns true.
// On failure buf is set to "" and *offset to -1, returns false.
bool os::dll_address_to_function_name(address addr, char *buf,
                                      int buflen, int *offset) {
  Dl_info dlinfo;

  if (dladdr((void*)addr, &dlinfo) && dlinfo.dli_sname != NULL) {
    if (buf) jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
    if (offset) *offset = addr - (address)dlinfo.dli_saddr;
    return true;
  } else {
    if (buf) buf[0] = '\0';
    if (offset) *offset = -1;
    return false;
  }
}
|
1605 |
||
1606 |
// In/out parameter bundle for address_to_library_name_callback(), passed
// through dl_iterate_phdr's opaque 'data' pointer.
struct _address_to_library_name {
  address addr;          // input : memory address
  size_t  buflen;        //         size of fname
  char*   fname;         // output: library name
  address base;          //         library base addr
};
|
1612 |
||
1613 |
// dl_iterate_phdr callback: if d->addr falls inside any PT_LOAD segment of
// the library described by 'info', record the library's base address and
// name into 'data' and return 1 to stop iteration; otherwise return 0.
static int address_to_library_name_callback(struct dl_phdr_info *info,
                                            size_t size, void *data) {
  int i;
  bool found = false;
  address libbase = NULL;
  struct _address_to_library_name * d = (struct _address_to_library_name *)data;

  // iterate through all loadable segments
  for (i = 0; i < info->dlpi_phnum; i++) {
    address segbase = (address)(info->dlpi_addr + info->dlpi_phdr[i].p_vaddr);
    if (info->dlpi_phdr[i].p_type == PT_LOAD) {
      // base address of a library is the lowest address of its loaded
      // segments.
      if (libbase == NULL || libbase > segbase) {
        libbase = segbase;
      }
      // see if 'addr' is within current segment
      if (segbase <= d->addr &&
          d->addr < segbase + info->dlpi_phdr[i].p_memsz) {
        found = true;
      }
    }
  }

  // dlpi_name is NULL or empty if the ELF file is executable, return 0
  // so dll_address_to_library_name() can fall through to use dladdr() which
  // can figure out executable name from argv[0].
  if (found && info->dlpi_name && info->dlpi_name[0]) {
    d->base = libbase;
    if (d->fname) {
      jio_snprintf(d->fname, d->buflen, "%s", info->dlpi_name);
    }
    return 1;  // non-zero stops dl_iterate_phdr
  }
  return 0;
}
|
1649 |
||
1650 |
// Resolve 'addr' to the pathname of the shared object containing it,
// writing the name into 'buf' and the offset from the library base into
// '*offset' (when non-NULL). Returns false (with buf="" / *offset=-1)
// if the address belongs to no known object.
bool os::dll_address_to_library_name(address addr, char* buf,
                                     int buflen, int* offset) {
  Dl_info dlinfo;
  struct _address_to_library_name data;

  // There is a bug in old glibc dladdr() implementation that it could resolve
  // to wrong library name if the .so file has a base address != NULL. Here
  // we iterate through the program headers of all loaded libraries to find
  // out which library 'addr' really belongs to. This workaround can be
  // removed once the minimum requirement for glibc is moved to 2.3.x.
  data.addr = addr;
  data.fname = buf;
  data.buflen = buflen;
  data.base = NULL;
  int rslt = dl_iterate_phdr(address_to_library_name_callback, (void *)&data);

  if (rslt) {
    // buf already contains library name
    if (offset) *offset = addr - data.base;
    return true;
  } else if (dladdr((void*)addr, &dlinfo)){
    // Fallback: dladdr can also name the main executable (from argv[0]).
    if (buf) jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);
    if (offset) *offset = addr - (address)dlinfo.dli_fbase;
    return true;
  } else {
    if (buf) buf[0] = '\0';
    if (offset) *offset = -1;
    return false;
  }
}
|
1680 |
||
1681 |
// Loads .dll/.so and
// in case of error it checks if .dll/.so was built for the
// same architecture as Hotspot is running on
//
// On success returns the dlopen handle. On failure returns NULL with a
// diagnostic message in 'ebuf': first the dlerror() text, then -- when the
// file's ELF header reveals an architecture/word-width/endianness mismatch
// with the running VM -- an explanatory suffix appended after it.
void * os::dll_load(const char *filename, char *ebuf, int ebuflen)
{
  void * result= ::dlopen(filename, RTLD_LAZY);
  if (result != NULL) {
    // Successful loading
    return result;
  }

  // Only the first 32-bit-sized ELF header chunk is needed for the checks
  // below; e_machine/e_ident live at the same offsets for ELF32 and ELF64.
  Elf32_Ehdr elf_head;

  // Read system error message into ebuf
  // It may or may not be overwritten below
  // NOTE(review): dlerror() can return NULL (no pending error); strncpy on a
  // NULL source would crash -- confirm dlopen failure always sets an error.
  ::strncpy(ebuf, ::dlerror(), ebuflen-1);
  ebuf[ebuflen-1]='\0';
  int diag_msg_max_length=ebuflen-strlen(ebuf);
  char* diag_msg_buf=ebuf+strlen(ebuf);

  if (diag_msg_max_length==0) {
    // No more space in ebuf for additional diagnostics message
    return NULL;
  }


  int file_descriptor= ::open(filename, O_RDONLY | O_NONBLOCK);

  if (file_descriptor < 0) {
    // Can't open library, report dlerror() message
    return NULL;
  }

  bool failed_to_read_elf_head=
    (sizeof(elf_head)!=
        (::read(file_descriptor, &elf_head,sizeof(elf_head)))) ;

  ::close(file_descriptor);
  if (failed_to_read_elf_head) {
    // file i/o error - report dlerror() msg
    return NULL;
  }

  typedef struct {
    Elf32_Half  code;         // Actual value as defined in elf.h
    Elf32_Half  compat_class; // Compatibility of archs at VM's sense
    char        elf_class;    // 32 or 64 bit
    char        endianess;    // MSB or LSB
    char*       name;         // String representation
  } arch_t;

#ifndef EM_486
#define EM_486 6 /* Intel 80486 */
#endif

  // Table of the architectures the VM knows how to describe in diagnostics.
  static const arch_t arch_array[]={
    {EM_386, EM_386, ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
    {EM_486, EM_386, ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
    {EM_IA_64, EM_IA_64, ELFCLASS64, ELFDATA2LSB, (char*)"IA 64"},
    {EM_X86_64, EM_X86_64, ELFCLASS64, ELFDATA2LSB, (char*)"AMD 64"},
    {EM_SPARC, EM_SPARC, ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
    {EM_SPARC32PLUS, EM_SPARC, ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
    {EM_SPARCV9, EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"},
    {EM_PPC, EM_PPC, ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"},
    {EM_PPC64, EM_PPC64, ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"}
  };

  // Compile-time selection of the running VM's architecture code.
#if (defined IA32)
  static Elf32_Half running_arch_code=EM_386;
#elif (defined AMD64)
  static Elf32_Half running_arch_code=EM_X86_64;
#elif (defined IA64)
  static Elf32_Half running_arch_code=EM_IA_64;
#elif (defined __sparc) && (defined _LP64)
  static Elf32_Half running_arch_code=EM_SPARCV9;
#elif (defined __sparc) && (!defined _LP64)
  static Elf32_Half running_arch_code=EM_SPARC;
#elif (defined __powerpc64__)
  static Elf32_Half running_arch_code=EM_PPC64;
#elif (defined __powerpc__)
  static Elf32_Half running_arch_code=EM_PPC;
#else
  #error Method os::dll_load requires that one of following is defined:\
       IA32, AMD64, IA64, __sparc, __powerpc__
#endif

  // Identify compatability class for VM's architecture and library's architecture
  // Obtain string descriptions for architectures

  arch_t lib_arch={elf_head.e_machine,0,elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], NULL};
  int running_arch_index=-1;

  for (unsigned int i=0 ; i < ARRAY_SIZE(arch_array) ; i++ ) {
    if (running_arch_code == arch_array[i].code) {
      running_arch_index = i;
    }
    if (lib_arch.code == arch_array[i].code) {
      lib_arch.compat_class = arch_array[i].compat_class;
      lib_arch.name = arch_array[i].name;
    }
  }

  assert(running_arch_index != -1,
    "Didn't find running architecture code (running_arch_code) in arch_array");
  if (running_arch_index == -1) {
    // Even though running architecture detection failed
    // we may still continue with reporting dlerror() message
    return NULL;
  }

  if (lib_arch.endianess != arch_array[running_arch_index].endianess) {
    ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: endianness mismatch)");
    return NULL;
  }

  if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) {
    ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: architecture word width mismatch)");
    return NULL;
  }

  if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) {
    if ( lib_arch.name!=NULL ) {
      ::snprintf(diag_msg_buf, diag_msg_max_length-1,
        " (Possible cause: can't load %s-bit .so on a %s-bit platform)",
        lib_arch.name, arch_array[running_arch_index].name);
    } else {
      ::snprintf(diag_msg_buf, diag_msg_max_length-1,
      " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)",
      lib_arch.code,
      arch_array[running_arch_index].name);
    }
  }

  return NULL;
}
|
1817 |
||
950 | 1818 |
/* |
1819 |
* glibc-2.0 libdl is not MT safe. If you are building with any glibc, |
|
1820 |
* chances are you might want to run the generated bits against glibc-2.0 |
|
1821 |
* libdl.so, so always use locking for any version of glibc. |
|
1822 |
*/ |
|
1823 |
void* os::dll_lookup(void* handle, const char* name) { |
|
1824 |
pthread_mutex_lock(&dl_mutex); |
|
1825 |
void* res = dlsym(handle, name); |
|
1826 |
pthread_mutex_unlock(&dl_mutex); |
|
1827 |
return res; |
|
1828 |
} |
|
1 | 1829 |
|
1830 |
||
1831 |
// Copy the contents of 'filename' verbatim to 'st' in 32-byte chunks.
// Returns false only when the file cannot be opened; read errors after a
// successful open still return true.
bool _print_ascii_file(const char* filename, outputStream* st) {
  int fd = open(filename, O_RDONLY);
  if (fd == -1) {
    return false;
  }

  char buf[32];
  int bytes;
  while ((bytes = read(fd, buf, sizeof(buf))) > 0) {
    st->print_raw(buf, bytes);
  }

  close(fd);

  return true;
}
|
1847 |
||
1848 |
// Dump the current thread's memory map (/proc/<tid>/maps) to 'st' as the
// "Dynamic libraries" section of an error report.
void os::print_dll_info(outputStream *st) {
  st->print_cr("Dynamic libraries:");

  char fname[32];
  pid_t pid = os::Linux::gettid();

  jio_snprintf(fname, sizeof(fname), "/proc/%d/maps", pid);

  if (!_print_ascii_file(fname, st)) {
    st->print("Can not get library information for pid = %d\n", pid);
  }
}
|
1860 |
||
1861 |
||
1862 |
// Print OS identification, kernel/uname info, libc and pthread versions,
// resource limits and load average to 'st' (used in hs_err reports).
void os::print_os_info(outputStream* st) {
  st->print("OS:");

  // Try to identify popular distros.
  // Most Linux distributions have /etc/XXX-release file, which contains
  // the OS version string. Some have more than one /etc/XXX-release file
  // (e.g. Mandrake has both /etc/mandrake-release and /etc/redhat-release.),
  // so the order is important.
  if (!_print_ascii_file("/etc/mandrake-release", st) &&
      !_print_ascii_file("/etc/sun-release", st) &&
      !_print_ascii_file("/etc/redhat-release", st) &&
      !_print_ascii_file("/etc/SuSE-release", st) &&
      !_print_ascii_file("/etc/turbolinux-release", st) &&
      !_print_ascii_file("/etc/gentoo-release", st) &&
      !_print_ascii_file("/etc/debian_version", st)) {
    st->print("Linux");
  }
  st->cr();

  // kernel
  st->print("uname:");
  struct utsname name;
  uname(&name);
  // The uname fields are runtime data: pass them through an explicit "%s"
  // format instead of using them AS the format string, where a stray '%'
  // would be misinterpreted as a conversion specification.
  st->print("%s ", name.sysname);
  st->print("%s ", name.release);
  st->print("%s ", name.version);
  st->print("%s", name.machine);
  st->cr();

  // Print warning if unsafe chroot environment detected
  if (unsafe_chroot_detected) {
    st->print("WARNING!! ");
    st->print_cr("%s", unstable_chroot_error);
  }

  // libc, pthread (version strings are runtime data, hence "%s" as above)
  st->print("libc:");
  st->print("%s ", os::Linux::glibc_version());
  st->print("%s ", os::Linux::libpthread_version());
  if (os::Linux::is_LinuxThreads()) {
    st->print("(%s stack)", os::Linux::is_floating_stack() ? "floating" : "fixed");
  }
  st->cr();

  // rlimit. rlim_cur is a rlim_t (unsigned long on Linux); print through a
  // matching "%lu" conversion -- the previous "%u"/"%d" read the wrong
  // argument size on LP64 platforms.
  st->print("rlimit:");
  struct rlimit rlim;

  st->print(" STACK ");
  getrlimit(RLIMIT_STACK, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%luk", (unsigned long)(rlim.rlim_cur >> 10));

  st->print(", CORE ");
  getrlimit(RLIMIT_CORE, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%luk", (unsigned long)(rlim.rlim_cur >> 10));

  st->print(", NPROC ");
  getrlimit(RLIMIT_NPROC, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%lu", (unsigned long)rlim.rlim_cur);

  st->print(", NOFILE ");
  getrlimit(RLIMIT_NOFILE, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%lu", (unsigned long)rlim.rlim_cur);

  st->print(", AS ");
  getrlimit(RLIMIT_AS, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%luk", (unsigned long)(rlim.rlim_cur >> 10));
  st->cr();

  // load average
  st->print("load average:");
  double loadavg[3];
  os::loadavg(loadavg, 3);
  st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
  st->cr();
}
|
1943 |
||
1944 |
// Print page size, physical memory and swap totals/free to 'st'
// (one line of an hs_err report).
void os::print_memory_info(outputStream* st) {

  st->print("Memory:");
  st->print(" %dk page", os::vm_page_size()>>10);

  // values in struct sysinfo are "unsigned long"
  struct sysinfo si;
  sysinfo(&si);

  st->print(", physical " UINT64_FORMAT "k",
            os::physical_memory() >> 10);
  st->print("(" UINT64_FORMAT "k free)",
            os::available_memory() >> 10);
  // swap counts are in units of si.mem_unit bytes; widen before multiplying.
  st->print(", swap " UINT64_FORMAT "k",
            ((jlong)si.totalswap * si.mem_unit) >> 10);
  st->print("(" UINT64_FORMAT "k free)",
            ((jlong)si.freeswap * si.mem_unit) >> 10);
  st->cr();
}
|
1963 |
||
1964 |
// Taken from /usr/include/bits/siginfo.h  Supposed to be architecture specific
// but they're the same for all the linux arch that we support
// and they're the same for solaris but there's no common place to put this.
// Index 0 of each table is a placeholder: valid si_code values start at 1.
const char *ill_names[] = { "ILL0", "ILL_ILLOPC", "ILL_ILLOPN", "ILL_ILLADR",
                            "ILL_ILLTRP", "ILL_PRVOPC", "ILL_PRVREG",
                            "ILL_COPROC", "ILL_BADSTK" };

const char *fpe_names[] = { "FPE0", "FPE_INTDIV", "FPE_INTOVF", "FPE_FLTDIV",
                            "FPE_FLTOVF", "FPE_FLTUND", "FPE_FLTRES",
                            "FPE_FLTINV", "FPE_FLTSUB", "FPE_FLTDEN" };

const char *segv_names[] = { "SEGV0", "SEGV_MAPERR", "SEGV_ACCERR" };

const char *bus_names[] = { "BUS0", "BUS_ADRALN", "BUS_ADRERR", "BUS_OBJERR" };
|
1978 |
||
1979 |
// Decode a siginfo_t into a human-readable line on 'st': signal name,
// errno, si_code (symbolic where the tables above cover it) and fault
// address. Adds a CDS-archive hint for SIGBUS/SIGSEGV with shared spaces.
void os::print_siginfo(outputStream* st, void* siginfo) {
  st->print("siginfo:");

  const int buflen = 100;
  char buf[buflen];
  siginfo_t *si = (siginfo_t*)siginfo;
  st->print("si_signo=%s: ", os::exception_name(si->si_signo, buf, buflen));
  // NOTE(review): with _GNU_SOURCE, glibc's strerror_r returns char*, not
  // int, so the "== 0" comparison may not mean "success" -- verify which
  // variant this build gets.
  if (si->si_errno != 0 && strerror_r(si->si_errno, buf, buflen) == 0) {
    st->print("si_errno=%s", buf);
  } else {
    st->print("si_errno=%d", si->si_errno);
  }
  const int c = si->si_code;
  assert(c > 0, "unexpected si_code");
  // The "c > N" guards keep lookups inside the name tables above.
  switch (si->si_signo) {
  case SIGILL:
    st->print(", si_code=%d (%s)", c, c > 8 ? "" : ill_names[c]);
    st->print(", si_addr=" PTR_FORMAT, si->si_addr);
    break;
  case SIGFPE:
    st->print(", si_code=%d (%s)", c, c > 9 ? "" : fpe_names[c]);
    st->print(", si_addr=" PTR_FORMAT, si->si_addr);
    break;
  case SIGSEGV:
    st->print(", si_code=%d (%s)", c, c > 2 ? "" : segv_names[c]);
    st->print(", si_addr=" PTR_FORMAT, si->si_addr);
    break;
  case SIGBUS:
    st->print(", si_code=%d (%s)", c, c > 3 ? "" : bus_names[c]);
    st->print(", si_addr=" PTR_FORMAT, si->si_addr);
    break;
  default:
    st->print(", si_code=%d", si->si_code);
    // no si_addr
  }

  if ((si->si_signo == SIGBUS || si->si_signo == SIGSEGV) &&
      UseSharedSpaces) {
    FileMapInfo* mapinfo = FileMapInfo::current_info();
    if (mapinfo->is_in_shared_space(si->si_addr)) {
      st->print("\n\nError accessing class data sharing archive."   \
                " Mapped file inaccessible during execution, "      \
                " possible disk/network problem.");
    }
  }
  st->cr();
}
|
2026 |
||
2027 |
||
2028 |
// Defined later in this file; dumps one signal's handler to 'st'.
static void print_signal_handler(outputStream* st, int sig,
                                 char* buf, size_t buflen);

// List the installed handlers for every signal the VM cares about
// (errors, shutdown, suspend/resume, break) for error reporting.
void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
  st->print_cr("Signal Handlers:");
  print_signal_handler(st, SIGSEGV, buf, buflen);
  print_signal_handler(st, SIGBUS , buf, buflen);
  print_signal_handler(st, SIGFPE , buf, buflen);
  print_signal_handler(st, SIGPIPE, buf, buflen);
  print_signal_handler(st, SIGXFSZ, buf, buflen);
  print_signal_handler(st, SIGILL , buf, buflen);
  print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen);
  print_signal_handler(st, SR_signum, buf, buflen);
  print_signal_handler(st, SHUTDOWN1_SIGNAL, buf, buflen);
  print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
  print_signal_handler(st, SHUTDOWN3_SIGNAL , buf, buflen);
  print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
}
|
2046 |
||
2047 |
// Cache for the resolved libjvm path; filled on first jvm_path() call.
static char saved_jvm_path[MAXPATHLEN] = {0};

// Find the full path to the current module, libjvm.so or libjvm_g.so
// 'buf' must be at least MAXPATHLEN bytes; the result is also cached in
// saved_jvm_path so subsequent calls are a strcpy.
void os::jvm_path(char *buf, jint len) {
  // Error checking.
  if (len < MAXPATHLEN) {
    assert(false, "must use a large-enough buffer");
    buf[0] = '\0';
    return;
  }
  // Lazy resolve the path to current module.
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  char dli_fname[MAXPATHLEN];
  // Resolve the library containing this very function -- that is libjvm.
  bool ret = dll_address_to_library_name(
                CAST_FROM_FN_PTR(address, os::jvm_path),
                dli_fname, sizeof(dli_fname), NULL);
  assert(ret != 0, "cannot locate libjvm");
  if (realpath(dli_fname, buf) == NULL)
    return;  // leaves the cache unset; a later call retries

  if (strcmp(Arguments::sun_java_launcher(), "gamma") == 0) {
    // Support for the gamma launcher.  Typical value for buf is
    // "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so".  If "/jre/lib/" appears at
    // the right place in the string, then assume we are installed in a JDK and
    // we're done.  Otherwise, check for a JAVA_HOME environment variable and fix
    // up the path so it looks like libjvm.so is installed there (append a
    // fake suffix hotspot/libjvm.so).
    const char *p = buf + strlen(buf) - 1;
    // Walk back over the last five '/' separators ("<arch>/<vmtype>/libjvm.so"
    // plus the two "jre/lib" components).
    for (int count = 0; p > buf && count < 5; ++count) {
      for (--p; p > buf && *p != '/'; --p)
        /* empty */ ;
    }

    if (strncmp(p, "/jre/lib/", 9) != 0) {
      // Look for JAVA_HOME in the environment.
      char* java_home_var = ::getenv("JAVA_HOME");
      if (java_home_var != NULL && java_home_var[0] != 0) {
        // Check the current module name "libjvm.so" or "libjvm_g.so".
        p = strrchr(buf, '/');
        assert(strstr(p, "/libjvm") == p, "invalid library name");
        // p becomes the "_g" suffix (or "") of the current module name.
        p = strstr(p, "_g") ? "_g" : "";

        if (realpath(java_home_var, buf) == NULL)
          return;
        sprintf(buf + strlen(buf), "/jre/lib/%s", cpu_arch);
        if (0 == access(buf, F_OK)) {
          // Use current module name "libjvm[_g].so" instead of
          // "libjvm"debug_only("_g")".so" since for fastdebug version
          // we should have "libjvm.so" but debug_only("_g") adds "_g"!
          // It is used when we are choosing the HPI library's name
          // "libhpi[_g].so" in hpi::initialize_get_interface().
          sprintf(buf + strlen(buf), "/hotspot/libjvm%s.so", p);
        } else {
          // Go back to path of .so
          if (realpath(dli_fname, buf) == NULL)
            return;
        }
      }
    }
  }

  strcpy(saved_jvm_path, buf);
}
|
2114 |
||
2115 |
// Linux JNI symbol names carry no platform prefix, so print nothing.
void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
  // no prefix required, not even "_"
}
|
2118 |
||
2119 |
// Linux JNI symbol names carry no @<args_size> style suffix either.
void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
  // no suffix required
}
|
2122 |
||
2123 |
//////////////////////////////////////////////////////////////////////////////// |
|
2124 |
// sun.misc.Signal support |
|
2125 |
||
2126 |
// Count of SIGINTs seen, used to collapse the per-thread storm below.
static volatile jint sigint_count = 0;

// Handler installed for signals forwarded to sun.misc.Signal: records the
// signal via os::signal_notify() for the Java-level dispatcher thread.
static void
UserHandler(int sig, void *siginfo, void *context) {
  // 4511530 - sem_post is serialized and handled by the manager thread. When
  // the program is interrupted by Ctrl-C, SIGINT is sent to every thread. We
  // don't want to flood the manager thread with sem_post requests.
  if (sig == SIGINT && Atomic::add(1, &sigint_count) > 1)
    return;

  // Ctrl-C is pressed during error reporting, likely because the error
  // handler fails to abort. Let VM die immediately.
  if (sig == SIGINT && is_error_reported()) {
     os::die();
  }

  os::signal_notify(sig);
}
|
2144 |
||
2145 |
void* os::user_handler() { |
|
2146 |
return CAST_FROM_FN_PTR(void*, UserHandler); |
|
2147 |
} |
|
2148 |
||
2149 |
// C-linkage typedefs for the two flavors of POSIX signal handler, used by
// the CAST_TO_FN_PTR conversions in os::signal() below.
extern "C" {
  typedef void (*sa_handler_t)(int);
  typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
}
|
2153 |
||
2154 |
// Install 'handler' for 'signal_number' with SA_RESTART|SA_SIGINFO and all
// signals blocked during handler execution.  Returns the previous handler,
// or (void*)-1 if sigaction() failed.
// NOTE: sa_handler and sa_sigaction share a union in struct sigaction on
// Linux, so assigning through sa_handler while setting SA_SIGINFO is valid;
// the stored pointer is actually invoked with the three-argument signature.
void* os::signal(int signal_number, void* handler) {
  struct sigaction sigAct, oldSigAct;

  // Block everything while the handler runs.
  sigfillset(&(sigAct.sa_mask));
  sigAct.sa_flags   = SA_RESTART|SA_SIGINFO;
  sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);

  if (sigaction(signal_number, &sigAct, &oldSigAct)) {
    // -1 means registration failed
    return (void *)-1;
  }

  return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
}
|
2168 |
||
2169 |
// Deliver 'signal_number' to the calling thread (thin wrapper over raise(3)).
void os::signal_raise(int signal_number) {
  ::raise(signal_number);
}
|
2172 |
||
2173 |
/* |
|
2174 |
* The following code is moved from os.cpp for making this |
|
2175 |
* code platform specific, which it is by its very nature. |
|
2176 |
*/ |
|
2177 |
||
2178 |
// Will be modified when max signal is changed to be dynamic |
|
2179 |
int os::sigexitnum_pd() { |
|
2180 |
return NSIG; |
|
2181 |
} |
|
2182 |
||
2183 |
// a counter for each possible signal value; slot NSIG is the synthetic
// "exit" signal returned by os::sigexitnum_pd()
static volatile jint pending_signals[NSIG+1] = { 0 };

// Linux(POSIX) specific hand shaking semaphore.  Posted once per recorded
// signal by signal_notify(); waited on in check_pending_signals().
static sem_t sig_sem;
|
2188 |
||
2189 |
// Platform-specific half of signal-dispatch initialization: zero the
// per-signal counters and create the unnamed, process-local semaphore.
void os::signal_init_pd() {
  // Clear all pending-signal counters.
  ::memset((void*)pending_signals, 0, sizeof(pending_signals));

  // Semaphore starts at zero: nothing pending yet.
  ::sem_init(&sig_sem, 0, 0);
}
|
2196 |
||
2197 |
// Record one occurrence of 'sig' and wake the Signal Dispatcher thread.
// Async-signal-safe: only an atomic increment plus sem_post.  The counter
// must be bumped BEFORE the post so the waiter always finds it.
void os::signal_notify(int sig) {
  Atomic::inc(&pending_signals[sig]);
  ::sem_post(&sig_sem);
}
|
2201 |
||
2202 |
// Scan pending_signals[] for a recorded signal and return its number,
// atomically decrementing its counter.  If none is pending and 'wait' is
// false, return -1; otherwise block on sig_sem until one arrives.  While
// blocked the thread participates in the suspend-equivalent protocol so an
// external suspend does not stall waiting on the semaphore.
static int check_pending_signals(bool wait) {
  // Reset the SIGINT burst counter (see UserHandler).
  Atomic::store(0, &sigint_count);
  for (;;) {
    for (int i = 0; i < NSIG + 1; i++) {
      jint n = pending_signals[i];
      // Claim one occurrence of signal i via CAS; retry scan on contention.
      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
        return i;
      }
    }
    if (!wait) {
      return -1;
    }
    JavaThread *thread = JavaThread::current();
    ThreadBlockInVM tbivm(thread);

    bool threadIsSuspended;
    do {
      thread->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
      ::sem_wait(&sig_sem);

      // were we externally suspended while we were waiting?
      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
      if (threadIsSuspended) {
        //
        // The semaphore has been incremented, but while we were waiting
        // another thread suspended us. We don't want to continue running
        // while suspended because that would surprise the thread that
        // suspended us.
        //
        // Re-post so the wakeup is not lost, then self-suspend.
        ::sem_post(&sig_sem);

        thread->java_suspend_self();
      }
    } while (threadIsSuspended);
  }
}
|
2239 |
||
2240 |
int os::signal_lookup() { |
|
2241 |
return check_pending_signals(false); |
|
2242 |
} |
|
2243 |
||
2244 |
int os::signal_wait() { |
|
2245 |
return check_pending_signals(true); |
|
2246 |
} |
|
2247 |
||
2248 |
//////////////////////////////////////////////////////////////////////////////// |
|
2249 |
// Virtual Memory |
|
2250 |
||
2251 |
// Hardware page size cached at os::init() time.  (Seems redundant as all
// get out, but the shared layer calls through this accessor.)
int os::vm_page_size() {
  int page_bytes = os::Linux::page_size();
  assert(page_bytes != -1, "must call os::init");
  return page_bytes;
}
|
2256 |
||
2257 |
// Linux mmap() allocates at page granularity, so the allocation
// granularity equals the page size.  (The stale comment here previously
// said "Solaris" — this is the Linux port.)
int os::vm_allocation_granularity() {
  assert(os::Linux::page_size() != -1, "must call os::init");
  return os::Linux::page_size();
}
|
2262 |
||
2263 |
// Rationale behind this function:
// current (Mon Apr 25 20:12:18 MSD 2005) oprofile drops samples without executable
// mapping for address (see lookup_dcookie() in the kernel module), thus we cannot get
// samples for JITted code. Here we create private executable mapping over the code cache
// and then we can use standard (well, almost, as mapping can change) way to provide
// info for the reporting script by storing timestamp and location of symbol
void linux_wrap_code(char* base, size_t size) {
  // Monotonic per-process counter so each wrap gets a unique backing file.
  static volatile jint cnt = 0;

  if (!UseOprofile) {
    return;
  }

  // "/tmp/hs-vm-" + pid + "-" + counter fits comfortably in 40 bytes.
  char buf[40];
  int num = Atomic::add(1, &cnt);

  sprintf(buf, "/tmp/hs-vm-%d-%d", os::current_process_id(), num);
  unlink(buf);

  int fd = open(buf, O_CREAT | O_RDWR, S_IRWXU);

  if (fd != -1) {
    // Extend the file to 'size' bytes by seeking to size-2 and writing one
    // byte, so the file-backed mapping below covers the whole region.
    off_t rv = lseek(fd, size-2, SEEK_SET);
    if (rv != (off_t)-1) {
      if (write(fd, "", 1) == 1) {
        // Overlay the code cache with a file-backed executable mapping so
        // the oprofile kernel module can attribute samples to it.
        mmap(base, size,
             PROT_READ|PROT_WRITE|PROT_EXEC,
             MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE, fd, 0);
      }
    }
    // The fd and directory entry are no longer needed once mapped; the
    // mapping itself keeps the inode alive.
    close(fd);
    unlink(buf);
  }
}
|
2297 |
||
2298 |
// NOTE: Linux kernel does not really reserve the pages for us. |
|
2299 |
// All it does is to check if there are enough free pages |
|
2300 |
// left at the time of mmap(). This could be a potential |
|
2301 |
// problem. |
|
2268 | 2302 |
bool os::commit_memory(char* addr, size_t size, bool exec) { |
2303 |
int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE; |
|
2304 |
uintptr_t res = (uintptr_t) ::mmap(addr, size, prot, |
|
1 | 2305 |
MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0); |
2306 |
return res != (uintptr_t) MAP_FAILED; |
|
2307 |
} |
|
2308 |
||
2268 | 2309 |
bool os::commit_memory(char* addr, size_t size, size_t alignment_hint, |
2310 |
bool exec) { |
|
2311 |
return commit_memory(addr, size, exec); |
|
1 | 2312 |
} |
2313 |
||
2314 |
void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) { } |
|
388 | 2315 |
|
2316 |
// Returning committed pages to the OS is the same as uncommitting them on
// Linux; delegate directly.
void os::free_memory(char *addr, size_t bytes) {
  uncommit_memory(addr, bytes);
}
|
2319 |
||
1615
b46d9f19bde2
6779436: NUMA allocator: libnuma expects certain size of the buffer in numa_node_to_cpus()
iveresov
parents:
1388
diff
changeset
|
2320 |
void os::numa_make_global(char *addr, size_t bytes) { |
b46d9f19bde2
6779436: NUMA allocator: libnuma expects certain size of the buffer in numa_node_to_cpus()
iveresov
parents:
1388
diff
changeset
|
2321 |
Linux::numa_interleave_memory(addr, bytes); |
b46d9f19bde2
6779436: NUMA allocator: libnuma expects certain size of the buffer in numa_node_to_cpus()
iveresov
parents:
1388
diff
changeset
|
2322 |
} |
388 | 2323 |
|
2324 |
// Bind the given region to the NUMA node identified by lgrp_hint
// (libnuma numa_tonode_memory via the resolved function pointer).
void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
  Linux::numa_tonode_memory(addr, bytes, lgrp_hint);
}
|
2327 |
||
2328 |
bool os::numa_topology_changed() { return false; } |
|
2329 |
||
2330 |
// Number of NUMA nodes (locality groups).  numa_max_node() returns the
// highest node id, so the count is id+1; fall back to a single group when
// libnuma reports nothing useful.
size_t os::numa_get_groups_num() {
  int max_node = Linux::numa_max_node();
  if (max_node > 0) {
    return max_node + 1;
  }
  return 1;
}
|
2334 |
||
2335 |
int os::numa_get_group_id() { |
|
2336 |
int cpu_id = Linux::sched_getcpu(); |
|
2337 |
if (cpu_id != -1) { |
|
2338 |
int lgrp_id = Linux::get_node_by_cpu(cpu_id); |
|
2339 |
if (lgrp_id != -1) { |
|
2340 |
return lgrp_id; |
|
2341 |
} |
|
1 | 2342 |
} |
2343 |
return 0; |
|
2344 |
} |
|
2345 |
||
388 | 2346 |
// Fill 'ids' with the identifiers of the leaf locality groups.  On Linux
// the groups are flat, so the ids are simply 0..size-1.
size_t os::numa_get_leaf_groups(int *ids, size_t size) {
  for (size_t idx = 0; idx < size; idx++) {
    ids[idx] = idx;
  }
  return size;
}
|
2352 |
||
1 | 2353 |
// Page-level residency/locality queries are not supported on Linux.
bool os::get_page_info(char *start, page_info* info) {
  return false;
}
|
2356 |
||
2357 |
// Page scanning is unsupported on Linux: report the whole range as scanned
// by returning 'end' immediately.
char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
  return end;
}
|
2360 |
||
388 | 2361 |
// Override libnuma's weak warning/error hooks with silent stubs so the VM
// does not spew libnuma diagnostics to stderr.
extern "C" void numa_warn(int number, char *where, ...) { }
extern "C" void numa_error(char *where) { }
|
2363 |
||
1615
b46d9f19bde2
6779436: NUMA allocator: libnuma expects certain size of the buffer in numa_node_to_cpus()
iveresov
parents:
1388
diff
changeset
|
2364 |
// Resolve the libnuma entry points the VM needs via dlopen/dlsym and cache
// them in os::Linux's function-pointer members.  Returns true only when
// sched_getcpu() works, libnuma.so.1 loads, and numa_available() succeeds;
// on any failure the VM falls back to non-NUMA allocation.
bool os::Linux::libnuma_init() {
  // sched_getcpu() should be in libc.
  set_sched_getcpu(CAST_TO_FN_PTR(sched_getcpu_func_t,
                                  dlsym(RTLD_DEFAULT, "sched_getcpu")));

  if (sched_getcpu() != -1) { // Does it work?
    // Load by soname so we don't depend on the -devel symlink being present.
    void *handle = dlopen("libnuma.so.1", RTLD_LAZY);
    if (handle != NULL) {
      set_numa_node_to_cpus(CAST_TO_FN_PTR(numa_node_to_cpus_func_t,
                                           dlsym(handle, "numa_node_to_cpus")));
      set_numa_max_node(CAST_TO_FN_PTR(numa_max_node_func_t,
                                       dlsym(handle, "numa_max_node")));
      set_numa_available(CAST_TO_FN_PTR(numa_available_func_t,
                                        dlsym(handle, "numa_available")));
      set_numa_tonode_memory(CAST_TO_FN_PTR(numa_tonode_memory_func_t,
                                            dlsym(handle, "numa_tonode_memory")));
      set_numa_interleave_memory(CAST_TO_FN_PTR(numa_interleave_memory_func_t,
                                                dlsym(handle, "numa_interleave_memory")));

      if (numa_available() != -1) {
        set_numa_all_nodes((unsigned long*)dlsym(handle, "numa_all_nodes"));
        // Create a cpu -> node mapping
        _cpu_to_node = new (ResourceObj::C_HEAP) GrowableArray<int>(0, true);
        rebuild_cpu_to_node_map();
        return true;
      }
    }
  }
  return false;
}
2395 |
||
2396 |
// rebuild_cpu_to_node_map() constructs a table mapping cpud id to node id.
// The table is later used in get_node_by_cpu().
// For every node, libnuma fills a bitmap of that node's CPUs; each set bit
// (word j, bit k) corresponds to cpu id j*BitsPerCLong+k.
void os::Linux::rebuild_cpu_to_node_map() {
  const size_t NCPUS = 32768; // Since the buffer size computation is very obscure
                              // in libnuma (possible values are starting from 16,
                              // and continuing up with every other power of 2, but less
                              // than the maximum number of CPUs supported by kernel), and
                              // is a subject to change (in libnuma version 2 the requirements
                              // are more reasonable) we'll just hardcode the number they use
                              // in the library.
  const size_t BitsPerCLong = sizeof(long) * CHAR_BIT;

  size_t cpu_num = os::active_processor_count();
  size_t cpu_map_size = NCPUS / BitsPerCLong;
  // Only the words that can cover the actual CPU count are meaningful.
  size_t cpu_map_valid_size =
    MIN2((cpu_num + BitsPerCLong - 1) / BitsPerCLong, cpu_map_size);

  cpu_to_node()->clear();
  cpu_to_node()->at_grow(cpu_num - 1);
  size_t node_num = numa_get_groups_num();

  unsigned long *cpu_map = NEW_C_HEAP_ARRAY(unsigned long, cpu_map_size);
  for (size_t i = 0; i < node_num; i++) {
    // Ask libnuma which CPUs belong to node i (buffer size is in bytes).
    if (numa_node_to_cpus(i, cpu_map, cpu_map_size * sizeof(unsigned long)) != -1) {
      for (size_t j = 0; j < cpu_map_valid_size; j++) {
        if (cpu_map[j] != 0) {
          for (size_t k = 0; k < BitsPerCLong; k++) {
            if (cpu_map[j] & (1UL << k)) {
              // cpu id j*BitsPerCLong+k runs on node i.
              cpu_to_node()->at_put(j * BitsPerCLong + k, i);
            }
          }
        }
      }
    }
  }
  FREE_C_HEAP_ARRAY(unsigned long, cpu_map);
}
|
2433 |
||
2434 |
int os::Linux::get_node_by_cpu(int cpu_id) { |
|
2435 |
if (cpu_to_node() != NULL && cpu_id >= 0 && cpu_id < cpu_to_node()->length()) { |
|
2436 |
return cpu_to_node()->at(cpu_id); |
|
2437 |
} |
|
2438 |
return -1; |
|
2439 |
} |
|
2440 |
||
2441 |
// Definitions of os::Linux's NUMA state: the cpu->node map and the libc /
// libnuma entry points resolved lazily in libnuma_init().  All remain NULL
// (and the corresponding features disabled) when libnuma is unavailable.
GrowableArray<int>* os::Linux::_cpu_to_node;
os::Linux::sched_getcpu_func_t os::Linux::_sched_getcpu;
os::Linux::numa_node_to_cpus_func_t os::Linux::_numa_node_to_cpus;
os::Linux::numa_max_node_func_t os::Linux::_numa_max_node;
os::Linux::numa_available_func_t os::Linux::_numa_available;
os::Linux::numa_tonode_memory_func_t os::Linux::_numa_tonode_memory;
os::Linux::numa_interleave_memory_func_t os::Linux::_numa_interleave_memory;
unsigned long* os::Linux::_numa_all_nodes;
388 | 2449 |
|
1 | 2450 |
bool os::uncommit_memory(char* addr, size_t size) { |
2268 | 2451 |
return ::mmap(addr, size, PROT_NONE, |
1 | 2452 |
MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0) |
2453 |
!= MAP_FAILED; |
|
2454 |
} |
|
2455 |
||
2456 |
static address _highest_vm_reserved_address = NULL; |
|
2457 |
||
2458 |
// If 'fixed' is true, anon_mmap() will attempt to reserve anonymous memory
// at 'requested_addr'. If there are existing memory mappings at the same
// location, however, they will be overwritten. If 'fixed' is false,
// 'requested_addr' is only treated as a hint, the return value may or
// may not start from the requested address. Unlike Linux mmap(), this
// function returns NULL to indicate failure.
static char* anon_mmap(char* requested_addr, size_t bytes, bool fixed) {
  char * addr;
  int flags;

  flags = MAP_PRIVATE | MAP_NORESERVE | MAP_ANONYMOUS;
  if (fixed) {
    assert((uintptr_t)requested_addr % os::Linux::page_size() == 0, "unaligned address");
    flags |= MAP_FIXED;
  }

  // Map uncommitted pages PROT_READ and PROT_WRITE, change access
  // to PROT_EXEC if executable when we commit the page.
  addr = (char*)::mmap(requested_addr, bytes, PROT_READ|PROT_WRITE,
                       flags, -1, 0);

  if (addr != MAP_FAILED) {
    // anon_mmap() should only get called during VM initialization,
    // don't need lock (actually we can skip locking even it can be called
    // from multiple threads, because _highest_vm_reserved_address is just a
    // hint about the upper limit of non-stack memory regions.)
    if ((address)addr + bytes > _highest_vm_reserved_address) {
      _highest_vm_reserved_address = (address)addr + bytes;
    }
  }

  return addr == MAP_FAILED ? NULL : addr;
}
|
2491 |
||
2492 |
// Don't update _highest_vm_reserved_address, because there might be memory |
|
2493 |
// regions above addr + size. If so, releasing a memory region only creates |
|
2494 |
// a hole in the address space, it doesn't help prevent heap-stack collision. |
|
2495 |
// |
|
2496 |
static int anon_munmap(char * addr, size_t size) { |
|
2497 |
return ::munmap(addr, size) == 0; |
|
2498 |
} |
|
2499 |
||
2500 |
// Reserve 'bytes' of address space, optionally at 'requested_addr' (treated
// as mandatory when non-NULL).  The alignment hint is unused on Linux.
char* os::reserve_memory(size_t bytes, char* requested_addr,
                         size_t alignment_hint) {
  bool fixed = (requested_addr != NULL);
  return anon_mmap(requested_addr, bytes, fixed);
}
|
2504 |
||
2505 |
bool os::release_memory(char* addr, size_t size) { |
|
2506 |
return anon_munmap(addr, size); |
|
2507 |
} |
|
2508 |
||
2509 |
// Accessor for the heap-stack collision hint maintained by anon_mmap().
static address highest_vm_reserved_address() {
  return _highest_vm_reserved_address;
}
|
2512 |
||
2513 |
// Apply 'prot' to the pages covering [addr, addr+size) via mprotect(2).
// Returns true on success.
static bool linux_mprotect(char* addr, size_t size, int prot) {
  // Linux wants the mprotect address argument to be page aligned.
  char* bottom = (char*)align_size_down((intptr_t)addr, os::Linux::page_size());

  // According to SUSv3, mprotect() should only be used with mappings
  // established by mmap(), and mmap() always maps whole pages. Unaligned
  // 'addr' likely indicates problem in the VM (e.g. trying to change
  // protection of malloc'ed or statically allocated memory). Check the
  // caller if you hit this assert.
  assert(addr == bottom, "sanity check");

  // Round the length up so the whole last page is covered.
  size = align_size_up(pointer_delta(addr, bottom, 1) + size, os::Linux::page_size());
  return ::mprotect(bottom, size, prot) == 0;
}
|
2527 |
||
823
9a5271881bc0
6716785: implicit null checks not triggering with CompressedOops
coleenp
parents:
781
diff
changeset
|
2528 |
// Set protections specified |
9a5271881bc0
6716785: implicit null checks not triggering with CompressedOops
coleenp
parents:
781
diff
changeset
|
2529 |
bool os::protect_memory(char* addr, size_t bytes, ProtType prot, |
9a5271881bc0
6716785: implicit null checks not triggering with CompressedOops
coleenp
parents:
781
diff
changeset
|
2530 |
bool is_committed) { |
9a5271881bc0
6716785: implicit null checks not triggering with CompressedOops
coleenp
parents:
781
diff
changeset
|
2531 |
unsigned int p = 0; |
9a5271881bc0
6716785: implicit null checks not triggering with CompressedOops
coleenp
parents:
781
diff
changeset
|
2532 |
switch (prot) { |
9a5271881bc0
6716785: implicit null checks not triggering with CompressedOops
coleenp
parents:
781
diff
changeset
|
2533 |
case MEM_PROT_NONE: p = PROT_NONE; break; |
9a5271881bc0
6716785: implicit null checks not triggering with CompressedOops
coleenp
parents:
781
diff
changeset
|
2534 |
case MEM_PROT_READ: p = PROT_READ; break; |
9a5271881bc0
6716785: implicit null checks not triggering with CompressedOops
coleenp
parents:
781
diff
changeset
|
2535 |
case MEM_PROT_RW: p = PROT_READ|PROT_WRITE; break; |
9a5271881bc0
6716785: implicit null checks not triggering with CompressedOops
coleenp
parents:
781
diff
changeset
|
2536 |
case MEM_PROT_RWX: p = PROT_READ|PROT_WRITE|PROT_EXEC; break; |
9a5271881bc0
6716785: implicit null checks not triggering with CompressedOops
coleenp
parents:
781
diff
changeset
|
2537 |
default: |
9a5271881bc0
6716785: implicit null checks not triggering with CompressedOops
coleenp
parents:
781
diff
changeset
|
2538 |
ShouldNotReachHere(); |
9a5271881bc0
6716785: implicit null checks not triggering with CompressedOops
coleenp
parents:
781
diff
changeset
|
2539 |
} |
9a5271881bc0
6716785: implicit null checks not triggering with CompressedOops
coleenp
parents:
781
diff
changeset
|
2540 |
// is_committed is unused. |
9a5271881bc0
6716785: implicit null checks not triggering with CompressedOops
coleenp
parents:
781
diff
changeset
|
2541 |
return linux_mprotect(addr, bytes, p); |
1 | 2542 |
} |
2543 |
||
2544 |
bool os::guard_memory(char* addr, size_t size) { |
|
2545 |
return linux_mprotect(addr, size, PROT_NONE); |
|
2546 |
} |
|
2547 |
||
2548 |
bool os::unguard_memory(char* addr, size_t size) { |
|
1664
fc9ed50498fb
6727377: VM stack guard pages on Windows should PAGE_READWRITE not PAGE_EXECUTE_READWRITE
coleenp
parents:
1615
diff
changeset
|
2549 |
return linux_mprotect(addr, size, PROT_READ|PROT_WRITE); |
1 | 2550 |
} |
2551 |
||
2552 |
// Large page support
// Huge-page size discovered by large_page_init(); 0 until initialized.
static size_t _large_page_size = 0;
|
2555 |
||
2556 |
// Determine the huge-page size and register it in _page_sizes[].  The size
// comes from -XX:LargePageSizeInBytes when set, otherwise from the
// "Hugepagesize:" line of /proc/meminfo, falling back to a per-CPU default.
// Returns false only when large pages are disabled.
bool os::large_page_init() {
  if (!UseLargePages) return false;

  if (LargePageSizeInBytes) {
    // User-specified size wins.
    _large_page_size = LargePageSizeInBytes;
  } else {
    // large_page_size on Linux is used to round up heap size. x86 uses either
    // 2M or 4M page, depending on whether PAE (Physical Address Extensions)
    // mode is enabled. AMD64/EM64T uses 2M page in 64bit mode. IA64 can use
    // page as large as 256M.
    //
    // Here we try to figure out page size by parsing /proc/meminfo and looking
    // for a line with the following format:
    //    Hugepagesize:     2048 kB
    //
    // If we can't determine the value (e.g. /proc is not mounted, or the text
    // format has been changed), we'll use the largest page size supported by
    // the processor.

    _large_page_size = IA32_ONLY(4 * M) AMD64_ONLY(2 * M) IA64_ONLY(256 * M) SPARC_ONLY(4 * M);

    FILE *fp = fopen("/proc/meminfo", "r");
    if (fp) {
      while (!feof(fp)) {
        int x = 0;
        char buf[16];
        if (fscanf(fp, "Hugepagesize: %d", &x) == 1) {
          // Only accept the value if the trailing unit is exactly " kB".
          if (x && fgets(buf, sizeof(buf), fp) && strcmp(buf, " kB\n") == 0) {
            _large_page_size = x * K;
            break;
          }
        } else {
          // skip to next line
          for (;;) {
            int ch = fgetc(fp);
            if (ch == EOF || ch == (int)'\n') break;
          }
        }
      }
      fclose(fp);
    }
  }

  // Publish the page sizes (largest first, zero-terminated) for the
  // shared collector code.
  const size_t default_page_size = (size_t)Linux::page_size();
  if (_large_page_size > default_page_size) {
    _page_sizes[0] = _large_page_size;
    _page_sizes[1] = default_page_size;
    _page_sizes[2] = 0;
  }

  // Large page support is available on 2.6 or newer kernel, some vendors
  // (e.g. Redhat) have backported it to their 2.4 based distributions.
  // We optimistically assume the support is available. If later it turns out
  // not true, VM will automatically switch to use regular page size.
  return true;
}
|
2612 |
||
2613 |
#ifndef SHM_HUGETLB |
|
2614 |
#define SHM_HUGETLB 04000 |
|
2615 |
#endif |
|
2616 |
||
2268 | 2617 |
// Reserve 'bytes' of large-page memory through SysV shared memory
// (shmget/shmat with SHM_HUGETLB).  The region is committed and pinned
// upfront.  Returns the attached address or NULL on failure.  Note:
// 'req_addr' is currently ignored — the kernel chooses the placement.
char* os::reserve_memory_special(size_t bytes, char* req_addr, bool exec) {
  // "exec" is passed in but not used.  Creating the shared image for
  // the code cache doesn't have an SHM_X executable permission to check.
  assert(UseLargePages, "only for large pages");

  key_t key = IPC_PRIVATE;
  char *addr;

  // Only warn when the user explicitly asked for large pages or a size.
  bool warn_on_failure = UseLargePages &&
                        (!FLAG_IS_DEFAULT(UseLargePages) ||
                         !FLAG_IS_DEFAULT(LargePageSizeInBytes)
                        );
  char msg[128];

  // Create a large shared memory region to attach to based on size.
  // Currently, size is the total size of the heap
  int shmid = shmget(key, bytes, SHM_HUGETLB|IPC_CREAT|SHM_R|SHM_W);
  if (shmid == -1) {
     // Possible reasons for shmget failure:
     // 1. shmmax is too small for Java heap.
     //    > check shmmax value: cat /proc/sys/kernel/shmmax
     //    > increase shmmax value: echo "0xffffffff" > /proc/sys/kernel/shmmax
     // 2. not enough large page memory.
     //    > check available large pages: cat /proc/meminfo
     //    > increase amount of large pages:
     //          echo new_value > /proc/sys/vm/nr_hugepages
     //      Note 1: different Linux may use different name for this property,
     //            e.g. on Redhat AS-3 it is "hugetlb_pool".
     //      Note 2: it's possible there's enough physical memory available but
     //            they are so fragmented after a long run that they can't
     //            coalesce into large pages. Try to reserve large pages when
     //            the system is still "fresh".
     if (warn_on_failure) {
       jio_snprintf(msg, sizeof(msg), "Failed to reserve shared memory (errno = %d).", errno);
       warning(msg);
     }
     return NULL;
  }

  // attach to the region
  addr = (char*)shmat(shmid, NULL, 0);
  // Capture errno before shmctl() can clobber it.
  int err = errno;

  // Remove shmid. If shmat() is successful, the actual shared memory segment
  // will be deleted when it's detached by shmdt() or when the process
  // terminates. If shmat() is not successful this will remove the shared
  // segment immediately.
  shmctl(shmid, IPC_RMID, NULL);

  if ((intptr_t)addr == -1) {
     if (warn_on_failure) {
       jio_snprintf(msg, sizeof(msg), "Failed to attach shared memory (errno = %d).", err);
       warning(msg);
     }
     return NULL;
  }

  return addr;
}
|
2676 |
||
2677 |
bool os::release_memory_special(char* base, size_t bytes) { |
|
2678 |
// detaching the SHM segment will also delete it, see reserve_memory_special() |
|
2679 |
int rslt = shmdt(base); |
|
2680 |
return rslt == 0; |
|
2681 |
} |
|
2682 |
||
2683 |
// Huge-page size discovered by large_page_init(); 0 if never initialized.
size_t os::large_page_size() {
  return _large_page_size;
}
|
2686 |
||
2687 |
// Linux does not support anonymous mmap with large page memory. The only way |
|
2688 |
// to reserve large page memory without file backing is through SysV shared |
|
2689 |
// memory API. The entire memory region is committed and pinned upfront. |
|
2690 |
// Hopefully this will change in the future... |
|
2691 |
bool os::can_commit_large_page_memory() { |
|
2692 |
return false; |
|
2693 |
} |
|
2694 |
||
252
050143a0dbfb
6642862: Code cache allocation fails with large pages after 6588638
jcoomes
parents:
235
diff
changeset
|
2695 |
// SysV shm large pages carry no execute permission, so code cannot be
// placed in large-page memory on Linux (see 6642862).
bool os::can_execute_large_page_memory() {
  return false;
}
050143a0dbfb
6642862: Code cache allocation fails with large pages after 6588638
jcoomes
parents:
235
diff
changeset
|
2698 |
|
1 | 2699 |
// Reserve memory at an arbitrary address, only if that area is |
2700 |
// available (and not reserved for something else). |
|
2701 |
||
2702 |
char* os::attempt_reserve_memory_at(size_t bytes, char* requested_addr) { |
|
2703 |
const int max_tries = 10; |
|
2704 |
char* base[max_tries]; |
|
2705 |
size_t size[max_tries]; |
|
2706 |
const size_t gap = 0x000000; |
|
2707 |
||
2708 |
// Assert only that the size is a multiple of the page size, since |
|
2709 |
// that's all that mmap requires, and since that's all we really know |
|
2710 |
// about at this low abstraction level. If we need higher alignment, |
|
2711 |
// we can either pass an alignment to this method or verify alignment |
|
2712 |
// in one of the methods further up the call chain. See bug 5044738. |
|
2713 |
assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block"); |
|
2714 |
||
2715 |
// Repeatedly allocate blocks until the block is allocated at the |
|
2716 |
// right spot. Give up after max_tries. Note that reserve_memory() will |
|
2717 |
// automatically update _highest_vm_reserved_address if the call is |
|
2718 |
// successful. The variable tracks the highest memory address every reserved |
|
2719 |
// by JVM. It is used to detect heap-stack collision if running with |
|
2720 |
// fixed-stack LinuxThreads. Because here we may attempt to reserve more |
|
2721 |
// space than needed, it could confuse the collision detecting code. To |
|
2722 |
// solve the problem, save current _highest_vm_reserved_address and |
|
2723 |
// calculate the correct value before return. |
|
2724 |
address old_highest = _highest_vm_reserved_address; |
|
2725 |
||
2726 |
// Linux mmap allows caller to pass an address as hint; give it a try first, |
|
2727 |
// if kernel honors the hint then we can return immediately. |
|
2728 |
char * addr = anon_mmap(requested_addr, bytes, false); |
|
2729 |
if (addr == requested_addr) { |
|
2730 |
return requested_addr; |
|
2731 |
} |
|
2732 |
||
2733 |
if (addr != NULL) { |
|
2734 |
// mmap() is successful but it fails to reserve at the requested address |
|
2735 |
anon_munmap(addr, bytes); |
|
2736 |
} |
|
2737 |
||
2738 |
int i; |
|
2739 |
for (i = 0; i < max_tries; ++i) { |
|
2740 |
base[i] = reserve_memory(bytes); |
|
2741 |
||
2742 |
if (base[i] != NULL) { |
|
2743 |
// Is this the block we wanted? |
|
2744 |
if (base[i] == requested_addr) { |
|
2745 |
size[i] = bytes; |
|
2746 |
break; |
|
2747 |
} |
|
2748 |
||
2749 |
// Does this overlap the block we wanted? Give back the overlapped |
|
2750 |
// parts and try again. |
|
2751 |
||
2752 |
size_t top_overlap = requested_addr + (bytes + gap) - base[i]; |
|
2753 |
if (top_overlap >= 0 && top_overlap < bytes) { |
|
2754 |
unmap_memory(base[i], top_overlap); |
|
2755 |
base[i] += top_overlap; |
|
2756 |
size[i] = bytes - top_overlap; |
|
2757 |
} else { |
|
2758 |
size_t bottom_overlap = base[i] + bytes - requested_addr; |
|
2759 |
if (bottom_overlap >= 0 && bottom_overlap < bytes) { |
|
2760 |
unmap_memory(requested_addr, bottom_overlap); |
|
2761 |
size[i] = bytes - bottom_overlap; |
|
2762 |
} else { |
|
2763 |
size[i] = bytes; |
|
2764 |
} |
|
2765 |
} |
|
2766 |
} |
|
2767 |
} |
|
2768 |
||
2769 |
// Give back the unused reserved pieces. |
|
2770 |
||
2771 |
for (int j = 0; j < i; ++j) { |
|
2772 |
if (base[j] != NULL) { |
|
2773 |
unmap_memory(base[j], size[j]); |
|
2774 |
} |
|
2775 |
} |
|
2776 |
||
2777 |
if (i < max_tries) { |
|
2778 |
_highest_vm_reserved_address = MAX2(old_highest, (address)requested_addr + bytes); |
|
2779 |
return requested_addr; |
|
2780 |
} else { |
|
2781 |
_highest_vm_reserved_address = old_highest; |
|
2782 |
return NULL; |
|
2783 |
} |
|
2784 |
} |
|
2785 |
||
2786 |
// Thin wrapper around the read(2) system call; returns the byte count
// reported by the kernel.
size_t os::read(int fd, void *buf, unsigned int nBytes) {
  size_t res = ::read(fd, buf, nBytes);
  return res;
}
|
2789 |
||
2790 |
// TODO-FIXME: reconcile Solaris' os::sleep with the linux variation.
// Solaris uses poll(), linux uses park().
// Poll() is likely a better choice, assuming that Thread.interrupt()
// generates a SIGUSRx signal. Note that SIGUSR1 can interfere with
// SIGSEGV, see 4355769.

// Nanoseconds per millisecond; used below to convert javaTimeNanos()
// deltas into the millisecond units that os::sleep() works in.
const int NANOSECS_PER_MILLISECS = 1000000;
|
2797 |
||
2798 |
// Sleep for 'millis' milliseconds on behalf of 'thread' (which must be the
// current thread). If 'interruptible', returns OS_INTRPT as soon as the
// thread's interrupt status is observed (consuming it); otherwise sleeps
// through interrupts. Returns OS_OK when the full duration has elapsed.
int os::sleep(Thread* thread, jlong millis, bool interruptible) {
  assert(thread == Thread::current(), "thread consistency check");

  // Reset the sleep event BEFORE checking for interrupts, so an unpark()
  // from os::interrupt() issued after the reset cannot be lost.
  ParkEvent * const slp = thread->_SleepEvent ;
  slp->reset() ;
  OrderAccess::fence() ;

  if (interruptible) {
    jlong prevtime = javaTimeNanos();

    for (;;) {
      // Check-and-clear interrupt status first; park() may wake spuriously.
      if (os::is_interrupted(thread, true)) {
        return OS_INTRPT;
      }

      jlong newtime = javaTimeNanos();

      if (newtime - prevtime < 0) {
        // time moving backwards, should only happen if no monotonic clock
        // not a guarantee() because JVM should not abort on kernel/glibc bugs
        assert(!Linux::supports_monotonic_clock(), "time moving backwards");
      } else {
        // Deduct the time already slept from the remaining budget.
        millis -= (newtime - prevtime) / NANOSECS_PER_MILLISECS;
      }

      if(millis <= 0) {
        return OS_OK;
      }

      prevtime = newtime;

      {
        // Interruptible sleep is only ever requested for Java threads, so
        // we can enter the blocked-in-VM state and honor suspend requests.
        assert(thread->is_Java_thread(), "sanity check");
        JavaThread *jt = (JavaThread *) thread;
        ThreadBlockInVM tbivm(jt);
        OSThreadWaitState osts(jt->osthread(), false /* not Object.wait() */);

        jt->set_suspend_equivalent();
        // cleared by handle_special_suspend_equivalent_condition() or
        // java_suspend_self() via check_and_wait_while_suspended()

        slp->park(millis);

        // were we externally suspended while we were waiting?
        jt->check_and_wait_while_suspended();
      }
    }
  } else {
    // Non-interruptible variant: used by non-Java threads as well, so no
    // ThreadBlockInVM / suspend-equivalent handshaking here.
    OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
    jlong prevtime = javaTimeNanos();

    for (;;) {
      // It'd be nice to avoid the back-to-back javaTimeNanos() calls on
      // the 1st iteration ...
      jlong newtime = javaTimeNanos();

      if (newtime - prevtime < 0) {
        // time moving backwards, should only happen if no monotonic clock
        // not a guarantee() because JVM should not abort on kernel/glibc bugs
        assert(!Linux::supports_monotonic_clock(), "time moving backwards");
      } else {
        millis -= (newtime - prevtime) / NANOSECS_PER_MILLISECS;
      }

      if(millis <= 0) break ;

      prevtime = newtime;
      slp->park(millis);
    }
    return OS_OK ;
  }
}
|
2870 |
||
2871 |
int os::naked_sleep() { |
|
2872 |
// %% make the sleep time an integer flag. for now use 1 millisec. |
|
2873 |
return os::sleep(Thread::current(), 1, false); |
|
2874 |
} |
|
2875 |
||
2876 |
// Sleep forever; naked call to OS-specific sleep; use with CAUTION |
|
2877 |
void os::infinite_sleep() { |
|
2878 |
while (true) { // sleep forever ... |
|
2879 |
::sleep(100); // ... 100 seconds at a time |
|
2880 |
} |
|
2881 |
} |
|
2882 |
||
2883 |
// Used to convert frequent JVM_Yield() to nops |
|
2884 |
bool os::dont_yield() { |
|
2885 |
return DontYieldALot; |
|
2886 |
} |
|
2887 |
||
2888 |
// Voluntarily relinquish the remainder of this thread's timeslice.
void os::yield() {
  sched_yield();
}
|
2891 |
||
2892 |
os::YieldResult os::NakedYield() {
  // Bare sched_yield(); Linux gives no feedback on whether another
  // thread actually ran, hence YIELD_UNKNOWN.
  sched_yield();
  return os::YIELD_UNKNOWN;
}
|
2893 |
||
2894 |
void os::yield_all(int attempts) {
  // Yields to all threads, including threads with lower priorities
  // Threads on Linux are all with same priority. The Solaris style
  // os::yield_all() with nanosleep(1ms) is not necessary.
  // 'attempts' is unused here; it only matters on platforms that escalate
  // to sleeping after repeated yields.
  sched_yield();
}
|
2900 |
||
2901 |
// Called from the tight loops to possibly influence time-sharing heuristics
void os::loop_breaker(int attempts) {
  // On Linux this simply delegates to yield_all(), i.e. sched_yield().
  os::yield_all(attempts);
}
|
2905 |
||
2906 |
//////////////////////////////////////////////////////////////////////////////// |
|
2907 |
// thread priority support |
|
2908 |
||
2909 |
// Note: Normal Linux applications are run with SCHED_OTHER policy. SCHED_OTHER
// only supports dynamic priority, static priority must be zero. For real-time
// applications, Linux supports SCHED_RR which allows static priority (1-99).
// However, for large multi-threaded applications, SCHED_RR is not only slower
// than SCHED_OTHER, but also very unstable (my volano tests hang hard 4 out
// of 5 runs - Sep 2005).
//
// The following code actually changes the niceness of kernel-thread/LWP. It
// has an assumption that setpriority() only modifies one kernel-thread/LWP,
// not the entire user process, and user level threads are 1:1 mapped to kernel
// threads. It has always been the case, but could change in the future. For
// this reason, the code should not be used as default (ThreadPriorityPolicy=0).
// It is only used when ThreadPriorityPolicy=1 and requires root privilege.

// Maps Java thread priorities (1..MaxPriority) to Linux nice values:
// lower (more negative) nice = higher scheduling priority.
int os::java_to_os_priority[MaxPriority + 1] = {
  19,              // 0 Entry should never be used

   4,              // 1 MinPriority
   3,              // 2
   2,              // 3

   1,              // 4
   0,              // 5 NormPriority
  -1,              // 6

  -2,              // 7
  -3,              // 8
  -4,              // 9 NearMaxPriority

  -5               // 10 MaxPriority
};
|
2940 |
||
2941 |
static int prio_init() { |
|
2942 |
if (ThreadPriorityPolicy == 1) { |
|
2943 |
// Only root can raise thread priority. Don't allow ThreadPriorityPolicy=1 |
|
2944 |
// if effective uid is not root. Perhaps, a more elegant way of doing |
|
2945 |
// this is to test CAP_SYS_NICE capability, but that will require libcap.so |
|
2946 |
if (geteuid() != 0) { |
|
2947 |
if (!FLAG_IS_DEFAULT(ThreadPriorityPolicy)) { |
|
2948 |
warning("-XX:ThreadPriorityPolicy requires root privilege on Linux"); |
|
2949 |
} |
|
2950 |
ThreadPriorityPolicy = 0; |
|
2951 |
} |
|
2952 |
} |
|
2953 |
return 0; |
|
2954 |
} |
|
2955 |
||
2956 |
// Apply 'newpri' (a nice value from java_to_os_priority) to the kernel
// thread backing 'thread'. No-op unless priorities are enabled.
OSReturn os::set_native_priority(Thread* thread, int newpri) {
  if (!UseThreadPriorities || ThreadPriorityPolicy == 0) {
    return OS_OK;
  }
  const int rc = setpriority(PRIO_PROCESS, thread->osthread()->thread_id(), newpri);
  return (rc == 0) ? OS_OK : OS_ERR;
}
|
2962 |
||
2963 |
// Read back the OS nice value for 'thread' into *priority_ptr.
OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
  if (!UseThreadPriorities || ThreadPriorityPolicy == 0) {
    // Priority support is off: report the nominal normal priority.
    *priority_ptr = java_to_os_priority[NormPriority];
    return OS_OK;
  }

  // getpriority() may legitimately return -1, so errno (cleared first)
  // is the only reliable error indicator.
  errno = 0;
  int prio = getpriority(PRIO_PROCESS, thread->osthread()->thread_id());
  *priority_ptr = prio;
  if (prio != -1 || errno == 0) {
    return OS_OK;
  }
  return OS_ERR;
}
|
2973 |
||
2974 |
// Hint to the underlying OS that a task switch would not be good.
// Void return because it's a hint and can fail.
// Linux offers no such hint API, so this is intentionally a no-op.
void os::hint_no_preempt() {}
|
2977 |
||
2978 |
//////////////////////////////////////////////////////////////////////////////// |
|
2979 |
// suspend/resume support |
|
2980 |
||
2981 |
// the low-level signal-based suspend/resume support is a remnant from the |
|
2982 |
// old VM-suspension that used to be for java-suspension, safepoints etc, |
|
2983 |
// within hotspot. Now there is a single use-case for this: |
|
2984 |
// - calling get_thread_pc() on the VMThread by the flat-profiler task |
|
2985 |
// that runs in the watcher thread. |
|
2986 |
// The remaining code is greatly simplified from the more general suspension |
|
2987 |
// code that used to be used. |
|
2988 |
// |
|
2989 |
// The protocol is quite simple: |
|
2990 |
// - suspend: |
|
2991 |
// - sends a signal to the target thread |
|
2992 |
// - polls the suspend state of the osthread using a yield loop |
|
2993 |
// - target thread signal handler (SR_handler) sets suspend state |
|
2994 |
// and blocks in sigsuspend until continued |
|
2995 |
// - resume: |
|
2996 |
// - sets target osthread state to continue |
|
2997 |
// - sends signal to end the sigsuspend loop in the SR_handler |
|
2998 |
// |
|
2999 |
// Note that the SR_lock plays no role in this suspend/resume protocol. |
|
3000 |
// |
|
3001 |
||
3002 |
// Called by SR_handler on the suspended thread once a resume signal
// arrives: drops the saved signal context and publishes "resumed".
static void resume_clear_context(OSThread *osthread) {
  osthread->set_ucontext(NULL);
  osthread->set_siginfo(NULL);

  // notify the suspend action is completed, we have now resumed
  osthread->sr.clear_suspended();
}
|
3009 |
||
3010 |
// Stash the signal-delivery context on the OSThread so that, while the
// thread is suspended, get_thread_pc() etc. can inspect its registers.
static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) {
  osthread->set_ucontext(context);
  osthread->set_siginfo(siginfo);
}
|
3014 |
||
3015 |
// |
|
3016 |
// Handler function invoked when a thread's execution is suspended or |
|
3017 |
// resumed. We have to be careful that only async-safe functions are |
|
3018 |
// called here (Note: most pthread functions are not async safe and |
|
3019 |
// should be avoided.) |
|
3020 |
// |
|
3021 |
// Note: sigwait() is a more natural fit than sigsuspend() from an |
|
3022 |
// interface point of view, but sigwait() prevents the signal hander |
|
3023 |
// from being run. libpthread would get very confused by not having |
|
3024 |
// its signal handlers run and prevents sigwait()'s use with the |
|
3025 |
// mutex granting granting signal. |
|
3026 |
// |
|
3027 |
// Currently only ever called on the VMThread |
|
3028 |
// |
|
3029 |
// Signal handler for SR_signum implementing the suspend/resume protocol
// described above. Runs in signal context, so only async-signal-safe
// calls are made here. Currently only ever invoked on the VMThread.
static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
  // Save and restore errno to avoid confusing native code with EINTR
  // after sigsuspend.
  int old_errno = errno;

  Thread* thread = Thread::current();
  OSThread* osthread = thread->osthread();
  assert(thread->is_VM_thread(), "Must be VMThread");
  // read current suspend action
  int action = osthread->sr.suspend_action();
  if (action == SR_SUSPEND) {
    suspend_save_context(osthread, siginfo, context);

    // Notify the suspend action is about to be completed. do_suspend()
    // waits until SR_SUSPENDED is set and then returns. We will wait
    // here for a resume signal and that completes the suspend-other
    // action. do_suspend/do_resume is always called as a pair from
    // the same thread - so there are no races

    // notify the caller
    osthread->sr.set_suspended();

    sigset_t suspend_set;  // signals for sigsuspend()

    // get current set of blocked signals and unblock resume signal
    pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
    sigdelset(&suspend_set, SR_signum);

    // wait here until we are resumed
    do {
      sigsuspend(&suspend_set);
      // ignore all returns until we get a resume signal
    } while (osthread->sr.suspend_action() != SR_CONTINUE);

    resume_clear_context(osthread);

  } else {
    // The resume signal merely breaks the thread out of sigsuspend()
    // above; the loop condition observes the new action.
    assert(action == SR_CONTINUE, "unexpected sr action");
    // nothing special to do - just leave the handler
  }

  errno = old_errno;
}
|
3072 |
||
3073 |
||
3074 |
// Install the suspend/resume signal handler and compute SR_signum.
// Returns 0 on success, -1 if sigaction() fails.
static int SR_initialize() {
  struct sigaction act;
  char *s;
  /* Get signal number to use for suspend/resume */
  if ((s = ::getenv("_JAVA_SR_SIGNUM")) != 0) {
    int sig = ::strtol(s, 0, 10);
    // BUGFIX: the range check used '||', which is true for every int
    // (any sig <= 0 trivially satisfies sig < _NSIG), so ANY value of
    // _JAVA_SR_SIGNUM - including 0 and out-of-range numbers - was
    // accepted. Both bounds must hold: use '&&'.
    if (sig > 0 && sig < _NSIG) {
      SR_signum = sig;
    }
  }

  assert(SR_signum > SIGSEGV && SR_signum > SIGBUS,
    "SR_signum must be greater than max(SIGSEGV, SIGBUS), see 4355769");

  sigemptyset(&SR_sigset);
  sigaddset(&SR_sigset, SR_signum);

  /* Set up signal handler for suspend/resume */
  act.sa_flags = SA_RESTART|SA_SIGINFO;
  act.sa_handler = (void (*)(int)) SR_handler;

  // SR_signum is blocked by default.
  // 4528190 - We also need to block pthread restart signal (32 on all
  // supported Linux platforms). Note that LinuxThreads need to block
  // this signal for all threads to work properly. So we don't have
  // to use hard-coded signal number when setting up the mask.
  pthread_sigmask(SIG_BLOCK, NULL, &act.sa_mask);

  if (sigaction(SR_signum, &act, 0) == -1) {
    return -1;
  }

  // Save signal flag
  os::Linux::set_our_sigflags(SR_signum, act.sa_flags);
  return 0;
}
|
3110 |
||
3111 |
// Teardown counterpart of SR_initialize(); nothing to release on Linux.
static int SR_finalize() {
  return 0;
}
|
3114 |
||
3115 |
||
3116 |
// returns true on success and false on error - really an error is fatal |
|
3117 |
// but this seems the normal response to library errors |
|
3118 |
static bool do_suspend(OSThread* osthread) { |
|
3119 |
// mark as suspended and send signal |
|
3120 |
osthread->sr.set_suspend_action(SR_SUSPEND); |
|
3121 |
int status = pthread_kill(osthread->pthread_id(), SR_signum); |
|
3122 |
assert_status(status == 0, status, "pthread_kill"); |
|
3123 |
||
3124 |
// check status and wait until notified of suspension |
|
3125 |
if (status == 0) { |
|
3126 |
for (int i = 0; !osthread->sr.is_suspended(); i++) { |
|
3127 |
os::yield_all(i); |
|
3128 |
} |
|
3129 |
osthread->sr.set_suspend_action(SR_NONE); |
|
3130 |
return true; |
|
3131 |
} |
|
3132 |
else { |
|
3133 |
osthread->sr.set_suspend_action(SR_NONE); |
|
3134 |
return false; |
|
3135 |
} |
|
3136 |
} |
|
3137 |
||
3138 |
// Counterpart of do_suspend(): signals the suspended thread out of its
// sigsuspend() loop and waits until it reports itself resumed.
static void do_resume(OSThread* osthread) {
  assert(osthread->sr.is_suspended(), "thread should be suspended");
  osthread->sr.set_suspend_action(SR_CONTINUE);

  int status = pthread_kill(osthread->pthread_id(), SR_signum);
  assert_status(status == 0, status, "pthread_kill");
  // check status and wait until notified of resumption
  if (status == 0) {
    int spin = 0;
    while (osthread->sr.is_suspended()) {
      os::yield_all(spin++);
    }
  }
  osthread->sr.set_suspend_action(SR_NONE);
}
|
3152 |
||
3153 |
//////////////////////////////////////////////////////////////////////////////// |
|
3154 |
// interrupt support |
|
3155 |
||
3156 |
// Set 'thread's interrupt status and wake it from any park()-based wait
// (os::sleep, Object.wait-style parks, LockSupport.park).
void os::interrupt(Thread* thread) {
  assert(Thread::current() == thread || Threads_lock->owned_by_self(),
    "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();

  if (!osthread->interrupted()) {
    osthread->set_interrupted(true);
    // More than one thread can get here with the same value of osthread,
    // resulting in multiple notifications.  We do, however, want the store
    // to interrupted() to be visible to other threads before we execute unpark().
    OrderAccess::fence();
    ParkEvent * const slp = thread->_SleepEvent ;
    if (slp != NULL) slp->unpark() ;
  }

  // For JSR166. Unpark even if interrupt status already was set
  if (thread->is_Java_thread())
    ((JavaThread*)thread)->parker()->unpark();

  // Also wake a thread blocked on its generic ParkEvent (monitor waits).
  ParkEvent * ev = thread->_ParkEvent ;
  if (ev != NULL) ev->unpark() ;

}
|
3180 |
||
3181 |
// Query (and optionally consume) 'thread's interrupt status.
bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
  assert(Thread::current() == thread || Threads_lock->owned_by_self(),
    "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();
  const bool was_interrupted = osthread->interrupted();

  if (was_interrupted && clear_interrupted) {
    // Consume the status, as Thread.interrupted() semantics require.
    osthread->set_interrupted(false);
    // consider thread->_SleepEvent->reset() ... optional optimization
  }

  return was_interrupted;
}
|
3196 |
||
3197 |
/////////////////////////////////////////////////////////////////////////////////// |
|
3198 |
// signal handling (except suspend/resume) |
|
3199 |
||
3200 |
// This routine may be used by user applications as a "hook" to catch signals.
// The user-defined signal handler must pass unrecognized signals to this
// routine, and if it returns true (non-zero), then the signal handler must
// return immediately. If the flag "abort_if_unrecognized" is true, then this
// routine will never return false (zero), but instead will execute a VM panic
// routine to kill the process.
//
// If this routine returns false, it is OK to call it again. This allows
// the user-defined signal handler to perform checks either before or after
// the VM performs its own checks. Naturally, the user code would be making
// a serious error if it tried to handle an exception (such as a null check
// or breakpoint) that the VM was generating for its own correct operation.
//
// This routine may recognize any of the following kinds of signals:
// SIGBUS, SIGSEGV, SIGILL, SIGFPE, SIGQUIT, SIGPIPE, SIGXFSZ, SIGUSR1.
// It should be consulted by handlers for any of those signals.
//
// The caller of this routine must pass in the three arguments supplied
// to the function referred to in the "sa_sigaction" (not the "sa_handler")
// field of the structure passed to sigaction(). This routine assumes that
// the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
//
// Note that the VM will print warnings if it detects conflicting signal
// handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
//
// Defined in the platform-specific signal-handling translation unit;
// declared here so signalHandler() below can forward to it.
extern "C" int
JVM_handle_linux_signal(int signo, siginfo_t* siginfo,
                        void* ucontext, int abort_if_unrecognized);
|
3228 |
||
3229 |
// The sa_sigaction entry point installed by set_signal_handler() for all
// VM-handled signals; forwards to the shared JVM_handle_linux_signal()
// with abort_if_unrecognized=true (unrecognized signals are fatal here).
void signalHandler(int sig, siginfo_t* info, void* uc) {
  assert(info != NULL && uc != NULL, "it must be old kernel");
  JVM_handle_linux_signal(sig, info, uc, true);
}
|
3233 |
||
3234 |
||
3235 |
// This boolean allows users to forward their own non-matching signals
// to JVM_handle_linux_signal, harmlessly.
bool os::Linux::signal_handlers_are_installed = false;

// For signal-chaining
// Pre-existing handlers saved before the VM installed its own, indexed by
// signal number; only slots whose bit is set in 'sigs' are valid.
struct sigaction os::Linux::sigact[MAXSIGNUM];
// Bitmask: bit 'sig' set means sigact[sig] holds a saved handler.
unsigned int os::Linux::sigs = 0;
// True once libjsig's interposition entry points were found via dlsym.
bool os::Linux::libjsig_is_loaded = false;
// Type of libjsig's JVM_get_signal_action lookup function.
typedef struct sigaction *(*get_signal_t)(int);
get_signal_t os::Linux::get_signal_action = NULL;
|
3245 |
||
3246 |
struct sigaction* os::Linux::get_chained_signal_action(int sig) { |
|
3247 |
struct sigaction *actp = NULL; |
|
3248 |
||
3249 |
if (libjsig_is_loaded) { |
|
3250 |
// Retrieve the old signal handler from libjsig |
|
3251 |
actp = (*get_signal_action)(sig); |
|
3252 |
} |
|
3253 |
if (actp == NULL) { |
|
3254 |
// Retrieve the preinstalled signal handler from jvm |
|
3255 |
actp = get_preinstalled_handler(sig); |
|
3256 |
} |
|
3257 |
||
3258 |
return actp; |
|
3259 |
} |
|
3260 |
||
3261 |
// Invoke the displaced (chained) handler 'actp' for 'sig', honoring its
// SA_NODEFER / SA_SIGINFO / SA_RESETHAND flags and its signal mask.
// Returns true if the signal was dispatched to the chained handler (or was
// SIG_IGN); returns false for SIG_DFL so the VM treats it as unexpected.
static bool call_chained_handler(struct sigaction *actp, int sig,
                                 siginfo_t *siginfo, void *context) {
  // Call the old signal handler
  if (actp->sa_handler == SIG_DFL) {
    // It's more reasonable to let jvm treat it as an unexpected exception
    // instead of taking the default action.
    return false;
  } else if (actp->sa_handler != SIG_IGN) {
    if ((actp->sa_flags & SA_NODEFER) == 0) {
      // automatically block the signal
      sigaddset(&(actp->sa_mask), sig);
    }

    sa_handler_t hand;
    sa_sigaction_t sa;
    bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
    // retrieve the chained handler: sa_handler and sa_sigaction overlay a
    // union, so only the member selected by SA_SIGINFO is meaningful.
    if (siginfo_flag_set) {
      sa = actp->sa_sigaction;
    } else {
      hand = actp->sa_handler;
    }

    if ((actp->sa_flags & SA_RESETHAND) != 0) {
      // One-shot handler semantics: revert to default before calling.
      actp->sa_handler = SIG_DFL;
    }

    // try to honor the signal mask
    sigset_t oset;
    pthread_sigmask(SIG_SETMASK, &(actp->sa_mask), &oset);

    // call into the chained handler
    if (siginfo_flag_set) {
      (*sa)(sig, siginfo, context);
    } else {
      (*hand)(sig);
    }

    // restore the signal mask
    pthread_sigmask(SIG_SETMASK, &oset, 0);
  }
  // Tell jvm's signal handler the signal is taken care of.
  return true;
}
|
3305 |
||
3306 |
bool os::Linux::chained_handler(int sig, siginfo_t* siginfo, void* context) { |
|
3307 |
bool chained = false; |
|
3308 |
// signal-chaining |
|
3309 |
if (UseSignalChaining) { |
|
3310 |
struct sigaction *actp = get_chained_signal_action(sig); |
|
3311 |
if (actp != NULL) { |
|
3312 |
chained = call_chained_handler(actp, sig, siginfo, context); |
|
3313 |
} |
|
3314 |
} |
|
3315 |
return chained; |
|
3316 |
} |
|
3317 |
||
3318 |
struct sigaction* os::Linux::get_preinstalled_handler(int sig) { |
|
3319 |
if ((( (unsigned int)1 << sig ) & sigs) != 0) { |
|
3320 |
return &sigact[sig]; |
|
3321 |
} |
|
3322 |
return NULL; |
|
3323 |
} |
|
3324 |
||
3325 |
// Record the handler displaced for 'sig' so chained_handler() can find it.
void os::Linux::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
  assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
  sigact[sig] = oldAct;
  // Mark the slot valid in the bitmask.
  const unsigned int bit = (unsigned int)1 << sig;
  sigs |= bit;
}
|
3330 |
||
3331 |
// for diagnostic
// sa_flags the VM installed per signal, recorded so later checks can detect
// whether user code replaced or altered our handlers; indexed by signal.
int os::Linux::sigflags[MAXSIGNUM];
|
3333 |
||
3334 |
// Return the sa_flags recorded for 'sig' when the VM installed its handler.
int os::Linux::get_our_sigflags(int sig) {
  assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
  return sigflags[sig];
}
|
3338 |
||
3339 |
// Record the sa_flags the VM used for 'sig' (see sigflags[] above).
void os::Linux::set_our_sigflags(int sig, int flags) {
  assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
  sigflags[sig] = flags;
}
|
3343 |
||
3344 |
// Install the VM's signalHandler() for 'sig' (when set_installed), or reset
// it to SIG_DFL (when !set_installed). If a foreign handler is already
// present, either leave it alone (AllowUserSignalHandlers), save it for
// chaining (UseSignalChaining), or abort.
void os::Linux::set_signal_handler(int sig, bool set_installed) {
  // Check for overwrite.
  struct sigaction oldAct;
  sigaction(sig, (struct sigaction*)NULL, &oldAct);

  // sa_sigaction and sa_handler overlay a union; a non-zero sa_sigaction
  // is taken as "the SA_SIGINFO form is in use".
  void* oldhand = oldAct.sa_sigaction
                ? CAST_FROM_FN_PTR(void*,  oldAct.sa_sigaction)
                : CAST_FROM_FN_PTR(void*,  oldAct.sa_handler);
  if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
      oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
      oldhand != CAST_FROM_FN_PTR(void*, (sa_sigaction_t)signalHandler)) {
    if (AllowUserSignalHandlers || !set_installed) {
      // Do not overwrite; user takes responsibility to forward to us.
      return;
    } else if (UseSignalChaining) {
      // save the old handler in jvm
      save_preinstalled_handler(sig, oldAct);
      // libjsig also interposes the sigaction() call below and saves the
      // old sigaction on it own.
    } else {
      fatal2("Encountered unexpected pre-existing sigaction handler %#lx for signal %d.", (long)oldhand, sig);
    }
  }

  struct sigaction sigAct;
  sigfillset(&(sigAct.sa_mask));
  sigAct.sa_handler = SIG_DFL;
  if (!set_installed) {
    sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
  } else {
    sigAct.sa_sigaction = signalHandler;
    sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
  }
  // Save flags, which are set by ours
  assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
  sigflags[sig] = sigAct.sa_flags;

  int ret = sigaction(sig, &sigAct, &oldAct);
  assert(ret == 0, "check");

  // Re-read what we displaced; if it differs from the first read, someone
  // installed a handler concurrently between the two sigaction() calls.
  void* oldhand2  = oldAct.sa_sigaction
                  ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
                  : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  assert(oldhand2 == oldhand, "no concurrent signal handler installation");
}
|
3389 |
||
3390 |
// install signal handlers for signals that HotSpot needs to |
|
3391 |
// handle in order to support Java-level exception handling. |
|
3392 |
||
3393 |
void os::Linux::install_signal_handlers() { |
|
3394 |
if (!signal_handlers_are_installed) { |
|
3395 |
signal_handlers_are_installed = true; |
|
3396 |
||
3397 |
// signal-chaining |
|
3398 |
typedef void (*signal_setting_t)(); |
|
3399 |
signal_setting_t begin_signal_setting = NULL; |
|
3400 |
signal_setting_t end_signal_setting = NULL; |
|
3401 |
begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t, |
|
3402 |
dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting")); |
|
3403 |
if (begin_signal_setting != NULL) { |
|
3404 |
end_signal_setting = CAST_TO_FN_PTR(signal_setting_t, |
|
3405 |
dlsym(RTLD_DEFAULT, "JVM_end_signal_setting")); |
|
3406 |
get_signal_action = CAST_TO_FN_PTR(get_signal_t, |
|
3407 |
dlsym(RTLD_DEFAULT, "JVM_get_signal_action")); |
|
3408 |
libjsig_is_loaded = true; |
|
3409 |
assert(UseSignalChaining, "should enable signal-chaining"); |
|
3410 |
} |
|
3411 |
if (libjsig_is_loaded) { |
|
3412 |
// Tell libjsig jvm is setting signal handlers |
|
3413 |
(*begin_signal_setting)(); |
|
3414 |
} |
|
3415 |
||
3416 |
set_signal_handler(SIGSEGV, true); |
|
3417 |
set_signal_handler(SIGPIPE, true); |
|
3418 |
set_signal_handler(SIGBUS, true); |
|
3419 |
set_signal_handler(SIGILL, true); |
|
3420 |
set_signal_handler(SIGFPE, true); |
|
3421 |
set_signal_handler(SIGXFSZ, true); |
|
3422 |
||
3423 |
if (libjsig_is_loaded) { |
|
3424 |
// Tell libjsig jvm finishes setting signal handlers |
|
3425 |
(*end_signal_setting)(); |
|
3426 |
} |
|
3427 |
||
3428 |
// We don't activate signal checker if libjsig is in place, we trust ourselves |
|
3429 |
// and if UserSignalHandler is installed all bets are off |
|
3430 |
if (CheckJNICalls) { |
|
3431 |
if (libjsig_is_loaded) { |
|
3432 |
tty->print_cr("Info: libjsig is activated, all active signal checking is disabled"); |
|
3433 |
check_signals = false; |
|
3434 |
} |
|
3435 |
if (AllowUserSignalHandlers) { |
|
3436 |
tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled"); |
|
3437 |
check_signals = false; |
|
3438 |
} |
|
3439 |
} |
|
3440 |
} |
|
3441 |
} |
|
3442 |
||
3443 |
// This is the fastest way to get thread cpu time on Linux.
// Returns cpu time (user+sys) for any thread, not only for current.
// POSIX compliant clocks are implemented in the kernels 2.6.16+.
// It might work on 2.6.10+ with a special kernel/glibc patch.
// For reference, please, see IEEE Std 1003.1-2004:
// http://www.unix.org/single_unix_specification

// 'clockid' is a per-thread CPU-time clock id; result is in nanoseconds.
jlong os::Linux::fast_thread_cpu_time(clockid_t clockid) {
  struct timespec tp;
  int rc = os::Linux::clock_gettime(clockid, &tp);
  assert(rc == 0, "clock_gettime is expected to return 0 code");

  // Fold {seconds, nanoseconds} into a single nanosecond count.
  return (tp.tv_sec * SEC_IN_NANOSECS) + tp.tv_nsec;
}
|
3457 |
||
3458 |
/////
// glibc on Linux platform uses non-documented flag
// to indicate, that some special sort of signal
// trampoline is used.
// We will never set this flag, and we should
// ignore this flag in our diagnostic
#ifdef SIGNIFICANT_SIGNAL_MASK
#undef SIGNIFICANT_SIGNAL_MASK
#endif
// Masks off glibc's private bit 0x04000000 (SA_RESTORER) so that sa_flags
// comparisons and printouts below are not confused by it.
#define SIGNIFICANT_SIGNAL_MASK (~0x04000000)
|
3468 |
||
3469 |
// Format a human-readable name for 'handler' into buf: either
// "<library-basename>+0x<offset>" when the containing library is known,
// or the raw address otherwise. Returns buf.
static const char* get_signal_handler_name(address handler,
                                           char* buf, int buflen) {
  int offset;
  bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
  if (!found) {
    // Unknown library: fall back to the bare address.
    jio_snprintf(buf, buflen, PTR_FORMAT, handler);
    return buf;
  }

  // skip directory names: advance past every path separator so only the
  // library's basename remains.
  const char *p1 = buf;
  const char *p2;
  size_t len = strlen(os::file_separator());
  while ((p2 = strstr(p1, os::file_separator())) != NULL) {
    p1 = p2 + len;
  }
  jio_snprintf(buf, buflen, "%s+0x%x", p1, offset);
  return buf;
}
|
3485 |
||
3486 |
// Print one line describing the currently-installed handler for 'sig'
// (name, sa_mask, sa_flags) to 'st'; 'buf' is scratch space. Also warns
// if our own handler's flags were changed by user code.
static void print_signal_handler(outputStream* st, int sig,
                                 char* buf, size_t buflen) {
  struct sigaction sa;

  sigaction(sig, NULL, &sa);

  // See comment for SIGNIFICANT_SIGNAL_MASK define
  sa.sa_flags &= SIGNIFICANT_SIGNAL_MASK;

  st->print("%s: ", os::exception_name(sig, buf, buflen));

  // Select the union member that SA_SIGINFO says is active.
  address handler = (sa.sa_flags & SA_SIGINFO)
    ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
    : CAST_FROM_FN_PTR(address, sa.sa_handler);

  if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
    st->print("SIG_DFL");
  } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
    st->print("SIG_IGN");
  } else {
    st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
  }

  // Print only the first word of the (possibly multi-word) signal mask.
  st->print(", sa_mask[0]=" PTR32_FORMAT, *(uint32_t*)&sa.sa_mask);

  address rh = VMError::get_resetted_sighandler(sig);
  // May be, handler was resetted by VMError?
  if(rh != NULL) {
    handler = rh;
    sa.sa_flags = VMError::get_resetted_sigflags(sig) & SIGNIFICANT_SIGNAL_MASK;
  }

  st->print(", sa_flags="   PTR32_FORMAT, sa.sa_flags);

  // Check: is it our handler?
  if(handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)signalHandler) ||
     handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) {
    // It is our signal handler
    // check for flags, reset system-used one!
    if((int)sa.sa_flags != os::Linux::get_our_sigflags(sig)) {
      st->print(
        ", flags was changed from " PTR32_FORMAT ", consider using jsig library",
        os::Linux::get_our_sigflags(sig));
    }
  }
  st->cr();
}
|
3533 |
||
3534 |
||
3535 |
// Run check_signal_handler(sig) at most until the signal is marked done:
// signals already recorded in check_signal_done are skipped.
#define DO_SIGNAL_CHECK(sig) \
  if (!sigismember(&check_signal_done, sig)) \
    os::Linux::check_signal_handler(sig)
|
3538 |
||
3539 |
// This method is a periodic task to check for misbehaving JNI applications |
|
3540 |
// under CheckJNI, we can add any periodic checks here |
|
3541 |
||
3542 |
void os::run_periodic_checks() { |
|
3543 |
||
3544 |
if (check_signals == false) return; |
|
3545 |
||
3546 |
// SEGV and BUS if overridden could potentially prevent |
|
3547 |
// generation of hs*.log in the event of a crash, debugging |
|
3548 |
// such a case can be very challenging, so we absolutely |
|
3549 |
// check the following for a good measure: |
|
3550 |
DO_SIGNAL_CHECK(SIGSEGV); |
|
3551 |
DO_SIGNAL_CHECK(SIGILL); |
|
3552 |
DO_SIGNAL_CHECK(SIGFPE); |
|
3553 |
DO_SIGNAL_CHECK(SIGBUS); |
|
3554 |
DO_SIGNAL_CHECK(SIGPIPE); |
|
3555 |
DO_SIGNAL_CHECK(SIGXFSZ); |
|
3556 |
||
3557 |
||
3558 |
// ReduceSignalUsage allows the user to override these handlers |
|
3559 |
// see comments at the very top and jvm_solaris.h |
|
3560 |
if (!ReduceSignalUsage) { |
|
3561 |
DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL); |
|
3562 |
DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL); |
|
3563 |
DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL); |
|
3564 |
DO_SIGNAL_CHECK(BREAK_SIGNAL); |
|
3565 |
} |
|
3566 |
||
3567 |
DO_SIGNAL_CHECK(SR_signum); |
|
3568 |
DO_SIGNAL_CHECK(INTERRUPT_SIGNAL); |
|
3569 |
} |
|
3570 |
||
3571 |
// Pointer to the real libc sigaction() entry point, resolved lazily with
// dlsym(RTLD_DEFAULT, ...) so that an interposed wrapper is bypassed.
typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);

static os_sigaction_t os_sigaction = NULL;
|
3574 |
||
3575 |
void os::Linux::check_signal_handler(int sig) { |
|
3576 |
char buf[O_BUFLEN]; |
|
3577 |
address jvmHandler = NULL; |
|
3578 |
||
3579 |
||
3580 |
struct sigaction act; |
|
3581 |
if (os_sigaction == NULL) { |
|
3582 |
// only trust the default sigaction, in case it has been interposed |
|
3583 |
os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction"); |
|
3584 |
if (os_sigaction == NULL) return; |
|
3585 |
} |
|
3586 |
||
3587 |
os_sigaction(sig, (struct sigaction*)NULL, &act); |
|
3588 |
||
3589 |
||
3590 |
act.sa_flags &= SIGNIFICANT_SIGNAL_MASK; |
|
3591 |
||
3592 |
address thisHandler = (act.sa_flags & SA_SIGINFO) |
|
3593 |
? CAST_FROM_FN_PTR(address, act.sa_sigaction) |
|
3594 |
: CAST_FROM_FN_PTR(address, act.sa_handler) ; |
|
3595 |
||
3596 |
||
3597 |
switch(sig) { |
|
3598 |
case SIGSEGV: |
|
3599 |
case SIGBUS: |
|
3600 |
case SIGFPE: |
|
3601 |
case SIGPIPE: |
|
3602 |
case SIGILL: |
|
3603 |
case SIGXFSZ: |
|
3604 |
jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)signalHandler); |
|
3605 |
break; |
|
3606 |
||
3607 |
case SHUTDOWN1_SIGNAL: |
|
3608 |
case SHUTDOWN2_SIGNAL: |
|
3609 |
case SHUTDOWN3_SIGNAL: |
|
3610 |
case BREAK_SIGNAL: |
|
3611 |
jvmHandler = (address)user_handler(); |
|
3612 |
break; |
|
3613 |
||
3614 |
case INTERRUPT_SIGNAL: |
|
3615 |
jvmHandler = CAST_FROM_FN_PTR(address, SIG_DFL); |
|
3616 |
break; |
|
3617 |
||
3618 |
default: |
|
3619 |
if (sig == SR_signum) { |
|
3620 |
jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler); |
|
3621 |
} else { |
|
3622 |
return; |
|
3623 |
} |
|
3624 |
break; |
|
3625 |
} |
|
3626 |
||
3627 |
if (thisHandler != jvmHandler) { |
|
3628 |
tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN)); |
|
3629 |
tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN)); |
|
3630 |
tty->print_cr(" found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN)); |
|
3631 |
// No need to check this sig any longer |
|
3632 |
sigaddset(&check_signal_done, sig); |
|
3633 |
} else if(os::Linux::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Linux::get_our_sigflags(sig)) { |
|
3634 |
tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN)); |
|
3635 |
tty->print("expected:" PTR32_FORMAT, os::Linux::get_our_sigflags(sig)); |
|
3636 |
tty->print_cr(" found:" PTR32_FORMAT, act.sa_flags); |
|
3637 |
// No need to check this sig any longer |
|
3638 |
sigaddset(&check_signal_done, sig); |
|
3639 |
} |
|
3640 |
||
3641 |
// Dump all the signal |
|
3642 |
if (sigismember(&check_signal_done, sig)) { |
|
3643 |
print_signal_handlers(tty, buf, O_BUFLEN); |
|
3644 |
} |
|
3645 |
} |
|
3646 |
||
3647 |
// Defined elsewhere in the VM; declared here for use by this file.
extern void report_error(char* file_name, int line_no, char* title, char* format, ...);

extern bool signal_name(int signo, char* buf, size_t len);
|
3650 |
||
3651 |
const char* os::exception_name(int exception_code, char* buf, size_t size) { |
|
3652 |
if (0 < exception_code && exception_code <= SIGRTMAX) { |
|
3653 |
// signal |
|
3654 |
if (!signal_name(exception_code, buf, size)) { |
|
3655 |
jio_snprintf(buf, size, "SIG%d", exception_code); |
|
3656 |
} |
|
3657 |
return buf; |
|
3658 |
} else { |
|
3659 |
return NULL; |
|
3660 |
} |
|
3661 |
} |
|
3662 |
||
3663 |
// this is called _before_ the most of global arguments have been parsed |
|
3664 |
void os::init(void) { |
|
3665 |
char dummy; /* used to get a guess on initial stack address */ |
|
3666 |
// first_hrtime = gethrtime(); |
|
3667 |
||
3668 |
// With LinuxThreads the JavaMain thread pid (primordial thread) |
|
3669 |
// is different than the pid of the java launcher thread. |
|
3670 |
// So, on Linux, the launcher thread pid is passed to the VM |
|
3671 |
// via the sun.java.launcher.pid property. |
|
3672 |
// Use this property instead of getpid() if it was correctly passed. |
|
3673 |
// See bug 6351349. |
|
3674 |
pid_t java_launcher_pid = (pid_t) Arguments::sun_java_launcher_pid(); |
|
3675 |
||
3676 |
_initial_pid = (java_launcher_pid > 0) ? java_launcher_pid : getpid(); |
|
3677 |
||
3678 |
clock_tics_per_sec = sysconf(_SC_CLK_TCK); |
|
3679 |
||
3680 |
init_random(1234567); |
|
3681 |
||
3682 |
ThreadCritical::initialize(); |
|
3683 |
||
3684 |
Linux::set_page_size(sysconf(_SC_PAGESIZE)); |
|
3685 |
if (Linux::page_size() == -1) { |
|
3686 |
fatal1("os_linux.cpp: os::init: sysconf failed (%s)", strerror(errno)); |
|
3687 |
} |
|
3688 |
init_page_sizes((size_t) Linux::page_size()); |
|
3689 |
||
3690 |
Linux::initialize_system_info(); |
|
3691 |
||
3692 |
// main_thread points to the aboriginal thread |
|
3693 |
Linux::_main_thread = pthread_self(); |
|
3694 |
||
3695 |
Linux::clock_init(); |
|
3696 |
initial_time_count = os::elapsed_counter(); |
|
950 | 3697 |
pthread_mutex_init(&dl_mutex, NULL); |
1 | 3698 |
} |
3699 |
||
3700 |
// To install functions for atexit system call |
|
3701 |
extern "C" { |
|
3702 |
static void perfMemory_exit_helper() { |
|
3703 |
perfMemory_exit(); |
|
3704 |
} |
|
3705 |
} |
|
3706 |
||
3707 |
// this is called _after_ the global arguments have been parsed |
|
3708 |
jint os::init_2(void) |
|
3709 |
{ |
|
3710 |
Linux::fast_thread_clock_init(); |
|
3711 |
||
3712 |
// Allocate a single page and mark it as readable for safepoint polling |
|
3713 |
address polling_page = (address) ::mmap(NULL, Linux::page_size(), PROT_READ, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); |
|
3714 |
guarantee( polling_page != MAP_FAILED, "os::init_2: failed to allocate polling page" ); |
|
3715 |
||
3716 |
os::set_polling_page( polling_page ); |
|
3717 |
||
3718 |
#ifndef PRODUCT |
|
3719 |
if(Verbose && PrintMiscellaneous) |
|
3720 |
tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page); |
|
3721 |
#endif |
|
3722 |
||
3723 |
if (!UseMembar) { |
|
3724 |
address mem_serialize_page = (address) ::mmap(NULL, Linux::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); |
|
3725 |
guarantee( mem_serialize_page != NULL, "mmap Failed for memory serialize page"); |
|
3726 |
os::set_memory_serialize_page( mem_serialize_page ); |
|
3727 |
||
3728 |
#ifndef PRODUCT |
|
3729 |
if(Verbose && PrintMiscellaneous) |
|
3730 |
tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page); |
|
3731 |
#endif |
|
3732 |
} |
|
3733 |
||
3734 |
FLAG_SET_DEFAULT(UseLargePages, os::large_page_init()); |
|
3735 |
||
3736 |
// initialize suspend/resume support - must do this before signal_sets_init() |
|
3737 |
if (SR_initialize() != 0) { |
|
3738 |
perror("SR_initialize failed"); |
|
3739 |
return JNI_ERR; |
|
3740 |
} |
|
3741 |
||
3742 |
Linux::signal_sets_init(); |
|
3743 |
Linux::install_signal_handlers(); |
|
3744 |
||
3745 |
size_t threadStackSizeInBytes = ThreadStackSize * K; |
|
3746 |
if (threadStackSizeInBytes != 0 && |
|
3747 |
threadStackSizeInBytes < Linux::min_stack_allowed) { |
|
3748 |
tty->print_cr("\nThe stack size specified is too small, " |
|
3749 |
"Specify at least %dk", |
|
3750 |
Linux::min_stack_allowed / K); |
|
3751 |
return JNI_ERR; |
|
3752 |
} |
|
3753 |
||
3754 |
// Make the stack size a multiple of the page size so that |
|
3755 |
// the yellow/red zones can be guarded. |
|
3756 |
JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes, |
|
3757 |
vm_page_size())); |
|
3758 |
||
3759 |
Linux::capture_initial_stack(JavaThread::stack_size_at_create()); |
|
3760 |
||
3761 |
Linux::libpthread_init(); |
|
3762 |
if (PrintMiscellaneous && (Verbose || WizardMode)) { |
|
3763 |
tty->print_cr("[HotSpot is running with %s, %s(%s)]\n", |
|
3764 |
Linux::glibc_version(), Linux::libpthread_version(), |
|
3765 |
Linux::is_floating_stack() ? "floating stack" : "fixed stack"); |
|
3766 |
} |
|
3767 |
||
388 | 3768 |
if (UseNUMA) { |
1615
b46d9f19bde2
6779436: NUMA allocator: libnuma expects certain size of the buffer in numa_node_to_cpus()
iveresov
parents:
1388
diff
changeset
|
3769 |
if (!Linux::libnuma_init()) { |
b46d9f19bde2
6779436: NUMA allocator: libnuma expects certain size of the buffer in numa_node_to_cpus()
iveresov
parents:
1388
diff
changeset
|
3770 |
UseNUMA = false; |
b46d9f19bde2
6779436: NUMA allocator: libnuma expects certain size of the buffer in numa_node_to_cpus()
iveresov
parents:
1388
diff
changeset
|
3771 |
} else { |
b46d9f19bde2
6779436: NUMA allocator: libnuma expects certain size of the buffer in numa_node_to_cpus()
iveresov
parents:
1388
diff
changeset
|
3772 |
if ((Linux::numa_max_node() < 1)) { |
b46d9f19bde2
6779436: NUMA allocator: libnuma expects certain size of the buffer in numa_node_to_cpus()
iveresov
parents:
1388
diff
changeset
|
3773 |
// There's only one node(they start from 0), disable NUMA. |
b46d9f19bde2
6779436: NUMA allocator: libnuma expects certain size of the buffer in numa_node_to_cpus()
iveresov
parents:
1388
diff
changeset
|
3774 |
UseNUMA = false; |
b46d9f19bde2
6779436: NUMA allocator: libnuma expects certain size of the buffer in numa_node_to_cpus()
iveresov
parents:
1388
diff
changeset
|
3775 |
} |
b46d9f19bde2
6779436: NUMA allocator: libnuma expects certain size of the buffer in numa_node_to_cpus()
iveresov
parents:
1388
diff
changeset
|
3776 |
} |
b46d9f19bde2
6779436: NUMA allocator: libnuma expects certain size of the buffer in numa_node_to_cpus()
iveresov
parents:
1388
diff
changeset
|
3777 |
if (!UseNUMA && ForceNUMA) { |
b46d9f19bde2
6779436: NUMA allocator: libnuma expects certain size of the buffer in numa_node_to_cpus()
iveresov
parents:
1388
diff
changeset
|
3778 |
UseNUMA = true; |
b46d9f19bde2
6779436: NUMA allocator: libnuma expects certain size of the buffer in numa_node_to_cpus()
iveresov
parents:
1388
diff
changeset
|
3779 |
} |
388 | 3780 |
} |
3781 |
||
1 | 3782 |
if (MaxFDLimit) { |
3783 |
// set the number of file descriptors to max. print out error |
|
3784 |
// if getrlimit/setrlimit fails but continue regardless. |
|
3785 |
struct rlimit nbr_files; |
|
3786 |
int status = getrlimit(RLIMIT_NOFILE, &nbr_files); |
|
3787 |
if (status != 0) { |
|
3788 |
if (PrintMiscellaneous && (Verbose || WizardMode)) |
|
3789 |
perror("os::init_2 getrlimit failed"); |
|
3790 |
} else { |
|
3791 |
nbr_files.rlim_cur = nbr_files.rlim_max; |
|
3792 |
status = setrlimit(RLIMIT_NOFILE, &nbr_files); |
|
3793 |
if (status != 0) { |
|
3794 |
if (PrintMiscellaneous && (Verbose || WizardMode)) |
|
3795 |
perror("os::init_2 setrlimit failed"); |
|
3796 |
} |
|
3797 |
} |
|
3798 |
} |
|
3799 |
||
3800 |
// Initialize lock used to serialize thread creation (see os::create_thread) |
|
3801 |
Linux::set_createThread_lock(new Mutex(Mutex::leaf, "createThread_lock", false)); |
|
3802 |
||
3803 |
// Initialize HPI. |
|
3804 |
jint hpi_result = hpi::initialize(); |
|
3805 |
if (hpi_result != JNI_OK) { |
|
3806 |
tty->print_cr("There was an error trying to initialize the HPI library."); |
|
3807 |
return hpi_result; |
|
3808 |
} |
|
3809 |
||
3810 |
// at-exit methods are called in the reverse order of their registration. |
|
3811 |
// atexit functions are called on return from main or as a result of a |
|
3812 |
// call to exit(3C). There can be only 32 of these functions registered |
|
3813 |
// and atexit() does not set errno. |
|
3814 |
||
3815 |
if (PerfAllowAtExitRegistration) { |
|
3816 |
// only register atexit functions if PerfAllowAtExitRegistration is set. |
|
3817 |
// atexit functions can be delayed until process exit time, which |
|
3818 |
// can be problematic for embedded VM situations. Embedded VMs should |
|
3819 |
// call DestroyJavaVM() to assure that VM resources are released. |
|
3820 |
||
3821 |
// note: perfMemory_exit_helper atexit function may be removed in |
|
3822 |
// the future if the appropriate cleanup code can be added to the |
|
3823 |
// VM_Exit VMOperation's doit method. |
|
3824 |
if (atexit(perfMemory_exit_helper) != 0) { |
|
3825 |
warning("os::init2 atexit(perfMemory_exit_helper) failed"); |
|
3826 |
} |
|
3827 |
} |
|
3828 |
||
3829 |
// initialize thread priority policy |
|
3830 |
prio_init(); |
|
3831 |
||
3832 |
return JNI_OK; |
|
3833 |
} |
|
3834 |
||
3835 |
// Mark the polling page as unreadable |
|
3836 |
void os::make_polling_page_unreadable(void) { |
|
3837 |
if( !guard_memory((char*)_polling_page, Linux::page_size()) ) |
|
3838 |
fatal("Could not disable polling page"); |
|
3839 |
}; |
|
3840 |
||
3841 |
// Mark the polling page as readable |
|
3842 |
void os::make_polling_page_readable(void) { |
|
823
9a5271881bc0
6716785: implicit null checks not triggering with CompressedOops
coleenp
parents:
781
diff
changeset
|
3843 |
if( !linux_mprotect((char *)_polling_page, Linux::page_size(), PROT_READ)) { |
1 | 3844 |
fatal("Could not enable polling page"); |
823
9a5271881bc0
6716785: implicit null checks not triggering with CompressedOops
coleenp
parents:
781
diff
changeset
|
3845 |
} |
1 | 3846 |
}; |
3847 |
||
3848 |
int os::active_processor_count() {
  // Linux doesn't yet have a (official) notion of processor sets,
  // so just return the number of online processors.
  int cpu_count = ::sysconf(_SC_NPROCESSORS_ONLN);
  assert(cpu_count > 0 && cpu_count <= processor_count(), "sanity check");
  return cpu_count;
}
|
3855 |
||
3856 |
// Processor-set distribution is not available on Linux.
bool os::distribute_processes(uint length, uint* distribution) {
  // Not yet implemented.
  return false;
}
|
3860 |
||
3861 |
// Processor binding is not available on Linux.
bool os::bind_to_processor(uint processor_id) {
  // Not yet implemented.
  return false;
}
|
3865 |
||
3866 |
/// |
|
3867 |
||
3868 |
// Suspends the target using the signal mechanism and then grabs the PC before |
|
3869 |
// resuming the target. Used by the flat-profiler only |
|
3870 |
ExtendedPC os::get_thread_pc(Thread* thread) { |
|
3871 |
// Make sure that it is called by the watcher for the VMThread |
|
3872 |
assert(Thread::current()->is_Watcher_thread(), "Must be watcher"); |
|
3873 |
assert(thread->is_VM_thread(), "Can only be called for VMThread"); |
|
3874 |
||
3875 |
ExtendedPC epc; |
|
3876 |
||
3877 |
OSThread* osthread = thread->osthread(); |
|
3878 |
if (do_suspend(osthread)) { |
|
3879 |
if (osthread->ucontext() != NULL) { |
|
3880 |
epc = os::Linux::ucontext_get_pc(osthread->ucontext()); |
|
3881 |
} else { |
|
3882 |
// NULL context is unexpected, double-check this is the VMThread |
|
3883 |
guarantee(thread->is_VM_thread(), "can only be called for VMThread"); |
|
3884 |
} |
|
3885 |
do_resume(osthread); |
|
3886 |
} |
|
3887 |
// failure means pthread_kill failed for some reason - arguably this is |
|
3888 |
// a fatal problem, but such problems are ignored elsewhere |
|
3889 |
||
3890 |
return epc; |
|
3891 |
} |
|
3892 |
||
3893 |
int os::Linux::safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime) |
|
3894 |
{ |
|
3895 |
if (is_NPTL()) { |
|
3896 |
return pthread_cond_timedwait(_cond, _mutex, _abstime); |
|
3897 |
} else { |
|
3898 |
#ifndef IA64 |
|
3899 |
// 6292965: LinuxThreads pthread_cond_timedwait() resets FPU control |
|
3900 |
// word back to default 64bit precision if condvar is signaled. Java |
|
3901 |
// wants 53bit precision. Save and restore current value. |
|
3902 |
int fpu = get_fpu_control_word(); |
|
3903 |
#endif // IA64 |
|
3904 |
int status = pthread_cond_timedwait(_cond, _mutex, _abstime); |
|
3905 |
#ifndef IA64 |
|
3906 |
set_fpu_control_word(fpu); |
|
3907 |
#endif // IA64 |
|
3908 |
return status; |
|
3909 |
} |
|
3910 |
} |
|
3911 |
||
3912 |
//////////////////////////////////////////////////////////////////////////////// |
|
3913 |
// debug support |
|
3914 |
||
3915 |
#ifndef PRODUCT |
|
3916 |
static address same_page(address x, address y) { |
|
3917 |
int page_bits = -os::vm_page_size(); |
|
3918 |
if ((intptr_t(x) & page_bits) == (intptr_t(y) & page_bits)) |
|
3919 |
return x; |
|
3920 |
else if (x > y) |
|
3921 |
return (address)(intptr_t(y) | ~page_bits) + 1; |
|
3922 |
else |
|
3923 |
return (address)(intptr_t(y) & page_bits); |
|
3924 |
} |
|
3925 |
||
3926 |
// Debug helper: resolve addr via dladdr and print symbol/library/base info;
// with Verbose, also disassemble a few bytes around the address.
// Returns true iff dladdr recognized the address.
bool os::find(address addr) {
  Dl_info dlinfo;
  memset(&dlinfo, 0, sizeof(dlinfo));
  if (!dladdr(addr, &dlinfo)) {
    return false;
  }

  tty->print(PTR_FORMAT ": ", addr);
  if (dlinfo.dli_sname != NULL) {
    tty->print("%s+%#x", dlinfo.dli_sname,
               addr - (intptr_t)dlinfo.dli_saddr);
  } else if (dlinfo.dli_fname) {
    tty->print("<offset %#x>", addr - (intptr_t)dlinfo.dli_fbase);
  } else {
    tty->print("<absolute address>");
  }
  if (dlinfo.dli_fname) {
    tty->print(" in %s", dlinfo.dli_fname);
  }
  if (dlinfo.dli_fbase) {
    tty->print(" at " PTR_FORMAT, dlinfo.dli_fbase);
  }
  tty->cr();

  if (Verbose) {
    // decode some bytes around the PC
    address begin = same_page(addr-40, addr);
    address end   = same_page(addr+40, addr);
    address lowest = (address) dlinfo.dli_sname;
    if (!lowest)  lowest = (address) dlinfo.dli_fbase;
    if (begin < lowest)  begin = lowest;
    // Stop decoding at the next symbol if one starts inside the window.
    Dl_info dlinfo2;
    if (dladdr(end, &dlinfo2) && dlinfo2.dli_saddr != dlinfo.dli_saddr
        && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin) {
      end = (address) dlinfo2.dli_saddr;
    }
    Disassembler::decode(begin, end);
  }
  return true;
}

#endif
|
3966 |
||
3967 |
//////////////////////////////////////////////////////////////////////////////// |
|
3968 |
// misc |
|
3969 |
||
3970 |
// This does not do anything on Linux. This is basically a hook for being |
|
3971 |
// able to use structured exception handling (thread-local exception filters) |
|
3972 |
// on, e.g., Win32. |
|
3973 |
void |
|
3974 |
os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method, |
|
3975 |
JavaCallArguments* args, Thread* thread) { |
|
3976 |
f(value, method, args, thread); |
|
3977 |
} |
|
3978 |
||
3979 |
// No OS-specific statistics are collected on Linux.
void os::print_statistics() {
}
|
3981 |
||
3982 |
// Print a framed title/message to stderr and block until a line is read
// from stdin.  Returns nonzero iff the reply starts with 'y' or 'Y'.
int os::message_box(const char* title, const char* message) {
  fdStream err(defaultStream::error_fd());

  int col;
  for (col = 0; col < 78; col++) err.print_raw("=");
  err.cr();
  err.print_raw_cr(title);
  for (col = 0; col < 78; col++) err.print_raw("-");
  err.cr();
  err.print_raw_cr(message);
  for (col = 0; col < 78; col++) err.print_raw("=");
  err.cr();

  char buf[16];
  // Prevent process from exiting upon "read error" without consuming all CPU
  while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }

  return buf[0] == 'y' || buf[0] == 'Y';
}
|
4000 |
||
4001 |
int os::stat(const char *path, struct stat *sbuf) { |
|
4002 |
char pathbuf[MAX_PATH]; |
|
4003 |
if (strlen(path) > MAX_PATH - 1) { |
|
4004 |
errno = ENAMETOOLONG; |
|
4005 |
return -1; |
|
4006 |
} |
|
4007 |
hpi::native_path(strcpy(pathbuf, path)); |
|
4008 |
return ::stat(pathbuf, sbuf); |
|
4009 |
} |
|
4010 |
||
4011 |
// No C-heap verification is performed on Linux; always reports success.
bool os::check_heap(bool force) {
  return true;
}
|
4014 |
||
4015 |
// Thin wrapper over the libc vsnprintf.
int local_vsnprintf(char* buf, size_t count, const char* format, va_list args) {
  return ::vsnprintf(buf, count, format, args);
}
|
4018 |
||
4019 |
// Is a (classpath) directory empty? |
|
4020 |
bool os::dir_is_empty(const char* path) { |
|
4021 |
DIR *dir = NULL; |
|
4022 |
struct dirent *ptr; |
|
4023 |
||
4024 |
dir = opendir(path); |
|
4025 |
if (dir == NULL) return true; |
|
4026 |
||
4027 |
/* Scan the directory */ |
|
4028 |
bool result = true; |
|
4029 |
char buf[sizeof(struct dirent) + MAX_PATH]; |
|
4030 |
while (result && (ptr = ::readdir(dir)) != NULL) { |
|
4031 |
if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) { |
|
4032 |
result = false; |
|
4033 |
} |
|
4034 |
} |
|
4035 |
closedir(dir); |
|
4036 |
return result; |
|
4037 |
} |
|
4038 |
||
4039 |
// create binary file, rewriting existing file if required |
|
4040 |
int os::create_binary_file(const char* path, bool rewrite_existing) { |
|
4041 |
int oflags = O_WRONLY | O_CREAT; |
|
4042 |
if (!rewrite_existing) { |
|
4043 |
oflags |= O_EXCL; |
|
4044 |
} |
|
4045 |
return ::open64(path, oflags, S_IREAD | S_IWRITE); |
|
4046 |
} |
|
4047 |
||
4048 |
// return current position of file pointer |
|
4049 |
jlong os::current_file_offset(int fd) { |
|
4050 |
return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR); |
|
4051 |
} |
|
4052 |
||
4053 |
// move file pointer to the specified offset |
|
4054 |
jlong os::seek_to_file_offset(int fd, jlong offset) { |
|
4055 |
return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET); |
|
4056 |
} |
|
4057 |
||
4058 |
// Map a block of memory. |
|
4059 |
char* os::map_memory(int fd, const char* file_name, size_t file_offset, |
|
4060 |
char *addr, size_t bytes, bool read_only, |
|
4061 |
bool allow_exec) { |
|
4062 |
int prot; |
|
4063 |
int flags; |
|
4064 |
||
4065 |
if (read_only) { |
|
4066 |
prot = PROT_READ; |
|
4067 |
flags = MAP_SHARED; |
|
4068 |
} else { |
|
4069 |
prot = PROT_READ | PROT_WRITE; |
|
4070 |
flags = MAP_PRIVATE; |
|
4071 |
} |
|
4072 |
||
4073 |
if (allow_exec) { |
|
4074 |
prot |= PROT_EXEC; |
|
4075 |
} |
|
4076 |
||
4077 |
if (addr != NULL) { |
|
4078 |
flags |= MAP_FIXED; |
|
4079 |
} |
|
4080 |
||
4081 |
char* mapped_address = (char*)mmap(addr, (size_t)bytes, prot, flags, |
|
4082 |
fd, file_offset); |
|
4083 |
if (mapped_address == MAP_FAILED) { |
|
4084 |
return NULL; |
|
4085 |
} |
|
4086 |
return mapped_address; |
|
4087 |
} |
|
4088 |
||
4089 |
||
4090 |
// Remap a block of memory. |
|
4091 |
char* os::remap_memory(int fd, const char* file_name, size_t file_offset, |
|
4092 |
char *addr, size_t bytes, bool read_only, |
|
4093 |
bool allow_exec) { |
|
4094 |
// same as map_memory() on this OS |
|
4095 |
return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only, |
|
4096 |
allow_exec); |
|
4097 |
} |
|
4098 |
||
4099 |
||
4100 |
// Unmap a block of memory. |
|
4101 |
bool os::unmap_memory(char* addr, size_t bytes) { |
|
4102 |
return munmap(addr, bytes) == 0; |
|
4103 |
} |
|
4104 |
||
4105 |
static jlong slow_thread_cpu_time(Thread *thread, bool user_sys_cpu_time); |
|
4106 |
||
4107 |
static clockid_t thread_cpu_clockid(Thread* thread) { |
|
4108 |
pthread_t tid = thread->osthread()->pthread_id(); |
|
4109 |
clockid_t clockid; |
|
4110 |
||
4111 |
// Get thread clockid |
|
4112 |
int rc = os::Linux::pthread_getcpuclockid(tid, &clockid); |
|
4113 |
assert(rc == 0, "pthread_getcpuclockid is expected to return 0 code"); |
|
4114 |
return clockid; |
|
4115 |
} |
|
4116 |
||
4117 |
// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool) |
|
4118 |
// are used by JVM M&M and JVMTI to get user+sys or user CPU time |
|
4119 |
// of a thread. |
|
4120 |
// |
|
4121 |
// current_thread_cpu_time() and thread_cpu_time(Thread*) returns |
|
4122 |
// the fast estimate available on the platform. |
|
4123 |
||
4124 |
jlong os::current_thread_cpu_time() { |
|
4125 |
if (os::Linux::supports_fast_thread_cpu_time()) { |
|
4126 |
return os::Linux::fast_thread_cpu_time(CLOCK_THREAD_CPUTIME_ID); |
|
4127 |
} else { |
|
4128 |
// return user + sys since the cost is the same |
|
4129 |
return slow_thread_cpu_time(Thread::current(), true /* user + sys */); |
|
4130 |
} |
|
4131 |
} |
|
4132 |
||
4133 |
jlong os::thread_cpu_time(Thread* thread) {
  // consistent with what current_thread_cpu_time() returns
  if (os::Linux::supports_fast_thread_cpu_time()) {
    return os::Linux::fast_thread_cpu_time(thread_cpu_clockid(thread));
  }
  return slow_thread_cpu_time(thread, true /* user + sys */);
}
|
4141 |
||
4142 |
jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
  // The fast clock only reports combined user+sys time.
  if (user_sys_cpu_time && os::Linux::supports_fast_thread_cpu_time()) {
    return os::Linux::fast_thread_cpu_time(CLOCK_THREAD_CPUTIME_ID);
  }
  return slow_thread_cpu_time(Thread::current(), user_sys_cpu_time);
}
|
4149 |
||
4150 |
jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
  // The fast clock only reports combined user+sys time.
  if (user_sys_cpu_time && os::Linux::supports_fast_thread_cpu_time()) {
    return os::Linux::fast_thread_cpu_time(thread_cpu_clockid(thread));
  }
  return slow_thread_cpu_time(thread, user_sys_cpu_time);
}
|
4157 |
||
4158 |
// |
|
4159 |
// -1 on error. |
|
4160 |
// |
|
4161 |
||
4162 |
static jlong slow_thread_cpu_time(Thread *thread, bool user_sys_cpu_time) { |
|
4163 |
static bool proc_pid_cpu_avail = true; |
|
4164 |
static bool proc_task_unchecked = true; |
|
4165 |
static const char *proc_stat_path = "/proc/%d/stat"; |
|
4166 |
pid_t tid = thread->osthread()->thread_id(); |
|
4167 |
int i; |
|
4168 |
char *s; |
|
4169 |
char stat[2048]; |
|
4170 |
int statlen; |
|
4171 |
char proc_name[64]; |
|
4172 |
int count; |
|
4173 |
long sys_time, user_time; |
|
4174 |
char string[64]; |
|
4175 |
int idummy; |
|
4176 |
long ldummy; |
|
4177 |
FILE *fp; |
|
4178 |
||
4179 |
// We first try accessing /proc/<pid>/cpu since this is faster to |
|
4180 |
// process. If this file is not present (linux kernels 2.5 and above) |
|
4181 |
// then we open /proc/<pid>/stat. |
|
4182 |
if ( proc_pid_cpu_avail ) { |
|
4183 |
sprintf(proc_name, "/proc/%d/cpu", tid); |
|
4184 |
fp = fopen(proc_name, "r"); |
|
4185 |
if ( fp != NULL ) { |
|
4186 |
count = fscanf( fp, "%s %lu %lu\n", string, &user_time, &sys_time); |
|
4187 |
fclose(fp); |
|
4188 |
if ( count != 3 ) return -1; |
|
4189 |
||
4190 |
if (user_sys_cpu_time) { |
|
4191 |
return ((jlong)sys_time + (jlong)user_time) * (1000000000 / clock_tics_per_sec); |
|
4192 |
} else { |
|
4193 |
return (jlong)user_time * (1000000000 / clock_tics_per_sec); |
|
4194 |
} |
|
4195 |
} |
|
4196 |
else proc_pid_cpu_avail = false; |
|
4197 |
} |
|
4198 |
||
4199 |
// The /proc/<tid>/stat aggregates per-process usage on |
|
4200 |
// new Linux kernels 2.6+ where NPTL is supported. |
|
4201 |
// The /proc/self/task/<tid>/stat still has the per-thread usage. |
|
4202 |
// See bug 6328462. |
|
4203 |
// There can be no directory /proc/self/task on kernels 2.4 with NPTL |
|
4204 |
// and possibly in some other cases, so we check its availability. |
|
4205 |
if (proc_task_unchecked && os::Linux::is_NPTL()) { |
|
4206 |
// This is executed only once |
|
4207 |
proc_task_unchecked = false; |
|
4208 |
fp = fopen("/proc/self/task", "r"); |
|
4209 |
if (fp != NULL) { |
|
4210 |
proc_stat_path = "/proc/self/task/%d/stat"; |
|
4211 |
fclose(fp); |
|
4212 |
} |
|
4213 |
} |
|
4214 |
||
4215 |
sprintf(proc_name, proc_stat_path, tid); |
|
4216 |
fp = fopen(proc_name, "r"); |
|
4217 |
if ( fp == NULL ) return -1; |
|
4218 |
statlen = fread(stat, 1, 2047, fp); |
|
4219 |
stat[statlen] = '\0'; |
|
4220 |
fclose(fp); |
|
4221 |
||
4222 |
// Skip pid and the command string. Note that we could be dealing with |
|
4223 |
// weird command names, e.g. user could decide to rename java launcher |
|
4224 |
// to "java 1.4.2 :)", then the stat file would look like |
|
4225 |
// 1234 (java 1.4.2 :)) R ... ... |
|
4226 |
// We don't really need to know the command string, just find the last |
|
4227 |
// occurrence of ")" and then start parsing from there. See bug 4726580. |
|
4228 |
s = strrchr(stat, ')'); |
|
4229 |
i = 0; |
|
4230 |
if (s == NULL ) return -1; |
|
4231 |
||
4232 |
// Skip blank chars |
|
4233 |
do s++; while (isspace(*s)); |
|
4234 |
||
1889
24b003a6fe46
6781583: Hotspot build fails on linux 64 bit platform with gcc 4.3.2
xlu
parents:
1664
diff
changeset
|
4235 |
count = sscanf(s,"%*c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu", |
24b003a6fe46
6781583: Hotspot build fails on linux 64 bit platform with gcc 4.3.2
xlu
parents:
1664
diff
changeset
|
4236 |
&idummy, &idummy, &idummy, &idummy, &idummy, |
1 | 4237 |
&ldummy, &ldummy, &ldummy, &ldummy, &ldummy, |
4238 |
&user_time, &sys_time); |
|
1889
24b003a6fe46
6781583: Hotspot build fails on linux 64 bit platform with gcc 4.3.2
xlu
parents:
1664
diff
changeset
|
4239 |
if ( count != 12 ) return -1; |
1 | 4240 |
if (user_sys_cpu_time) { |
4241 |
return ((jlong)sys_time + (jlong)user_time) * (1000000000 / clock_tics_per_sec); |
|
4242 |
} else { |
|
4243 |
return (jlong)user_time * (1000000000 / clock_tics_per_sec); |
|
4244 |
} |
|
4245 |
} |
|
4246 |
||
4247 |
// Describe the JVMTI timer backing current_thread_cpu_time().
void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
  info_ptr->may_skip_backward = false;     // elapsed time not wall time
  info_ptr->may_skip_forward = false;      // elapsed time not wall time
  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
}
|
4253 |
||
4254 |
// Describe the JVMTI timer backing thread_cpu_time().
void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
  info_ptr->may_skip_backward = false;     // elapsed time not wall time
  info_ptr->may_skip_forward = false;      // elapsed time not wall time
  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
}
|
4260 |
||
4261 |
// Per-thread CPU time is always available on Linux (via /proc).
bool os::is_thread_cpu_time_supported() {
  return true;
}
|
4264 |
||
4265 |
// System loadavg support.  Returns -1 if load average cannot be obtained.
// Linux doesn't yet have a (official) notion of processor sets,
// so just return the system wide load average.
int os::loadavg(double loadavg[], int nelem) {
  // Delegate to the C library's getloadavg(3); it fills at most nelem
  // samples and returns the number filled, or -1 on failure.
  return ::getloadavg(loadavg, nelem);
}
|
4271 |
||
4272 |
void os::pause() { |
|
4273 |
char filename[MAX_PATH]; |
|
4274 |
if (PauseAtStartupFile && PauseAtStartupFile[0]) { |
|
4275 |
jio_snprintf(filename, MAX_PATH, PauseAtStartupFile); |
|
4276 |
} else { |
|
4277 |
jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id()); |
|
4278 |
} |
|
4279 |
||
4280 |
int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666); |
|
4281 |
if (fd != -1) { |
|
4282 |
struct stat buf; |
|
4283 |
close(fd); |
|
4284 |
while (::stat(filename, &buf) == 0) { |
|
4285 |
(void)::poll(NULL, 0, 100); |
|
4286 |
} |
|
4287 |
} else { |
|
4288 |
jio_fprintf(stderr, |
|
4289 |
"Could not open pause file '%s', continuing immediately.\n", filename); |
|
4290 |
} |
|
4291 |
} |
|
4292 |
||
4293 |
extern "C" {

/**
 * NOTE: the following code is to keep the green threads code
 * in the libjava.so happy. Once the green threads is removed,
 * this code will no longer be needed.
 */

// Thin wrapper around waitpid(2).
int
jdk_waitpid(pid_t pid, int* status, int options) {
  return waitpid(pid, status, options);
}

// On Linux, fork1() simply maps to fork(2).
int
fork1() {
  return fork();
}

// Thin wrapper around sem_init(3).
int
jdk_sem_init(sem_t *sem, int pshared, unsigned int value) {
  return sem_init(sem, pshared, value);
}

// Thin wrapper around sem_post(3).
int
jdk_sem_post(sem_t *sem) {
  return sem_post(sem);
}

// Thin wrapper around sem_wait(3).
int
jdk_sem_wait(sem_t *sem) {
  return sem_wait(sem);
}

// Thin wrapper around pthread_sigmask(3).
int
jdk_pthread_sigmask(int how , const sigset_t* newmask, sigset_t* oldmask) {
  return pthread_sigmask(how , newmask, oldmask);
}

}
|
4331 |
||
4332 |
// Refer to the comments in os_solaris.cpp park-unpark. |
|
4333 |
// |
|
4334 |
// Beware -- Some versions of NPTL embody a flaw where pthread_cond_timedwait() can |
|
4335 |
// hang indefinitely. For instance NPTL 0.60 on 2.4.21-4ELsmp is vulnerable. |
|
4336 |
// For specifics regarding the bug see GLIBC BUGID 261237 : |
|
4337 |
// http://www.mail-archive.com/debian-glibc@lists.debian.org/msg10837.html. |
|
4338 |
// Briefly, pthread_cond_timedwait() calls with an expiry time that's not in the future |
|
4339 |
// will either hang or corrupt the condvar, resulting in subsequent hangs if the condvar |
|
4340 |
// is used. (The simple C test-case provided in the GLIBC bug report manifests the |
|
4341 |
// hang). The JVM is vulnerable via sleep(), Object.wait(timo), LockSupport.parkNanos() |
|
4342 |
// and monitorenter when we're using 1-0 locking. All those operations may result in |
|
4343 |
// calls to pthread_cond_timedwait(). Using LD_ASSUME_KERNEL to use an older version |
|
4344 |
// of libpthread avoids the problem, but isn't practical. |
|
4345 |
// |
|
4346 |
// Possible remedies: |
|
4347 |
// |
|
4348 |
// 1. Establish a minimum relative wait time. 50 to 100 msecs seems to work. |
|
4349 |
// This is palliative and probabilistic, however. If the thread is preempted |
|
4350 |
// between the call to compute_abstime() and pthread_cond_timedwait(), more |
|
4351 |
// than the minimum period may have passed, and the abstime may be stale (in the |
|
4352 |
// past) resulting in a hang. Using this technique reduces the odds of a hang |
|
4353 |
// but the JVM is still vulnerable, particularly on heavily loaded systems. |
|
4354 |
// |
|
4355 |
// 2. Modify park-unpark to use per-thread (per ParkEvent) pipe-pairs instead |
|
4356 |
// of the usual flag-condvar-mutex idiom. The write side of the pipe is set |
|
4357 |
// NDELAY. unpark() reduces to write(), park() reduces to read() and park(timo) |
|
4358 |
// reduces to poll()+read(). This works well, but consumes 2 FDs per extant |
|
4359 |
// thread. |
|
4360 |
// |
|
4361 |
// 3. Embargo pthread_cond_timedwait() and implement a native "chron" thread |
|
4362 |
// that manages timeouts. We'd emulate pthread_cond_timedwait() by enqueuing |
|
4363 |
// a timeout request to the chron thread and then blocking via pthread_cond_wait(). |
|
4364 |
// This also works well. In fact it avoids kernel-level scalability impediments |
|
4365 |
// on certain platforms that don't handle lots of active pthread_cond_timedwait() |
|
4366 |
// timers in a graceful fashion. |
|
4367 |
// |
|
4368 |
// 4. When the abstime value is in the past it appears that control returns |
|
4369 |
// correctly from pthread_cond_timedwait(), but the condvar is left corrupt. |
|
4370 |
// Subsequent timedwait/wait calls may hang indefinitely. Given that, we |
|
4371 |
// can avoid the problem by reinitializing the condvar -- by cond_destroy() |
|
4372 |
// followed by cond_init() -- after all calls to pthread_cond_timedwait(). |
|
4373 |
// It may be possible to avoid reinitialization by checking the return |
|
4374 |
// value from pthread_cond_timedwait(). In addition to reinitializing the |
|
4375 |
// condvar we must establish the invariant that cond_signal() is only called |
|
4376 |
// within critical sections protected by the adjunct mutex. This prevents |
|
4377 |
// cond_signal() from "seeing" a condvar that's in the midst of being |
|
4378 |
// reinitialized or that is corrupt. Sadly, this invariant obviates the |
|
4379 |
// desirable signal-after-unlock optimization that avoids futile context switching. |
|
4380 |
// |
|
4381 |
// I'm also concerned that some versions of NPTL might allocate an auxiliary |
|
4382 |
// structure when a condvar is used or initialized. cond_destroy() would |
|
4383 |
// release the helper structure. Our reinitialize-after-timedwait fix |
|
4384 |
// put excessive stress on malloc/free and locks protecting the c-heap. |
|
4385 |
// |
|
4386 |
// We currently use (4). See the WorkAroundNTPLTimedWaitHang flag. |
|
4387 |
// It may be possible to refine (4) by checking the kernel and NPTL versions |
|
4388 |
// and only enabling the work-around for vulnerable environments. |
|
4389 |
||
4390 |
// utility to compute the abstime argument to timedwait: |
|
4391 |
// millis is the relative timeout time |
|
4392 |
// abstime will be the absolute timeout time |
|
4393 |
// TODO: replace compute_abstime() with unpackTime() |
|
4394 |
||
4395 |
static struct timespec* compute_abstime(timespec* abstime, jlong millis) { |
|
4396 |
if (millis < 0) millis = 0; |
|
4397 |
struct timeval now; |
|
4398 |
int status = gettimeofday(&now, NULL); |
|
4399 |
assert(status == 0, "gettimeofday"); |
|
4400 |
jlong seconds = millis / 1000; |
|
4401 |
millis %= 1000; |
|
4402 |
if (seconds > 50000000) { // see man cond_timedwait(3T) |
|
4403 |
seconds = 50000000; |
|
4404 |
} |
|
4405 |
abstime->tv_sec = now.tv_sec + seconds; |
|
4406 |
long usec = now.tv_usec + millis * 1000; |
|
4407 |
if (usec >= 1000000) { |
|
4408 |
abstime->tv_sec += 1; |
|
4409 |
usec -= 1000000; |
|
4410 |
} |
|
4411 |
abstime->tv_nsec = usec * 1000; |
|
4412 |
return abstime; |
|
4413 |
} |
|
4414 |
||
4415 |
||
4416 |
// Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
// Conceptually TryPark() should be equivalent to park(0).
int os::PlatformEvent::TryPark() {
  for (;;) {
    const int v = _Event ;
    guarantee ((v == 0) || (v == 1), "invariant") ;
    // Atomically swap 0 into _Event; retry on CAS failure (a racing
    // unpark() may have changed the value between the load and the CAS).
    if (Atomic::cmpxchg (0, &_Event, v) == v) return v ;
  }
}
|
4426 |
||
4427 |
void os::PlatformEvent::park() {       // AKA "down()"
  // Invariant: Only the thread associated with the Event/PlatformEvent
  // may call park().
  // TODO: assert that _Assoc != NULL or _Assoc == Self
  // Atomically decrement _Event; a prior unpark() leaves it at 1, in
  // which case we consume the permit and return without blocking.
  int v ;
  for (;;) {
      v = _Event ;
      if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
  }
  guarantee (v >= 0, "invariant") ;
  if (v == 0) {
     // Do this the hard way by blocking ...
     int status = pthread_mutex_lock(_mutex);
     assert_status(status == 0, status, "mutex_lock");
     guarantee (_nParked == 0, "invariant") ;
     ++ _nParked ;
     // Loop to filter spurious wakeups: only return once unpark() has
     // raised _Event back to >= 0.
     while (_Event < 0) {
        status = pthread_cond_wait(_cond, _mutex);
        // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
        // Treat this the same as if the wait was interrupted
        if (status == ETIME) { status = EINTR; }
        assert_status(status == 0 || status == EINTR, status, "cond_wait");
     }
     -- _nParked ;

     // In theory we could move the ST of 0 into _Event past the unlock(),
     // but then we'd need a MEMBAR after the ST.
     _Event = 0 ;
     status = pthread_mutex_unlock(_mutex);
     assert_status(status == 0, status, "mutex_unlock");
  }
  guarantee (_Event >= 0, "invariant") ;
}
|
4460 |
||
4461 |
// Timed variant of park(): waits at most millis milliseconds.
// Returns OS_OK if the event was set (or already had a permit),
// OS_TIMEOUT if the wait timed out.
int os::PlatformEvent::park(jlong millis) {
  guarantee (_nParked == 0, "invariant") ;

  // Atomically decrement _Event; a pending permit (v == 1) lets us
  // return immediately without blocking.
  int v ;
  for (;;) {
      v = _Event ;
      if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
  }
  guarantee (v >= 0, "invariant") ;
  if (v != 0) return OS_OK ;

  // We do this the hard way, by blocking the thread.
  // Consider enforcing a minimum timeout value.
  struct timespec abst;
  compute_abstime(&abst, millis);

  int ret = OS_TIMEOUT;
  int status = pthread_mutex_lock(_mutex);
  assert_status(status == 0, status, "mutex_lock");
  guarantee (_nParked == 0, "invariant") ;
  ++_nParked ;

  // Object.wait(timo) will return because of
  // (a) notification
  // (b) timeout
  // (c) thread.interrupt
  //
  // Thread.interrupt and object.notify{All} both call Event::set.
  // That is, we treat thread.interrupt as a special case of notification.
  // The underlying Solaris implementation, cond_timedwait, admits
  // spurious/premature wakeups, but the JLS/JVM spec prevents the
  // JVM from making those visible to Java code. As such, we must
  // filter out spurious wakeups. We assume all ETIME returns are valid.
  //
  // TODO: properly differentiate simultaneous notify+interrupt.
  // In that case, we should propagate the notify to another waiter.

  while (_Event < 0) {
    status = os::Linux::safe_cond_timedwait(_cond, _mutex, &abst);
    // Work around NPTL condvar corruption after a failed timedwait by
    // reinitializing the condvar (remedy (4) in the block comment above).
    if (status != 0 && WorkAroundNPTLTimedWaitHang) {
      pthread_cond_destroy (_cond);
      pthread_cond_init (_cond, NULL) ;
    }
    assert_status(status == 0 || status == EINTR ||
                  status == ETIME || status == ETIMEDOUT,
                  status, "cond_timedwait");
    if (!FilterSpuriousWakeups) break ;                 // previous semantics
    if (status == ETIME || status == ETIMEDOUT) break ;
    // We consume and ignore EINTR and spurious wakeups.
  }
  --_nParked ;
  // _Event >= 0 means unpark() ran while we waited: report OS_OK.
  if (_Event >= 0) {
    ret = OS_OK;
  }
  _Event = 0 ;
  status = pthread_mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock");
  assert (_nParked == 0, "invariant") ;
  return ret;
}
|
4521 |
||
4522 |
// Sets the event (raises _Event toward 1) and wakes the associated
// parked thread, if any.
void os::PlatformEvent::unpark() {
  int v, AnyWaiters ;
  for (;;) {
      v = _Event ;
      if (v > 0) {
         // The LD of _Event could have reordered or be satisfied
         // by a read-aside from this processor's write buffer.
         // To avoid problems execute a barrier and then
         // ratify the value.
         OrderAccess::fence() ;
         if (_Event == v) return ;
         continue ;
      }
      if (Atomic::cmpxchg (v+1, &_Event, v) == v) break ;
  }
  // v < 0 means a thread is (or is about to be) blocked in park();
  // we must wake it.
  if (v < 0) {
     // Wait for the thread associated with the event to vacate
     int status = pthread_mutex_lock(_mutex);
     assert_status(status == 0, status, "mutex_lock");
     AnyWaiters = _nParked ;
     assert (AnyWaiters == 0 || AnyWaiters == 1, "invariant") ;
     // Under the NPTL workaround, signal while holding the lock and
     // clear AnyWaiters so we do not signal a second time below.
     if (AnyWaiters != 0 && WorkAroundNPTLTimedWaitHang) {
        AnyWaiters = 0 ;
        pthread_cond_signal (_cond);
     }
     status = pthread_mutex_unlock(_mutex);
     assert_status(status == 0, status, "mutex_unlock");
     if (AnyWaiters != 0) {
        status = pthread_cond_signal(_cond);
        assert_status(status == 0, status, "cond_signal");
     }
  }

  // Note that we signal() _after dropping the lock for "immortal" Events.
  // This is safe and avoids a common class of futile wakeups. In rare
  // circumstances this can cause a thread to return prematurely from
  // cond_{timed}wait() but the spurious wakeup is benign and the victim will
  // simply re-test the condition and re-park itself.
}
|
4561 |
||
4562 |
||
4563 |
// JSR166 |
|
4564 |
// ------------------------------------------------------- |
|
4565 |
||
4566 |
/* |
|
4567 |
* The solaris and linux implementations of park/unpark are fairly |
|
4568 |
* conservative for now, but can be improved. They currently use a |
|
4569 |
 * mutex/condvar pair, plus a count. |
|
4570 |
* Park decrements count if > 0, else does a condvar wait. Unpark |
|
4571 |
* sets count to 1 and signals condvar. Only one thread ever waits |
|
4572 |
* on the condvar. Contention seen when trying to park implies that someone |
|
4573 |
* is unparking you, so don't wait. And spurious returns are fine, so there |
|
4574 |
* is no need to track notifications. |
|
4575 |
*/ |
|
4576 |
||
4577 |
||
4578 |
#define NANOSECS_PER_SEC 1000000000 |
|
4579 |
#define NANOSECS_PER_MILLISEC 1000000 |
|
4580 |
#define MAX_SECS 100000000 |
|
4581 |
/*
 * This code is common to linux and solaris and will be moved to a
 * common place in dolphin.
 *
 * The passed in time value is either a relative time in nanoseconds
 * or an absolute time in milliseconds. Either way it has to be unpacked
 * into suitable seconds and nanoseconds components and stored in the
 * given timespec structure.
 * Given time is a 64-bit value and the time_t used in the timespec is only
 * a signed-32-bit value (except on 64-bit Linux) we have to watch for
 * overflow if times way in the future are given. Further on Solaris versions
 * prior to 10 there is a restriction (see cond_timedwait) that the specified
 * number of seconds, in abstime, is less than current_time + 100,000,000.
 * As it will be 28 years before "now + 100000000" will overflow we can
 * ignore overflow and just impose a hard-limit on seconds using the value
 * of "now + 100,000,000". This places a limit on the timeout of about 3.17
 * years from "now".
 */
static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
  assert (time > 0, "convertTime");

  struct timeval now;
  int status = gettimeofday(&now, NULL);
  assert(status == 0, "gettimeofday");

  // Hard cap on the absolute deadline (see block comment above).
  time_t max_secs = now.tv_sec + MAX_SECS;

  if (isAbsolute) {
    // 'time' is an absolute time in milliseconds since the epoch.
    jlong secs = time / 1000;
    if (secs > max_secs) {
      absTime->tv_sec = max_secs;
    }
    else {
      absTime->tv_sec = secs;
    }
    absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
  }
  else {
    // 'time' is a relative timeout in nanoseconds from now.
    jlong secs = time / NANOSECS_PER_SEC;
    if (secs >= MAX_SECS) {
      absTime->tv_sec = max_secs;
      absTime->tv_nsec = 0;
    }
    else {
      absTime->tv_sec = now.tv_sec + secs;
      absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
      // Carry nanosecond overflow into the seconds field.
      if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
        absTime->tv_nsec -= NANOSECS_PER_SEC;
        ++absTime->tv_sec; // note: this must be <= max_secs
      }
    }
  }
  assert(absTime->tv_sec >= 0, "tv_sec < 0");
  assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
  assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
  assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
}
|
4639 |
||
4640 |
// JSR166 LockSupport.park() support: block the current JavaThread until
// unpark() grants a permit, the (optional) timeout expires, or the thread
// is interrupted.  time == 0 means wait indefinitely; time is either a
// relative nanosecond timeout or an absolute millisecond deadline
// depending on isAbsolute (see unpackTime above).
void Parker::park(bool isAbsolute, jlong time) {
  // Optional fast-path check:
  // Return immediately if a permit is available.
  if (_counter > 0) {
      _counter = 0 ;
      return ;
  }

  Thread* thread = Thread::current();
  assert(thread->is_Java_thread(), "Must be JavaThread");
  JavaThread *jt = (JavaThread *)thread;

  // Optional optimization -- avoid state transitions if there's an interrupt pending.
  // Check interrupt before trying to wait
  if (Thread::is_interrupted(thread, false)) {
    return;
  }

  // Next, demultiplex/decode time arguments
  timespec absTime;
  if (time < 0) { // don't wait at all
    return;
  }
  if (time > 0) {
    unpackTime(&absTime, isAbsolute, time);
  }


  // Enter safepoint region
  // Beware of deadlocks such as 6317397.
  // The per-thread Parker:: mutex is a classic leaf-lock.
  // In particular a thread must never block on the Threads_lock while
  // holding the Parker:: mutex. If safepoints are pending both the
  // the ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
  ThreadBlockInVM tbivm(jt);

  // Don't wait if cannot get lock since interference arises from
  // unblocking. Also check interrupt before trying wait.
  if (Thread::is_interrupted(thread, false) || pthread_mutex_trylock(_mutex) != 0) {
    return;
  }

  int status ;
  if (_counter > 0) { // no wait needed
    _counter = 0;
    status = pthread_mutex_unlock(_mutex);
    assert (status == 0, "invariant") ;
    return;
  }

#ifdef ASSERT
  // Don't catch signals while blocked; let the running threads have the signals.
  // (This allows a debugger to break into the running thread.)
  sigset_t oldsigs;
  sigset_t* allowdebug_blocked = os::Linux::allowdebug_blocked_signals();
  pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
#endif

  OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
  jt->set_suspend_equivalent();
  // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()

  if (time == 0) {
    status = pthread_cond_wait (_cond, _mutex) ;
  } else {
    status = os::Linux::safe_cond_timedwait (_cond, _mutex, &absTime) ;
    // Reinitialize the condvar after a failed timedwait to work around
    // the NPTL condvar-corruption bug (see the block comment earlier).
    if (status != 0 && WorkAroundNPTLTimedWaitHang) {
      pthread_cond_destroy (_cond) ;
      pthread_cond_init (_cond, NULL);
    }
  }
  assert_status(status == 0 || status == EINTR ||
                status == ETIME || status == ETIMEDOUT,
                status, "cond_timedwait");

#ifdef ASSERT
  pthread_sigmask(SIG_SETMASK, &oldsigs, NULL);
#endif

  // Consume any permit granted while we waited, then release the lock.
  _counter = 0 ;
  status = pthread_mutex_unlock(_mutex) ;
  assert_status(status == 0, status, "invariant") ;
  // If externally suspended while waiting, re-suspend
  if (jt->handle_special_suspend_equivalent_condition()) {
    jt->java_suspend_self();
  }

}
|
4728 |
||
4729 |
void Parker::unpark() { |
|
4730 |
int s, status ; |
|
4731 |
status = pthread_mutex_lock(_mutex); |
|
4732 |
assert (status == 0, "invariant") ; |
|
4733 |
s = _counter; |
|
4734 |
_counter = 1; |
|
4735 |
if (s < 1) { |
|
4736 |
if (WorkAroundNPTLTimedWaitHang) { |
|
4737 |
status = pthread_cond_signal (_cond) ; |
|
4738 |
assert (status == 0, "invariant") ; |
|
4739 |
status = pthread_mutex_unlock(_mutex); |
|
4740 |
assert (status == 0, "invariant") ; |
|
4741 |
} else { |
|
4742 |
status = pthread_mutex_unlock(_mutex); |
|
4743 |
assert (status == 0, "invariant") ; |
|
4744 |
status = pthread_cond_signal (_cond) ; |
|
4745 |
assert (status == 0, "invariant") ; |
|
4746 |
} |
|
4747 |
} else { |
|
4748 |
pthread_mutex_unlock(_mutex); |
|
4749 |
assert (status == 0, "invariant") ; |
|
4750 |
} |
|
4751 |
} |
|
4752 |
||
4753 |
||
4754 |
extern char** environ; |
|
4755 |
||
4756 |
#ifndef __NR_fork |
|
4757 |
#define __NR_fork IA32_ONLY(2) IA64_ONLY(not defined) AMD64_ONLY(57) |
|
4758 |
#endif |
|
4759 |
||
4760 |
#ifndef __NR_execve |
|
4761 |
#define __NR_execve IA32_ONLY(11) IA64_ONLY(1033) AMD64_ONLY(59) |
|
4762 |
#endif |
|
4763 |
||
4764 |
// Run the specified command in a separate process. Return its exit value,
// or -1 on failure (e.g. can't fork a new process).
// Unlike system(), this function can be called from signal handler. It
// doesn't block SIGINT et al.
int os::fork_and_exec(char* cmd) {
  const char * argv[4] = {"sh", "-c", cmd, NULL};

  // fork() in LinuxThreads/NPTL is not async-safe. It needs to run
  // pthread_atfork handlers and reset pthread library. All we need is a
  // separate process to execve. Make a direct syscall to fork process.
  // On IA64 there's no fork syscall, we have to use fork() and hope for
  // the best...
  pid_t pid = NOT_IA64(syscall(__NR_fork);)
              IA64_ONLY(fork();)

  if (pid < 0) {
    // fork failed
    return -1;

  } else if (pid == 0) {
    // child process

    // execve() in LinuxThreads will call pthread_kill_other_threads_np()
    // first to kill every thread on the thread list. Because this list is
    // not reset by fork() (see notes above), execve() will instead kill
    // every thread in the parent process. We know this is the only thread
    // in the new process, so make a system call directly.
    // IA64 should use normal execve() from glibc to match the glibc fork()
    // above.
    NOT_IA64(syscall(__NR_execve, "/bin/sh", argv, environ);)
    IA64_ONLY(execve("/bin/sh", (char* const*)argv, environ);)

    // execve failed
    _exit(-1);

  } else {
    // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
    // care about the actual exit code, for now.

    int status;

    // Wait for the child process to exit. This returns immediately if
    // the child has already exited. */
    while (waitpid(pid, &status, 0) < 0) {
      switch (errno) {
      case ECHILD: return 0;       // child already reaped elsewhere
      case EINTR: break;           // interrupted by a signal: retry
      default: return -1;
      }
    }

    if (WIFEXITED(status)) {
      // The child exited normally; get its exit code.
      return WEXITSTATUS(status);
    } else if (WIFSIGNALED(status)) {
      // The child exited because of a signal
      // The best value to return is 0x80 + signal number,
      // because that is what all Unix shells do, and because
      // it allows callers to distinguish between process exit and
      // process death by signal.
      return 0x80 + WTERMSIG(status);
    } else {
      // Unknown exit code; pass it through
      return status;
    }
  }
}