author:      coleenp
date:        Sun, 13 Apr 2008 17:43:42 -0400
changeset:   360:21d113ecbf6a
parent:      252:050143a0dbfb
child:       389:a44227868a4a
permissions: -rw-r--r--
/*
 * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#ifdef _WIN64
// Must be at least Windows 2000 or XP to use VectoredExceptions
#define _WIN32_WINNT 0x500
#endif

// do not include precompiled header file
# include "incls/_os_windows.cpp.incl"

#ifdef _DEBUG
#include <crtdbg.h>
#endif


#include <windows.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/timeb.h>
#include <objidl.h>
#include <shlobj.h>

#include <malloc.h>
#include <signal.h>
#include <direct.h>
#include <errno.h>
#include <fcntl.h>
#include <io.h>
#include <process.h>              // For _beginthreadex(), _endthreadex()
#include <imagehlp.h>             // For os::dll_address_to_function_name

/* for enumerating dll libraries */
#include <tlhelp32.h>
#include <vdmdbg.h>

// for timer info max values which include all bits
#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)

// For DLL loading/load error detection
// Values of PE COFF
#define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c
#define IMAGE_FILE_SIGNATURE_LENGTH 4

static HANDLE main_process;
static HANDLE main_thread;
static int    main_thread_id;

static FILETIME process_creation_time;
static FILETIME process_exit_time;
static FILETIME process_user_time;
static FILETIME process_kernel_time;

#ifdef _WIN64
PVOID  topLevelVectoredExceptionHandler = NULL;
#endif

#ifdef _M_IA64
#define __CPU__ ia64
#elif _M_AMD64
#define __CPU__ amd64
#else
#define __CPU__ i486
#endif

// save DLL module handle, used by GetModuleFileName

HINSTANCE vm_lib_handle;
static int getLastErrorString(char *buf, size_t len);

BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
  switch (reason) {
    case DLL_PROCESS_ATTACH:
      vm_lib_handle = hinst;
      if (ForceTimeHighResolution)
        timeBeginPeriod(1L);
      break;
    case DLL_PROCESS_DETACH:
      if (ForceTimeHighResolution)
        timeEndPeriod(1L);
#ifdef _WIN64
      if (topLevelVectoredExceptionHandler != NULL) {
        RemoveVectoredExceptionHandler(topLevelVectoredExceptionHandler);
        topLevelVectoredExceptionHandler = NULL;
      }
#endif
      break;
    default:
      break;
  }
  return true;
}

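// A FILETIME holds a 64-bit count of 100-nanosecond ticks split across two
// 32-bit words.  The helper below folds the two halves together and divides
// by 10^7 (ticks per second), yielding the elapsed time in seconds as a double.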
static inline double fileTimeAsDouble(FILETIME* time) {
  const double high  = (double) ((unsigned int) ~0);
  const double split = 10000000.0;
  double result = (time->dwLowDateTime / split) +
                   time->dwHighDateTime * (high/split);
  return result;
}

// Implementation of os

bool os::getenv(const char* name, char* buffer, int len) {
  int result = GetEnvironmentVariable(name, buffer, len);
  return result > 0 && result < len;
}


// No setuid programs under Windows.
bool os::have_special_privileges() {
  return false;
}


// This method is a periodic task to check for misbehaving JNI applications
// under CheckJNI; we can add any periodic checks here.
// For Windows, at the moment it does nothing.
void os::run_periodic_checks() {
  return;
}

#ifndef _WIN64
LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo);
#endif
void os::init_system_properties_values() {
  /* sysclasspath, java_home, dll_dir */
  {
    char *home_path;
    char *dll_path;
    char *pslash;
    char *bin = "\\bin";
    char home_dir[MAX_PATH];

    if (!getenv("_ALT_JAVA_HOME_DIR", home_dir, MAX_PATH)) {
      os::jvm_path(home_dir, sizeof(home_dir));
      // Found the full path to jvm[_g].dll.
      // Now cut the path to <java_home>/jre if we can.
      *(strrchr(home_dir, '\\')) = '\0';  /* get rid of \jvm.dll */
      pslash = strrchr(home_dir, '\\');
      if (pslash != NULL) {
        *pslash = '\0';                   /* get rid of \{client|server} */
        pslash = strrchr(home_dir, '\\');
        if (pslash != NULL)
          *pslash = '\0';                 /* get rid of \bin */
      }
    }

    home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1);
    if (home_path == NULL)
      return;
    strcpy(home_path, home_dir);
    Arguments::set_java_home(home_path);

    dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1);
    if (dll_path == NULL)
      return;
    strcpy(dll_path, home_dir);
    strcat(dll_path, bin);
    Arguments::set_dll_dir(dll_path);

    if (!set_boot_path('\\', ';'))
      return;
  }

  /* library_path */
  #define EXT_DIR "\\lib\\ext"
  #define BIN_DIR "\\bin"
  #define PACKAGE_DIR "\\Sun\\Java"
  {
    /* Win32 library search order (See the documentation for LoadLibrary):
     *
     * 1. The directory from which the application is loaded.
     * 2. The current directory
     * 3. The system wide Java Extensions directory (Java only)
     * 4. System directory (GetSystemDirectory)
     * 5. Windows directory (GetWindowsDirectory)
     * 6. The PATH environment variable
     */

    char *library_path;
    char tmp[MAX_PATH];
    char *path_str = ::getenv("PATH");

    library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) +
        sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10);

    library_path[0] = '\0';

    GetModuleFileName(NULL, tmp, sizeof(tmp));
    *(strrchr(tmp, '\\')) = '\0';
    strcat(library_path, tmp);

    strcat(library_path, ";.");

    GetWindowsDirectory(tmp, sizeof(tmp));
    strcat(library_path, ";");
    strcat(library_path, tmp);
    strcat(library_path, PACKAGE_DIR BIN_DIR);

    GetSystemDirectory(tmp, sizeof(tmp));
    strcat(library_path, ";");
    strcat(library_path, tmp);

    GetWindowsDirectory(tmp, sizeof(tmp));
    strcat(library_path, ";");
    strcat(library_path, tmp);

    if (path_str) {
      strcat(library_path, ";");
      strcat(library_path, path_str);
    }

    Arguments::set_library_path(library_path);
    FREE_C_HEAP_ARRAY(char, library_path);
  }

  /* Default extensions directory */
  {
    char path[MAX_PATH];
    char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1];
    GetWindowsDirectory(path, MAX_PATH);
    sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR,
        path, PACKAGE_DIR, EXT_DIR);
    Arguments::set_ext_dirs(buf);
  }
  #undef EXT_DIR
  #undef BIN_DIR
  #undef PACKAGE_DIR

  /* Default endorsed standards directory. */
  {
    #define ENDORSED_DIR "\\lib\\endorsed"
    size_t len = strlen(Arguments::get_java_home()) + sizeof(ENDORSED_DIR);
    char * buf = NEW_C_HEAP_ARRAY(char, len);
    sprintf(buf, "%s%s", Arguments::get_java_home(), ENDORSED_DIR);
    Arguments::set_endorsed_dirs(buf);
    #undef ENDORSED_DIR
  }

#ifndef _WIN64
  SetUnhandledExceptionFilter(Handle_FLT_Exception);
#endif

  // Done
  return;
}

void os::breakpoint() {
  DebugBreak();
}

// Invoked from the BREAKPOINT Macro
extern "C" void breakpoint() {
  os::breakpoint();
}

// Returns an estimate of the current stack pointer. Result must be guaranteed
// to point into the calling thread's stack, and be no lower than the current
// stack pointer.

address os::current_stack_pointer() {
  int dummy;
  address sp = (address)&dummy;
  return sp;
}

// os::current_stack_base()
//
//   Returns the base of the stack, which is the stack's
//   starting address.  This function must be called
//   while running on the stack of the thread being queried.

address os::current_stack_base() { |
|
296 |
MEMORY_BASIC_INFORMATION minfo; |
|
297 |
address stack_bottom; |
|
298 |
size_t stack_size; |
|
299 |
||
300 |
VirtualQuery(&minfo, &minfo, sizeof(minfo)); |
|
301 |
stack_bottom = (address)minfo.AllocationBase; |
|
302 |
stack_size = minfo.RegionSize; |
|
303 |
||
304 |
// Add up the sizes of all the regions with the same |
|
305 |
// AllocationBase. |
|
306 |
while( 1 ) |
|
307 |
{ |
|
308 |
VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo)); |
|
309 |
if ( stack_bottom == (address)minfo.AllocationBase ) |
|
310 |
stack_size += minfo.RegionSize; |
|
311 |
else |
|
312 |
break; |
|
313 |
} |
|
314 |
||
315 |
#ifdef _M_IA64 |
|
316 |
// IA64 has memory and register stacks |
|
317 |
stack_size = stack_size / 2; |
|
318 |
#endif |
|
319 |
return stack_bottom + stack_size; |
|
320 |
} |
|
321 |
||
322 |
size_t os::current_stack_size() { |
|
323 |
size_t sz; |
|
324 |
MEMORY_BASIC_INFORMATION minfo; |
|
325 |
VirtualQuery(&minfo, &minfo, sizeof(minfo)); |
|
326 |
sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase; |
|
327 |
return sz; |
|
328 |
} |
|
329 |
||
330 |
||
331 |
LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo); |
|
332 |
||
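// Worked example for the cache-line staggering used in java_start() below:
// (pid ^ counter) & 7 yields a value in 0..7, so the _alloca() call shifts each
// new thread's initial stack frame down by 0 to 7 * 128 = 896 bytes relative to
// other threads, spreading hot frames across different cache line indices.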
// Thread start routine for all new Java threads
static unsigned __stdcall java_start(Thread* thread) {
  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads of the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.
  static int counter = 0;
  int pid = os::current_process_id();
  _alloca(((pid ^ counter++) & 7) * 128);

  OSThread* osthr = thread->osthread();
  assert(osthr->get_state() == RUNNABLE, "invalid os thread state");

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }


  if (UseVectoredExceptions) {
    // If we are using vectored exceptions we don't need to set a SEH
    thread->run();
  }
  else {
    // Install a win32 structured exception handler around every thread created
    // by the VM, so the VM can generate an error dump when an exception occurs in a
    // non-Java thread (e.g. VM thread).
    __try {
      thread->run();
    } __except(topLevelExceptionFilter(
               (_EXCEPTION_POINTERS*)_exception_info())) {
      // Nothing to do.
    }
  }

  // One less thread is executing
  // When the VMThread gets here, the main thread may have already exited
  // which frees the CodeHeap containing the Atomic::add code
  if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
    Atomic::dec_ptr((intptr_t*)&os::win32::_os_thread_count);
  }

  return 0;
}

static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle, int thread_id) {
  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) return NULL;

  // Initialize support for Java interrupts
  HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
  if (interrupt_event == NULL) {
    delete osthread;
    return NULL;
  }
  osthread->set_interrupt_event(interrupt_event);

  // Store info on the Win32 thread into the OSThread
  osthread->set_thread_handle(thread_handle);
  osthread->set_thread_id(thread_id);

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // Initial thread state is INITIALIZED, not SUSPENDED
  osthread->set_state(INITIALIZED);

  return osthread;
}


bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif
  HANDLE thread_h;
  if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(),
                       &thread_h, THREAD_ALL_ACCESS, false, 0)) {
    fatal("DuplicateHandle failed\n");
  }
  OSThread* osthread = create_os_thread(thread, thread_h,
                                        (int)current_thread_id());
  if (osthread == NULL) {
    return false;
  }

  // Initial thread state is RUNNABLE
  osthread->set_state(RUNNABLE);

  thread->set_osthread(osthread);
  return true;
}

bool os::create_main_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif
  if (_starting_thread == NULL) {
    _starting_thread = create_os_thread(thread, main_thread, main_thread_id);
    if (_starting_thread == NULL) {
      return false;
    }
  }

  // The primordial thread is runnable from the start
  _starting_thread->set_state(RUNNABLE);

  thread->set_osthread(_starting_thread);
  return true;
}

// Allocate and initialize a new OSThread
bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
  unsigned thread_id;

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  // Initialize support for Java interrupts
  HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
  if (interrupt_event == NULL) {
    delete osthread;
    return NULL;
  }
  osthread->set_interrupt_event(interrupt_event);
  osthread->set_interrupted(false);

  thread->set_osthread(osthread);

  if (stack_size == 0) {
    switch (thr_type) {
    case os::java_thread:
      // Java threads use ThreadStackSize, whose default value can be changed with the flag -Xss
      if (JavaThread::stack_size_at_create() > 0)
        stack_size = JavaThread::stack_size_at_create();
      break;
    case os::compiler_thread:
      if (CompilerThreadStackSize > 0) {
        stack_size = (size_t)(CompilerThreadStackSize * K);
        break;
      } // else fall through:
        // use VMThreadStackSize if CompilerThreadStackSize is not defined
    case os::vm_thread:
    case os::pgc_thread:
    case os::cgc_thread:
    case os::watcher_thread:
      if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
      break;
    }
  }

  // Create the Win32 thread
  //
  // Contrary to what the MSDN documentation says, "stack_size" in _beginthreadex()
  // does not specify stack size. Instead, it specifies the size of the
  // initially committed space. The stack size is determined by the
  // PE header in the executable. If the committed "stack_size" is larger
  // than the default value in the PE header, the stack is rounded up to the
  // nearest multiple of 1MB. For example, if the launcher has a default
  // stack size of 320k, specifying any size less than 320k does not
  // affect the actual stack size at all, it only affects the initial
  // commitment. On the other hand, specifying 'stack_size' larger than the
  // default value may cause a significant increase in memory usage, because
  // not only will the stack space be rounded up to a multiple of 1MB, but the
  // entire space is also committed upfront.
  //
  // Finally, Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION'
  // for CreateThread() that can treat 'stack_size' as stack size. However we
  // are not supposed to call CreateThread() directly according to the MSDN
  // documentation, because the JVM uses the C runtime library. The good news is
  // that the flag appears to work with _beginthreadex() as well.

#ifndef STACK_SIZE_PARAM_IS_A_RESERVATION
#define STACK_SIZE_PARAM_IS_A_RESERVATION  (0x10000)
#endif

  HANDLE thread_handle =
    (HANDLE)_beginthreadex(NULL,
                           (unsigned)stack_size,
                           (unsigned (__stdcall *)(void*)) java_start,
                           thread,
                           CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION,
                           &thread_id);
  if (thread_handle == NULL) {
    // perhaps STACK_SIZE_PARAM_IS_A_RESERVATION is not supported, try again
    // without the flag.
    thread_handle =
    (HANDLE)_beginthreadex(NULL,
                           (unsigned)stack_size,
                           (unsigned (__stdcall *)(void*)) java_start,
                           thread,
                           CREATE_SUSPENDED,
                           &thread_id);
  }
  if (thread_handle == NULL) {
    // Need to clean up stuff we've allocated so far
    CloseHandle(osthread->interrupt_event());
    thread->set_osthread(NULL);
    delete osthread;
    return NULL;
  }

  Atomic::inc_ptr((intptr_t*)&os::win32::_os_thread_count);

  // Store info on the Win32 thread into the OSThread
  osthread->set_thread_handle(thread_handle);
  osthread->set_thread_id(thread_id);

  // Initial thread state is INITIALIZED, not SUSPENDED
  osthread->set_state(INITIALIZED);

  // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
  return true;
}


// Free Win32 resources related to the OSThread
void os::free_thread(OSThread* osthread) {
  assert(osthread != NULL, "osthread not set");
  CloseHandle(osthread->thread_handle());
  CloseHandle(osthread->interrupt_event());
  delete osthread;
}


static int    has_performance_count = 0;
static jlong first_filetime;
static jlong initial_performance_count;
static jlong performance_frequency;


jlong as_long(LARGE_INTEGER x) {
  jlong result = 0; // initialization to avoid warning
  set_high(&result, x.HighPart);
  set_low(&result,  x.LowPart);
  return result;
}


jlong os::elapsed_counter() {
  LARGE_INTEGER count;
  if (has_performance_count) {
    QueryPerformanceCounter(&count);
    return as_long(count) - initial_performance_count;
  } else {
    FILETIME wt;
    GetSystemTimeAsFileTime(&wt);
    return (jlong_from(wt.dwHighDateTime, wt.dwLowDateTime) - first_filetime);
  }
}


jlong os::elapsed_frequency() {
  if (has_performance_count) {
    return performance_frequency;
  } else {
    // the FILETIME time is the number of 100-nanosecond intervals since January 1, 1601.
    return 10000000;
  }
}


julong os::available_memory() {
  return win32::available_memory();
}

julong os::win32::available_memory() {
  // FIXME: GlobalMemoryStatus() may return incorrect value if total memory
  // is larger than 4GB
  MEMORYSTATUS ms;
  GlobalMemoryStatus(&ms);

  return (julong)ms.dwAvailPhys;
}

julong os::physical_memory() {
  return win32::physical_memory();
}

julong os::allocatable_physical_memory(julong size) {
#ifdef _LP64
  return size;
#else
  // Limit to 1400m because of the 2gb address space wall
  return MIN2(size, (julong)1400*M);
#endif
}

// VC6 lacks DWORD_PTR
#if _MSC_VER < 1300
typedef UINT_PTR DWORD_PTR;
#endif

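// The loop below counts the set bits in the process affinity mask using the
// classic x &= (x - 1) trick: each iteration clears the lowest set bit, so the
// number of iterations equals the number of processors this process may use.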
int os::active_processor_count() {
  DWORD_PTR lpProcessAffinityMask = 0;
  DWORD_PTR lpSystemAffinityMask = 0;
  int proc_count = processor_count();
  if (proc_count <= sizeof(UINT_PTR) * BitsPerByte &&
      GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) {
    // Number of active processors is the number of bits in the process affinity mask
    int bitcount = 0;
    while (lpProcessAffinityMask != 0) {
      lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1);
      bitcount++;
    }
    return bitcount;
  } else {
    return proc_count;
  }
}

bool os::distribute_processes(uint length, uint* distribution) {
  // Not yet implemented.
  return false;
}

bool os::bind_to_processor(uint processor_id) {
  // Not yet implemented.
  return false;
}

static void initialize_performance_counter() {
  LARGE_INTEGER count;
  if (QueryPerformanceFrequency(&count)) {
    has_performance_count = 1;
    performance_frequency = as_long(count);
    QueryPerformanceCounter(&count);
    initial_performance_count = as_long(count);
  } else {
    has_performance_count = 0;
    FILETIME wt;
    GetSystemTimeAsFileTime(&wt);
    first_filetime = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
  }
}


double os::elapsedTime() {
  return (double) elapsed_counter() / (double) elapsed_frequency();
}


// Windows format:
//   The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601.
// Java format:
//   Java standards require the number of milliseconds since 1/1/1970

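// The two epochs are 11,644,473,600 seconds apart; expressed in 100-nanosecond
// FILETIME units that is 11,644,473,600 * 10^7 = 116,444,736,000,000,000,
// which is the constant _offset below (and what offset() recomputes in debug builds).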
// Constant offset - calculated using offset()
static jlong  _offset   = 116444736000000000;
// Fake time counter for reproducible results when debugging
static jlong  fake_time = 0;

#ifdef ASSERT
// Just to be safe, recalculate the offset in debug mode
static jlong _calculated_offset = 0;
static int   _has_calculated_offset = 0;

jlong offset() {
  if (_has_calculated_offset) return _calculated_offset;
  SYSTEMTIME java_origin;
  java_origin.wYear          = 1970;
  java_origin.wMonth         = 1;
  java_origin.wDayOfWeek     = 0; // ignored
  java_origin.wDay           = 1;
  java_origin.wHour          = 0;
  java_origin.wMinute        = 0;
  java_origin.wSecond        = 0;
  java_origin.wMilliseconds  = 0;
  FILETIME jot;
  if (!SystemTimeToFileTime(&java_origin, &jot)) {
    fatal1("Error = %d\nWindows error", GetLastError());
  }
  _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime);
  _has_calculated_offset = 1;
  assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal");
  return _calculated_offset;
}
#else
jlong offset() {
  return _offset;
}
#endif

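// Converting between the two formats is therefore a shift by offset() plus a
// scale by 10,000, the number of 100-nanosecond ticks per millisecond, as done
// by the two helpers below.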
jlong windows_to_java_time(FILETIME wt) {
  jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
  return (a - offset()) / 10000;
}

FILETIME java_to_windows_time(jlong l) {
  jlong a = (l * 10000) + offset();
  FILETIME result;
  result.dwHighDateTime = high(a);
  result.dwLowDateTime  = low(a);
  return result;
}

jlong os::javaTimeMillis() {
  if (UseFakeTimers) {
    return fake_time++;
  } else {
    FILETIME wt;
    GetSystemTimeAsFileTime(&wt);
    return windows_to_java_time(wt);
  }
}

#define NANOS_PER_SEC         CONST64(1000000000)
#define NANOS_PER_MILLISEC    1000000
jlong os::javaTimeNanos() {
  if (!has_performance_count) {
    return javaTimeMillis() * NANOS_PER_MILLISEC; // the best we can do.
  } else {
    LARGE_INTEGER current_count;
    QueryPerformanceCounter(&current_count);
    double current = as_long(current_count);
    double freq = performance_frequency;
    jlong time = (jlong)((current/freq) * NANOS_PER_SEC);
    return time;
  }
}

void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
  if (!has_performance_count) {
    // javaTimeMillis() doesn't have much precision,
    // but it is not going to wrap -- so all 64 bits
    info_ptr->max_value = ALL_64_BITS;

    // this is a wall clock timer, so may skip
    info_ptr->may_skip_backward = true;
    info_ptr->may_skip_forward = true;
  } else {
    jlong freq = performance_frequency;
    if (freq < NANOS_PER_SEC) {
      // the performance counter is 64 bits and we will
      // be multiplying it -- so no wrap in 64 bits
      info_ptr->max_value = ALL_64_BITS;
    } else if (freq > NANOS_PER_SEC) {
      // use the max value the counter can reach to
      // determine the max value which could be returned
      julong max_counter = (julong)ALL_64_BITS;
      info_ptr->max_value = (jlong)(max_counter / (freq / NANOS_PER_SEC));
    } else {
      // the performance counter is 64 bits and we will
      // be using it directly -- so no wrap in 64 bits
      info_ptr->max_value = ALL_64_BITS;
    }

    // using a counter, so no skipping
    info_ptr->may_skip_backward = false;
    info_ptr->may_skip_forward = false;
  }
  info_ptr->kind = JVMTI_TIMER_ELAPSED;                // elapsed not CPU time
}

char* os::local_time_string(char *buf, size_t buflen) {
  SYSTEMTIME st;
  GetLocalTime(&st);
  jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
               st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond);
  return buf;
}

bool os::getTimesSecs(double* process_real_time,
                      double* process_user_time,
                      double* process_system_time) {
  HANDLE h_process = GetCurrentProcess();
  FILETIME create_time, exit_time, kernel_time, user_time;
  BOOL result = GetProcessTimes(h_process,
                                &create_time,
                                &exit_time,
                                &kernel_time,
                                &user_time);
  if (result != 0) {
    FILETIME wt;
    GetSystemTimeAsFileTime(&wt);
    jlong rtc_millis = windows_to_java_time(wt);
    jlong user_millis = windows_to_java_time(user_time);
    jlong system_millis = windows_to_java_time(kernel_time);
    *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS);
    *process_user_time = ((double) user_millis) / ((double) MILLIUNITS);
    *process_system_time = ((double) system_millis) / ((double) MILLIUNITS);
    return true;
  } else {
    return false;
  }
}

void os::shutdown() {

  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }
}

void os::abort(bool dump_core)
{
  os::shutdown();
  // no core dump on Windows
  ::exit(1);
}

// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  _exit(-1);
}

// Directory routines copied from src/win32/native/java/io/dirent_md.c
//  * dirent_md.c       1.15 00/02/02
//
// The declarations for DIR and struct dirent are in jvm_win32.h.

/* Caller must have already run dirname through JVM_NativePath, which removes
   duplicate slashes and converts all instances of '/' into '\\'. */

DIR *
os::opendir(const char *dirname)
{
  assert(dirname != NULL, "just checking");   // hotspot change
  DIR *dirp = (DIR *)malloc(sizeof(DIR));
  DWORD fattr;                                // hotspot change
  char alt_dirname[4] = { 0, 0, 0, 0 };

  if (dirp == 0) {
    errno = ENOMEM;
    return 0;
  }

  /*
   * Win32 accepts "\" in its POSIX stat(), but refuses to treat it
   * as a directory in FindFirstFile().  We detect this case here and
   * prepend the current drive name.
   */
  if (dirname[1] == '\0' && dirname[0] == '\\') {
    alt_dirname[0] = _getdrive() + 'A' - 1;
    alt_dirname[1] = ':';
    alt_dirname[2] = '\\';
    alt_dirname[3] = '\0';
    dirname = alt_dirname;
  }

  dirp->path = (char *)malloc(strlen(dirname) + 5);
  if (dirp->path == 0) {
    free(dirp);
    errno = ENOMEM;
    return 0;
  }
  strcpy(dirp->path, dirname);

  fattr = GetFileAttributes(dirp->path);
  if (fattr == 0xffffffff) {
    free(dirp->path);
    free(dirp);
    errno = ENOENT;
    return 0;
  } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
    free(dirp->path);
    free(dirp);
    errno = ENOTDIR;
    return 0;
  }

  /* Append "*.*", or possibly "\\*.*", to path */
  if (dirp->path[1] == ':'
      && (dirp->path[2] == '\0'
          || (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) {
    /* No '\\' needed for cases like "Z:" or "Z:\" */
    strcat(dirp->path, "*.*");
  } else {
    strcat(dirp->path, "\\*.*");
  }

  dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
  if (dirp->handle == INVALID_HANDLE_VALUE) {
    if (GetLastError() != ERROR_FILE_NOT_FOUND) {
      free(dirp->path);
      free(dirp);
      errno = EACCES;
      return 0;
    }
  }
  return dirp;
}

/* parameter dbuf unused on Windows */

struct dirent *
os::readdir(DIR *dirp, dirent *dbuf)
{
  assert(dirp != NULL, "just checking");      // hotspot change
  if (dirp->handle == INVALID_HANDLE_VALUE) {
    return 0;
  }

  strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);

  if (!FindNextFile(dirp->handle, &dirp->find_data)) {
    if (GetLastError() == ERROR_INVALID_HANDLE) {
      errno = EBADF;
      return 0;
    }
    FindClose(dirp->handle);
    dirp->handle = INVALID_HANDLE_VALUE;
  }

  return &dirp->dirent;
}

int
os::closedir(DIR *dirp)
{
  assert(dirp != NULL, "just checking");      // hotspot change
  if (dirp->handle != INVALID_HANDLE_VALUE) {
    if (!FindClose(dirp->handle)) {
      errno = EBADF;
      return -1;
    }
    dirp->handle = INVALID_HANDLE_VALUE;
  }
  free(dirp->path);
  free(dirp);
  return 0;
}

const char* os::dll_file_extension() { return ".dll"; }

const char * os::get_temp_directory()
{
  static char path_buf[MAX_PATH];
  if (GetTempPath(MAX_PATH, path_buf) > 0)
    return path_buf;
  else {
    path_buf[0] = '\0';
    return path_buf;
  }
}

// Needs to be in os specific directory because windows requires another
// header file <direct.h>
const char* os::get_current_directory(char *buf, int buflen) {
  return _getcwd(buf, buflen);
}

//-----------------------------------------------------------
// Helper functions for fatal error handler

// The following library functions are resolved dynamically at runtime:

// PSAPI functions, for Windows NT, 2000, XP

// psapi.h doesn't come with Visual Studio 6; it can be downloaded as Platform
// SDK from Microsoft.  Here are the definitions copied from psapi.h
typedef struct _MODULEINFO {
    LPVOID lpBaseOfDll;
    DWORD SizeOfImage;
    LPVOID EntryPoint;
} MODULEINFO, *LPMODULEINFO;

static BOOL  (WINAPI *_EnumProcessModules)  ( HANDLE, HMODULE *, DWORD, LPDWORD );
static DWORD (WINAPI *_GetModuleFileNameEx) ( HANDLE, HMODULE, LPTSTR, DWORD );
static BOOL  (WINAPI *_GetModuleInformation)( HANDLE, HMODULE, LPMODULEINFO, DWORD );

// ToolHelp Functions, for Windows 95, 98 and ME

static HANDLE(WINAPI *_CreateToolhelp32Snapshot)(DWORD,DWORD) ;
static BOOL  (WINAPI *_Module32First)           (HANDLE,LPMODULEENTRY32) ;
static BOOL  (WINAPI *_Module32Next)            (HANDLE,LPMODULEENTRY32) ;

bool _has_psapi;
bool _psapi_init = false;
bool _has_toolhelp;

static bool _init_psapi() {
  HINSTANCE psapi = LoadLibrary( "PSAPI.DLL" ) ;
  if( psapi == NULL ) return false ;

  _EnumProcessModules = CAST_TO_FN_PTR(
      BOOL(WINAPI *)(HANDLE, HMODULE *, DWORD, LPDWORD),
      GetProcAddress(psapi, "EnumProcessModules")) ;
  _GetModuleFileNameEx = CAST_TO_FN_PTR(
      DWORD (WINAPI *)(HANDLE, HMODULE, LPTSTR, DWORD),
      GetProcAddress(psapi, "GetModuleFileNameExA"));
  _GetModuleInformation = CAST_TO_FN_PTR(
      BOOL (WINAPI *)(HANDLE, HMODULE, LPMODULEINFO, DWORD),
      GetProcAddress(psapi, "GetModuleInformation"));

  _has_psapi = (_EnumProcessModules && _GetModuleFileNameEx && _GetModuleInformation);
  _psapi_init = true;
  return _has_psapi;
}

static bool _init_toolhelp() {
  HINSTANCE kernel32 = LoadLibrary("Kernel32.DLL") ;
  if (kernel32 == NULL) return false ;

  _CreateToolhelp32Snapshot = CAST_TO_FN_PTR(
      HANDLE(WINAPI *)(DWORD,DWORD),
      GetProcAddress(kernel32, "CreateToolhelp32Snapshot"));
  _Module32First = CAST_TO_FN_PTR(
      BOOL(WINAPI *)(HANDLE,LPMODULEENTRY32),
      GetProcAddress(kernel32, "Module32First" ));
  _Module32Next = CAST_TO_FN_PTR(
      BOOL(WINAPI *)(HANDLE,LPMODULEENTRY32),
      GetProcAddress(kernel32, "Module32Next" ));

  _has_toolhelp = (_CreateToolhelp32Snapshot && _Module32First && _Module32Next);
  return _has_toolhelp;
}

#ifdef _WIN64
// Helper routine which returns true if the address is
// within the NTDLL address space.
//
static bool _addr_in_ntdll( address addr )
{
  HMODULE hmod;
  MODULEINFO minfo;

  hmod = GetModuleHandle("NTDLL.DLL");
  if ( hmod == NULL ) return false;
  if ( !_GetModuleInformation( GetCurrentProcess(), hmod,
                               &minfo, sizeof(MODULEINFO)) )
    return false;

  if ( (addr >= minfo.lpBaseOfDll) &&
       (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage)))
    return true;
  else
    return false;
}
#endif


// Enumerate all modules for a given process ID
//
// Notice that Windows 95/98/Me and Windows NT/2000/XP have
// different APIs for doing this. We use PSAPI.DLL on NT based
// Windows and ToolHelp on 95/98/Me.

// Callback function that is called by enumerate_modules() on
// every DLL module.
// Input parameters:
//    int       pid,
//    char*     module_file_name,
//    address   module_base_addr,
//    unsigned  module_size,
//    void*     param
typedef int (*EnumModulesCallbackFunc)(int, char *, address, unsigned, void *);

// enumerate_modules for Windows NT, using PSAPI
static int _enumerate_modules_winnt( int pid, EnumModulesCallbackFunc func, void * param)
{
  HANDLE   hProcess ;

# define MAX_NUM_MODULES 128
  HMODULE     modules[MAX_NUM_MODULES];
  static char filename[ MAX_PATH ];
  int         result = 0;

  if (!_has_psapi && (_psapi_init || !_init_psapi())) return 0;

  hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
                         FALSE, pid ) ;
  if (hProcess == NULL) return 0;

  DWORD size_needed;
  if (!_EnumProcessModules(hProcess, modules,
                           sizeof(modules), &size_needed)) {
    CloseHandle( hProcess );
    return 0;
  }

  // number of modules that are currently loaded
  int num_modules = size_needed / sizeof(HMODULE);

  for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
    // Get Full pathname:
    if (!_GetModuleFileNameEx(hProcess, modules[i],
                              filename, sizeof(filename))) {
      filename[0] = '\0';
    }

    MODULEINFO modinfo;
    if (!_GetModuleInformation(hProcess, modules[i],
                               &modinfo, sizeof(modinfo))) {
      modinfo.lpBaseOfDll = NULL;
      modinfo.SizeOfImage = 0;
    }

    // Invoke callback function
    result = func(pid, filename, (address)modinfo.lpBaseOfDll,
                  modinfo.SizeOfImage, param);
    if (result) break;
  }

  CloseHandle( hProcess ) ;
  return result;
}


// enumerate_modules for Windows 95/98/ME, using TOOLHELP
static int _enumerate_modules_windows( int pid, EnumModulesCallbackFunc func, void *param)
{
  HANDLE                hSnapShot ;
  static MODULEENTRY32  modentry ;
  int                   result = 0;

  if (!_has_toolhelp) return 0;

  // Get a handle to a Toolhelp snapshot of the system
  hSnapShot = _CreateToolhelp32Snapshot(TH32CS_SNAPMODULE, pid ) ;
  if( hSnapShot == INVALID_HANDLE_VALUE ) {
    return FALSE ;
  }

  // iterate through all modules
  modentry.dwSize = sizeof(MODULEENTRY32) ;
  bool not_done = _Module32First( hSnapShot, &modentry ) != 0;

  while( not_done ) {
    // invoke the callback
    result = func(pid, modentry.szExePath, (address)modentry.modBaseAddr,
                  modentry.modBaseSize, param);
    if (result) break;

    modentry.dwSize = sizeof(MODULEENTRY32) ;
    not_done = _Module32Next( hSnapShot, &modentry ) != 0;
  }

  CloseHandle(hSnapShot);
  return result;
}

int enumerate_modules( int pid, EnumModulesCallbackFunc func, void * param )
{
  // Get current process ID if caller doesn't provide it.
  if (!pid) pid = os::current_process_id();

  if (os::win32::is_nt()) return _enumerate_modules_winnt  (pid, func, param);
  else                    return _enumerate_modules_windows(pid, func, param);
}

struct _modinfo {
  address addr;
  char*   full_path;   // point to a char buffer
  int     buflen;      // size of the buffer
  address base_addr;
};

static int _locate_module_by_addr(int pid, char * mod_fname, address base_addr,
                                  unsigned size, void * param) {
  struct _modinfo *pmod = (struct _modinfo *)param;
  if (!pmod) return -1;

  if (base_addr <= pmod->addr &&
      base_addr+size > pmod->addr) {
    // if a buffer is provided, copy path name to the buffer
    if (pmod->full_path) {
      jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
    }
    pmod->base_addr = base_addr;
    return 1;
  }
  return 0;
}

bool os::dll_address_to_library_name(address addr, char* buf,
                                     int buflen, int* offset) {
  // NOTE: the reason we don't use SymGetModuleInfo() is it doesn't always
  //       return the full path to the DLL file, sometimes it returns path
  //       to the corresponding PDB file (debug info); sometimes it only
  //       returns partial path, which makes life painful.

  struct _modinfo mi;
  mi.addr      = addr;
  mi.full_path = buf;
  mi.buflen    = buflen;
  int pid = os::current_process_id();
  if (enumerate_modules(pid, _locate_module_by_addr, (void *)&mi)) {
    // buf already contains path name
    if (offset) *offset = addr - mi.base_addr;
    return true;
  } else {
    if (buf) buf[0] = '\0';
    if (offset) *offset = -1;
    return false;
  }
}

bool os::dll_address_to_function_name(address addr, char *buf,
                                      int buflen, int *offset) {
  // Unimplemented on Windows - in order to use SymGetSymFromAddr(),
  // we need to initialize imagehlp/dbghelp, then load the symbol table
  // for every module. That's too much work to do after a fatal error.
  // For an example on how to implement this function, see 1.4.2.
  if (offset) *offset = -1;
  if (buf) buf[0] = '\0';
  return false;
}

// save the start and end address of jvm.dll into param[0] and param[1]
static int _locate_jvm_dll(int pid, char* mod_fname, address base_addr,
                           unsigned size, void * param) {
  if (!param) return -1;

  if (base_addr <= (address)_locate_jvm_dll &&
      base_addr+size > (address)_locate_jvm_dll) {
    ((address*)param)[0] = base_addr;
    ((address*)param)[1] = base_addr + size;
    return 1;
  }
  return 0;
}

address vm_lib_location[2];    // start and end address of jvm.dll

// check if addr is inside jvm.dll
bool os::address_is_in_vm(address addr) {
  if (!vm_lib_location[0] || !vm_lib_location[1]) {
    int pid = os::current_process_id();
    if (!enumerate_modules(pid, _locate_jvm_dll, (void *)vm_lib_location)) {
      assert(false, "Can't find jvm module.");
      return false;
    }
  }

  return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
}

// print module info; param is outputStream*
static int _print_module(int pid, char* fname, address base,
                         unsigned size, void* param) {
  if (!param) return -1;

  outputStream* st = (outputStream*)param;

  address end_addr = base + size;
  st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base, end_addr, fname);
  return 0;
}

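// PE/COFF layout relied on below: offset 0x3c of the DOS header holds the
// 4-byte file offset of the "PE\0\0" signature, and the 2-byte Machine field
// of the COFF file header immediately follows that 4-byte signature.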
1292 |
// Loads .dll/.so and |
|
1293 |
// in case of error it checks if .dll/.so was built for the |
|
1294 |
// same architecture as Hotspot is running on |
|
1295 |
void * os::dll_load(const char *name, char *ebuf, int ebuflen) |
|
1296 |
{ |
|
1297 |
void * result = LoadLibrary(name); |
|
1298 |
if (result != NULL) |
|
1299 |
{ |
|
1300 |
return result; |
|
1301 |
} |
|
1302 |
||
1303 |
long errcode = GetLastError(); |
|
1304 |
if (errcode == ERROR_MOD_NOT_FOUND) { |
|
1305 |
strncpy(ebuf, "Can't find dependent libraries", ebuflen-1); |
|
1306 |
ebuf[ebuflen-1]='\0'; |
|
1307 |
return NULL; |
|
1308 |
} |
|
1309 |
||
1310 |
// Parsing dll below |
|
1311 |
// If we can read dll-info and find that dll was built |
|
1312 |
// for an architecture other than Hotspot is running in |
|
1313 |
// - then print to buffer "DLL was built for a different architecture" |
|
1314 |
// else call getLastErrorString to obtain system error message |
|
1315 |
||
1316 |
// Read system error message into ebuf |
|
1317 |
// It may or may not be overwritten below (in the for loop and just above) |
|
1318 |
getLastErrorString(ebuf, (size_t) ebuflen); |
|
1319 |
ebuf[ebuflen-1]='\0'; |
|
1320 |
int file_descriptor=::open(name, O_RDONLY | O_BINARY, 0); |
|
1321 |
if (file_descriptor<0) |
|
1322 |
{ |
|
1323 |
return NULL; |
|
1324 |
} |
|
1325 |
||
1326 |
uint32_t signature_offset; |
|
1327 |
uint16_t lib_arch=0; |
|
1328 |
bool failed_to_get_lib_arch= |
|
1329 |
( |
|
1330 |
//Go to position 3c in the dll |
|
1331 |
(os::seek_to_file_offset(file_descriptor,IMAGE_FILE_PTR_TO_SIGNATURE)<0) |
|
1332 |
|| |
|
1333 |
// Read loacation of signature |
|
1334 |
(sizeof(signature_offset)!= |
|
1335 |
(os::read(file_descriptor, (void*)&signature_offset,sizeof(signature_offset)))) |
|
1336 |
|| |
|
1337 |
//Go to COFF File Header in dll |
|
1338 |
//that is located after"signature" (4 bytes long) |
|
1339 |
(os::seek_to_file_offset(file_descriptor, |
|
1340 |
signature_offset+IMAGE_FILE_SIGNATURE_LENGTH)<0) |
|
1341 |
|| |
|
1342 |
//Read field that contains code of architecture |
|
1343 |
// that dll was build for |
|
1344 |
(sizeof(lib_arch)!= |
|
1345 |
(os::read(file_descriptor, (void*)&lib_arch,sizeof(lib_arch)))) |
|
1346 |
); |
|
1347 |
||
1348 |
::close(file_descriptor); |
|
1349 |
if (failed_to_get_lib_arch) |
|
1350 |
{ |
|
1351 |
// file i/o error - report getLastErrorString(...) msg |
|
1352 |
return NULL; |
|
1353 |
} |
|
1354 |
||
1355 |
typedef struct |
|
1356 |
{ |
|
1357 |
uint16_t arch_code; |
|
1358 |
char* arch_name; |
|
1359 |
} arch_t; |
|
1360 |
||
1361 |
static const arch_t arch_array[]={ |
|
1362 |
{IMAGE_FILE_MACHINE_I386, (char*)"IA 32"}, |
|
1363 |
{IMAGE_FILE_MACHINE_AMD64, (char*)"AMD 64"}, |
|
1364 |
{IMAGE_FILE_MACHINE_IA64, (char*)"IA 64"} |
|
1365 |
}; |
|
1366 |
#if (defined _M_IA64) |
|
1367 |
static const uint16_t running_arch=IMAGE_FILE_MACHINE_IA64; |
|
1368 |
#elif (defined _M_AMD64) |
|
1369 |
static const uint16_t running_arch=IMAGE_FILE_MACHINE_AMD64; |
|
1370 |
#elif (defined _M_IX86) |
|
1371 |
static const uint16_t running_arch=IMAGE_FILE_MACHINE_I386; |
|
1372 |
#else |
|
1373 |
#error Method os::dll_load requires that one of following \ |
|
1374 |
is defined :_M_IA64,_M_AMD64 or _M_IX86 |
|
1375 |
#endif |
|
1376 |
||
1377 |
||
1378 |
// Obtain a string for printf operation |
|
1379 |
// lib_arch_str shall contain string what platform this .dll was built for |
|
1380 |
// running_arch_str shall string contain what platform Hotspot was built for |
|
1381 |
char *running_arch_str=NULL,*lib_arch_str=NULL; |
|
1382 |
for (unsigned int i=0;i<ARRAY_SIZE(arch_array);i++) |
|
1383 |
{ |
|
1384 |
if (lib_arch==arch_array[i].arch_code) |
|
1385 |
lib_arch_str=arch_array[i].arch_name; |
|
1386 |
if (running_arch==arch_array[i].arch_code) |
|
1387 |
running_arch_str=arch_array[i].arch_name; |
|
1388 |
} |
|
1389 |
||
1390 |
assert(running_arch_str, |
|
1391 |
"Didn't find runing architecture code in arch_array"); |
|
1392 |
||
1393 |
// If the architure is right |
|
1394 |
// but some other error took place - report getLastErrorString(...) msg |
|
1395 |
if (lib_arch == running_arch) |
|
1396 |
{ |
|
1397 |
return NULL; |
|
1398 |
} |
|
1399 |
||
1400 |
if (lib_arch_str!=NULL) |
|
1401 |
{ |
|
1402 |
::_snprintf(ebuf, ebuflen-1, |
|
1403 |
"Can't load %s-bit .dll on a %s-bit platform", |
|
1404 |
lib_arch_str,running_arch_str); |
|
1405 |
} |
|
1406 |
else |
|
1407 |
{ |
|
1408 |
// don't know what architecture this dll was build for |
|
1409 |
::_snprintf(ebuf, ebuflen-1, |
|
1410 |
"Can't load this .dll (machine code=0x%x) on a %s-bit platform", |
|
1411 |
lib_arch,running_arch_str); |
|
1412 |
} |
|
1413 |
||
1414 |
return NULL; |
|
1415 |
} |
|
1416 |
||
1417 |
||
1418 |
void os::print_dll_info(outputStream *st) { |
|
1419 |
int pid = os::current_process_id(); |
|
1420 |
st->print_cr("Dynamic libraries:"); |
|
1421 |
enumerate_modules(pid, _print_module, (void *)st); |
|
1422 |
} |
|
1423 |
||
1424 |
void os::print_os_info(outputStream* st) { |
|
1425 |
st->print("OS:"); |
|
1426 |
||
1427 |
OSVERSIONINFOEX osvi; |
|
1428 |
ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX)); |
|
1429 |
osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX); |
|
1430 |
||
1431 |
if (!GetVersionEx((OSVERSIONINFO *)&osvi)) { |
|
1432 |
st->print_cr("N/A"); |
|
1433 |
return; |
|
1434 |
} |
|
1435 |
||
1436 |
int os_vers = osvi.dwMajorVersion * 1000 + osvi.dwMinorVersion; |
|
1437 |
||
1438 |
if (osvi.dwPlatformId == VER_PLATFORM_WIN32_NT) { |
|
1439 |
switch (os_vers) { |
|
1440 |
case 3051: st->print(" Windows NT 3.51"); break; |
|
1441 |
case 4000: st->print(" Windows NT 4.0"); break; |
|
1442 |
case 5000: st->print(" Windows 2000"); break; |
|
1443 |
case 5001: st->print(" Windows XP"); break; |
|
1444 |
case 5002: st->print(" Windows Server 2003 family"); break; |
|
1445 |
case 6000: st->print(" Windows Vista"); break; |
|
1446 |
default: // future windows, print out its major and minor versions |
|
1447 |
st->print(" Windows NT %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion); |
|
1448 |
} |
|
1449 |
} else { |
|
1450 |
switch (os_vers) { |
|
1451 |
case 4000: st->print(" Windows 95"); break; |
|
1452 |
case 4010: st->print(" Windows 98"); break; |
|
1453 |
case 4090: st->print(" Windows Me"); break; |
|
1454 |
default: // future windows, print out its major and minor versions |
|
1455 |
st->print(" Windows %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion); |
|
1456 |
} |
|
1457 |
} |
|
1458 |
||
1459 |
st->print(" Build %d", osvi.dwBuildNumber); |
|
1460 |
st->print(" %s", osvi.szCSDVersion); // service pack |
|
1461 |
st->cr(); |
|
1462 |
} |
|
1463 |
||
1464 |
void os::print_memory_info(outputStream* st) { |
|
1465 |
st->print("Memory:"); |
|
1466 |
st->print(" %dk page", os::vm_page_size()>>10); |
|
1467 |
||
1468 |
// FIXME: GlobalMemoryStatus() may return incorrect value if total memory |
|
1469 |
// is larger than 4GB |
|
1470 |
MEMORYSTATUS ms; |
|
1471 |
GlobalMemoryStatus(&ms); |
|
1472 |
||
1473 |
st->print(", physical %uk", os::physical_memory() >> 10); |
|
1474 |
st->print("(%uk free)", os::available_memory() >> 10); |
|
1475 |
||
1476 |
st->print(", swap %uk", ms.dwTotalPageFile >> 10); |
|
1477 |
st->print("(%uk free)", ms.dwAvailPageFile >> 10); |
|
1478 |
st->cr(); |
|
1479 |
} |
|
1480 |
||
1481 |
void os::print_siginfo(outputStream *st, void *siginfo) { |
|
1482 |
EXCEPTION_RECORD* er = (EXCEPTION_RECORD*)siginfo; |
|
1483 |
st->print("siginfo:"); |
|
1484 |
st->print(" ExceptionCode=0x%x", er->ExceptionCode); |
|
1485 |
||
1486 |
if (er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION && |
|
1487 |
er->NumberParameters >= 2) { |
|
1488 |
switch (er->ExceptionInformation[0]) { |
|
1489 |
case 0: st->print(", reading address"); break; |
|
1490 |
case 1: st->print(", writing address"); break; |
|
1491 |
default: st->print(", ExceptionInformation=" INTPTR_FORMAT, |
|
1492 |
er->ExceptionInformation[0]); |
|
1493 |
} |
|
1494 |
st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]); |
|
1495 |
} else if (er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR && |
|
1496 |
er->NumberParameters >= 2 && UseSharedSpaces) { |
|
1497 |
FileMapInfo* mapinfo = FileMapInfo::current_info(); |
|
1498 |
if (mapinfo->is_in_shared_space((void*)er->ExceptionInformation[1])) { |
|
1499 |
st->print("\n\nError accessing class data sharing archive." \ |
|
1500 |
" Mapped file inaccessible during execution, " \ |
|
1501 |
" possible disk/network problem."); |
|
1502 |
} |
|
1503 |
} else { |
|
1504 |
int num = er->NumberParameters; |
|
1505 |
if (num > 0) { |
|
1506 |
st->print(", ExceptionInformation="); |
|
1507 |
for (int i = 0; i < num; i++) { |
|
1508 |
st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]); |
|
1509 |
} |
|
1510 |
} |
|
1511 |
} |
|
1512 |
st->cr(); |
|
1513 |
} |
|
1514 |
||
1515 |
void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) { |
|
1516 |
// do nothing |
|
1517 |
} |
|
1518 |
||
1519 |
static char saved_jvm_path[MAX_PATH] = {0}; |
|
1520 |
||
1521 |
// Find the full path to the current module, jvm.dll or jvm_g.dll |
|
1522 |
void os::jvm_path(char *buf, jint buflen) { |
|
1523 |
// Error checking. |
|
1524 |
if (buflen < MAX_PATH) { |
|
1525 |
assert(false, "must use a large-enough buffer"); |
|
1526 |
buf[0] = '\0'; |
|
1527 |
return; |
|
1528 |
} |
|
1529 |
// Lazy resolve the path to current module. |
|
1530 |
if (saved_jvm_path[0] != 0) { |
|
1531 |
strcpy(buf, saved_jvm_path); |
|
1532 |
return; |
|
1533 |
} |
|
1534 |
||
1535 |
GetModuleFileName(vm_lib_handle, buf, buflen); |
|
1536 |
strcpy(saved_jvm_path, buf); |
|
1537 |
} |
|
1538 |
||
1539 |
||
1540 |
void os::print_jni_name_prefix_on(outputStream* st, int args_size) { |
|
1541 |
#ifndef _WIN64 |
|
1542 |
st->print("_"); |
|
1543 |
#endif |
|
1544 |
} |
|
1545 |
||
1546 |
||
1547 |
void os::print_jni_name_suffix_on(outputStream* st, int args_size) { |
|
1548 |
#ifndef _WIN64 |
|
1549 |
st->print("@%d", args_size * sizeof(int)); |
|
1550 |
#endif |
|
1551 |
} |
|
1552 |
||
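// As an illustrative sketch (the method name below is hypothetical, not from
// this source): for a native method whose computed args_size is 3, the two
// helpers above would decorate the 32-bit lookup name in the __stdcall style as
//
//   _Java_com_example_Foo_bar@12      // "_" prefix, "@" + 3 * sizeof(int)
//
// while on _WIN64 nothing is printed, since the x64 ABI uses a single calling
// convention without name decoration.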
1553 |
// sun.misc.Signal |
|
1554 |
// NOTE that this is a workaround for an apparent kernel bug where if |
|
1555 |
// a signal handler for SIGBREAK is installed then that signal handler |
|
1556 |
// takes priority over the console control handler for CTRL_CLOSE_EVENT. |
|
1557 |
// See bug 4416763. |
|
1558 |
static void (*sigbreakHandler)(int) = NULL; |
|
1559 |
||
1560 |
static void UserHandler(int sig, void *siginfo, void *context) { |
|
1561 |
os::signal_notify(sig); |
|
1562 |
// We need to reinstate the signal handler each time... |
|
1563 |
os::signal(sig, (void*)UserHandler); |
|
1564 |
} |
|
1565 |
||
1566 |
void* os::user_handler() { |
|
1567 |
return (void*) UserHandler; |
|
1568 |
} |
|
1569 |
||
1570 |
void* os::signal(int signal_number, void* handler) { |
|
1571 |
if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) { |
|
1572 |
void (*oldHandler)(int) = sigbreakHandler; |
|
1573 |
sigbreakHandler = (void (*)(int)) handler; |
|
1574 |
return (void*) oldHandler; |
|
1575 |
} else { |
|
1576 |
return (void*)::signal(signal_number, (void (*)(int))handler); |
|
1577 |
} |
|
1578 |
} |
|
1579 |
||
1580 |
void os::signal_raise(int signal_number) { |
|
1581 |
raise(signal_number); |
|
1582 |
} |
|
1583 |
||
1584 |
// The Win32 C runtime library maps all console control events other than ^C |
|
1585 |
// into SIGBREAK, which makes it impossible to distinguish ^BREAK from close, |
|
1586 |
// logoff, and shutdown events. We therefore install our own console handler |
|
1587 |
// that raises SIGTERM for the latter cases. |
|
1588 |
// |
|
1589 |
static BOOL WINAPI consoleHandler(DWORD event) { |
|
1590 |
switch(event) { |
|
1591 |
case CTRL_C_EVENT: |
|
1592 |
if (is_error_reported()) { |
|
1593 |
// Ctrl-C is pressed during error reporting, likely because the error |
|
1594 |
// handler fails to abort. Let VM die immediately. |
|
1595 |
os::die(); |
|
1596 |
} |
|
1597 |
||
1598 |
os::signal_raise(SIGINT); |
|
1599 |
return TRUE; |
|
1600 |
break; |
|
1601 |
case CTRL_BREAK_EVENT: |
|
1602 |
if (sigbreakHandler != NULL) { |
|
1603 |
(*sigbreakHandler)(SIGBREAK); |
|
1604 |
} |
|
1605 |
return TRUE; |
|
1606 |
break; |
|
1607 |
case CTRL_CLOSE_EVENT: |
|
1608 |
case CTRL_LOGOFF_EVENT: |
|
1609 |
case CTRL_SHUTDOWN_EVENT: |
|
1610 |
os::signal_raise(SIGTERM); |
|
1611 |
return TRUE; |
|
1612 |
break; |
|
1613 |
default: |
|
1614 |
break; |
|
1615 |
} |
|
1616 |
return FALSE; |
|
1617 |
} |
|
1618 |
||
1619 |
/* |
|
1620 |
 * The following code was moved here from os.cpp because this |
|
1621 |
 * code is platform specific by its very nature. |
|
1622 |
*/ |
|
1623 |
||
1624 |
// Return maximum OS signal used + 1 for internal use only |
|
1625 |
// Used as exit signal for signal_thread |
|
1626 |
int os::sigexitnum_pd(){ |
|
1627 |
return NSIG; |
|
1628 |
} |
|
1629 |
||
1630 |
// a counter for each possible signal value, including signal_thread exit signal |
|
1631 |
static volatile jint pending_signals[NSIG+1] = { 0 }; |
|
1632 |
static HANDLE sig_sem; |
|
1633 |
||
1634 |
void os::signal_init_pd() { |
|
1635 |
// Initialize signal structures |
|
1636 |
memset((void*)pending_signals, 0, sizeof(pending_signals)); |
|
1637 |
||
1638 |
sig_sem = ::CreateSemaphore(NULL, 0, NSIG+1, NULL); |
|
1639 |
||
1640 |
// Programs embedding the VM do not want it to attempt to receive |
|
1641 |
// events like CTRL_LOGOFF_EVENT, which are used to implement the |
|
1642 |
// shutdown hooks mechanism introduced in 1.3. For example, when |
|
1643 |
// the VM is run as part of a Windows NT service (i.e., a servlet |
|
1644 |
// engine in a web server), the correct behavior is for any console |
|
1645 |
// control handler to return FALSE, not TRUE, because the OS's |
|
1646 |
// "final" handler for such events allows the process to continue if |
|
1647 |
// it is a service (while terminating it if it is not a service). |
|
1648 |
// To make this behavior uniform and the mechanism simpler, we |
|
1649 |
// completely disable the VM's usage of these console events if -Xrs |
|
1650 |
// (=ReduceSignalUsage) is specified. This means, for example, that |
|
1651 |
// the CTRL-BREAK thread dump mechanism is also disabled in this |
|
1652 |
// case. See bugs 4323062, 4345157, and related bugs. |
|
1653 |
||
1654 |
if (!ReduceSignalUsage) { |
|
1655 |
// Add a CTRL-C handler |
|
1656 |
SetConsoleCtrlHandler(consoleHandler, TRUE); |
|
1657 |
} |
|
1658 |
} |
|
1659 |
||
1660 |
void os::signal_notify(int signal_number) { |
|
1661 |
BOOL ret; |
|
1662 |
||
1663 |
Atomic::inc(&pending_signals[signal_number]); |
|
1664 |
ret = ::ReleaseSemaphore(sig_sem, 1, NULL); |
|
1665 |
assert(ret != 0, "ReleaseSemaphore() failed"); |
|
1666 |
} |
|
1667 |
||
1668 |
static int check_pending_signals(bool wait_for_signal) { |
|
1669 |
DWORD ret; |
|
1670 |
while (true) { |
|
1671 |
for (int i = 0; i < NSIG + 1; i++) { |
|
1672 |
jint n = pending_signals[i]; |
|
1673 |
if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) { |
|
1674 |
return i; |
|
1675 |
} |
|
1676 |
} |
|
1677 |
if (!wait_for_signal) { |
|
1678 |
return -1; |
|
1679 |
} |
|
1680 |
||
1681 |
JavaThread *thread = JavaThread::current(); |
|
1682 |
||
1683 |
ThreadBlockInVM tbivm(thread); |
|
1684 |
||
1685 |
bool threadIsSuspended; |
|
1686 |
do { |
|
1687 |
thread->set_suspend_equivalent(); |
|
1688 |
// cleared by handle_special_suspend_equivalent_condition() or java_suspend_self() |
|
1689 |
ret = ::WaitForSingleObject(sig_sem, INFINITE); |
|
1690 |
assert(ret == WAIT_OBJECT_0, "WaitForSingleObject() failed"); |
|
1691 |
||
1692 |
// were we externally suspended while we were waiting? |
|
1693 |
threadIsSuspended = thread->handle_special_suspend_equivalent_condition(); |
|
1694 |
if (threadIsSuspended) { |
|
1695 |
// |
|
1696 |
// The semaphore has been incremented, but while we were waiting |
|
1697 |
// another thread suspended us. We don't want to continue running |
|
1698 |
// while suspended because that would surprise the thread that |
|
1699 |
// suspended us. |
|
1700 |
// |
|
1701 |
ret = ::ReleaseSemaphore(sig_sem, 1, NULL); |
|
1702 |
assert(ret != 0, "ReleaseSemaphore() failed"); |
|
1703 |
||
1704 |
thread->java_suspend_self(); |
|
1705 |
} |
|
1706 |
} while (threadIsSuspended); |
|
1707 |
} |
|
1708 |
} |
|
1709 |
||
1710 |
int os::signal_lookup() { |
|
1711 |
return check_pending_signals(false); |
|
1712 |
} |
|
1713 |
||
1714 |
int os::signal_wait() { |
|
1715 |
return check_pending_signals(true); |
|
1716 |
} |
|
1717 |
||
1718 |
// Implicit OS exception handling |
|
1719 |
||
1720 |
LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo, address handler) { |
|
1721 |
JavaThread* thread = JavaThread::current(); |
|
1722 |
// Save pc in thread |
|
1723 |
#ifdef _M_IA64 |
|
1724 |
thread->set_saved_exception_pc((address)exceptionInfo->ContextRecord->StIIP); |
|
1725 |
// Set pc to handler |
|
1726 |
exceptionInfo->ContextRecord->StIIP = (DWORD64)handler; |
|
1727 |
#elif _M_AMD64 |
|
1728 |
thread->set_saved_exception_pc((address)exceptionInfo->ContextRecord->Rip); |
|
1729 |
// Set pc to handler |
|
1730 |
exceptionInfo->ContextRecord->Rip = (DWORD64)handler; |
|
1731 |
#else |
|
1732 |
thread->set_saved_exception_pc((address)exceptionInfo->ContextRecord->Eip); |
|
1733 |
// Set pc to handler |
|
1734 |
exceptionInfo->ContextRecord->Eip = (LONG)handler; |
|
1735 |
#endif |
|
1736 |
||
1737 |
// Continue the execution |
|
1738 |
return EXCEPTION_CONTINUE_EXECUTION; |
|
1739 |
} |
|
1740 |
||
1741 |
||
1742 |
// Used for PostMortemDump |
|
1743 |
extern "C" void safepoints(); |
|
1744 |
extern "C" void find(int x); |
|
1745 |
extern "C" void events(); |
|
1746 |
||
1747 |
// According to Windows API documentation, an illegal instruction sequence should generate |
|
1748 |
// the 0xC000001C exception code. However, real-world experience shows that occasionally |
|
1749 |
// the execution of an illegal instruction can generate the exception code 0xC000001E. This |
|
1750 |
// seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems). |
|
1751 |
||
1752 |
#define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E |
|
1753 |
||
1754 |
// From "Execution Protection in the Windows Operating System" draft 0.35 |
|
1755 |
// Once a system header becomes available, the "real" define should be |
|
1756 |
// included or copied here. |
|
1757 |
#define EXCEPTION_INFO_EXEC_VIOLATION 0x08 |
|
1758 |
||
1759 |
#define def_excpt(val) #val, val |
|
1760 |
||
1761 |
struct siglabel { |
|
1762 |
char *name; |
|
1763 |
int number; |
|
1764 |
}; |
|
1765 |
||
1766 |
struct siglabel exceptlabels[] = { |
|
1767 |
def_excpt(EXCEPTION_ACCESS_VIOLATION), |
|
1768 |
def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT), |
|
1769 |
def_excpt(EXCEPTION_BREAKPOINT), |
|
1770 |
def_excpt(EXCEPTION_SINGLE_STEP), |
|
1771 |
def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED), |
|
1772 |
def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND), |
|
1773 |
def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO), |
|
1774 |
def_excpt(EXCEPTION_FLT_INEXACT_RESULT), |
|
1775 |
def_excpt(EXCEPTION_FLT_INVALID_OPERATION), |
|
1776 |
def_excpt(EXCEPTION_FLT_OVERFLOW), |
|
1777 |
def_excpt(EXCEPTION_FLT_STACK_CHECK), |
|
1778 |
def_excpt(EXCEPTION_FLT_UNDERFLOW), |
|
1779 |
def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO), |
|
1780 |
def_excpt(EXCEPTION_INT_OVERFLOW), |
|
1781 |
def_excpt(EXCEPTION_PRIV_INSTRUCTION), |
|
1782 |
def_excpt(EXCEPTION_IN_PAGE_ERROR), |
|
1783 |
def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION), |
|
1784 |
def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2), |
|
1785 |
def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION), |
|
1786 |
def_excpt(EXCEPTION_STACK_OVERFLOW), |
|
1787 |
def_excpt(EXCEPTION_INVALID_DISPOSITION), |
|
1788 |
def_excpt(EXCEPTION_GUARD_PAGE), |
|
1789 |
def_excpt(EXCEPTION_INVALID_HANDLE), |
|
1790 |
NULL, 0 |
|
1791 |
}; |
|
1792 |
||
1793 |
const char* os::exception_name(int exception_code, char *buf, size_t size) { |
|
1794 |
for (int i = 0; exceptlabels[i].name != NULL; i++) { |
|
1795 |
if (exceptlabels[i].number == exception_code) { |
|
1796 |
jio_snprintf(buf, size, "%s", exceptlabels[i].name); |
|
1797 |
return buf; |
|
1798 |
} |
|
1799 |
} |
|
1800 |
||
1801 |
return NULL; |
|
1802 |
} |
|
1803 |
||
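// A minimal, hypothetical usage sketch of exception_name() (not part of the
// original source): since def_excpt() stringizes its argument, a known code
// maps back to its literal macro name.
//
//   char buf[64];
//   const char* s = os::exception_name(EXCEPTION_ACCESS_VIOLATION, buf, sizeof(buf));
//   // s == buf, containing "EXCEPTION_ACCESS_VIOLATION"; an unknown code returns NULL.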
1804 |
//----------------------------------------------------------------------------- |
|
1805 |
LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) { |
|
1806 |
// handle exception caused by idiv; should only happen for -MinInt/-1 |
|
1807 |
// (division by zero is handled explicitly) |
|
1808 |
#ifdef _M_IA64 |
|
1809 |
assert(0, "Fix Handle_IDiv_Exception"); |
|
1810 |
#elif _M_AMD64 |
|
1811 |
PCONTEXT ctx = exceptionInfo->ContextRecord; |
|
1812 |
address pc = (address)ctx->Rip; |
|
1813 |
NOT_PRODUCT(Events::log("idiv overflow exception at " INTPTR_FORMAT , pc)); |
|
1814 |
assert(pc[0] == 0xF7, "not an idiv opcode"); |
|
1815 |
assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands"); |
|
1816 |
assert(ctx->Rax == min_jint, "unexpected idiv exception"); |
|
1817 |
// set correct result values and continue after idiv instruction |
|
1818 |
ctx->Rip = (DWORD64)pc + 2;        // idiv reg, reg is 2 bytes |
|
1819 |
ctx->Rax = (DWORD)min_jint; // result |
|
1820 |
ctx->Rdx = (DWORD)0; // remainder |
|
1821 |
// Continue the execution |
|
1822 |
#else |
|
1823 |
PCONTEXT ctx = exceptionInfo->ContextRecord; |
|
1824 |
address pc = (address)ctx->Eip; |
|
1825 |
NOT_PRODUCT(Events::log("idiv overflow exception at " INTPTR_FORMAT , pc)); |
|
1826 |
assert(pc[0] == 0xF7, "not an idiv opcode"); |
|
1827 |
assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands"); |
|
1828 |
assert(ctx->Eax == min_jint, "unexpected idiv exception"); |
|
1829 |
// set correct result values and continue after idiv instruction |
|
1830 |
ctx->Eip = (DWORD)pc + 2; // idiv reg, reg is 2 bytes |
|
1831 |
ctx->Eax = (DWORD)min_jint; // result |
|
1832 |
ctx->Edx = (DWORD)0; // remainder |
|
1833 |
// Continue the execution |
|
1834 |
#endif |
|
1835 |
return EXCEPTION_CONTINUE_EXECUTION; |
|
1836 |
} |
|
1837 |
||
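// For clarity, a sketch of the instruction shape the assertions above assume
// (added explanation, not in the original source): IDIV with a register operand
// is opcode 0xF7 followed by a ModRM byte in 0xF8..0xFF (mod=11, reg=/7),
// i.e. a two-byte instruction such as
//
//   F7 F9    idiv ecx     ; EDX:EAX = min_jint, ECX = -1 -> EXCEPTION_INT_OVERFLOW
//
// so advancing the pc by two bytes and forcing EAX = min_jint, EDX = 0 matches
// the Java result of min_jint / -1 (quotient min_jint, remainder 0).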
1838 |
#ifndef _WIN64 |
|
1839 |
//----------------------------------------------------------------------------- |
|
1840 |
LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) { |
|
1841 |
// handle exception caused by a native method modifying the control word |
|
1842 |
PCONTEXT ctx = exceptionInfo->ContextRecord; |
|
1843 |
DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode; |
|
1844 |
||
1845 |
switch (exception_code) { |
|
1846 |
case EXCEPTION_FLT_DENORMAL_OPERAND: |
|
1847 |
case EXCEPTION_FLT_DIVIDE_BY_ZERO: |
|
1848 |
case EXCEPTION_FLT_INEXACT_RESULT: |
|
1849 |
case EXCEPTION_FLT_INVALID_OPERATION: |
|
1850 |
case EXCEPTION_FLT_OVERFLOW: |
|
1851 |
case EXCEPTION_FLT_STACK_CHECK: |
|
1852 |
case EXCEPTION_FLT_UNDERFLOW: |
|
1853 |
jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std()); |
|
1854 |
if (fp_control_word != ctx->FloatSave.ControlWord) { |
|
1855 |
// Restore FPCW and mask out FLT exceptions |
|
1856 |
ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0; |
|
1857 |
// Mask out pending FLT exceptions |
|
1858 |
ctx->FloatSave.StatusWord &= 0xffffff00; |
|
1859 |
return EXCEPTION_CONTINUE_EXECUTION; |
|
1860 |
} |
|
1861 |
} |
|
1862 |
return EXCEPTION_CONTINUE_SEARCH; |
|
1863 |
} |
|
1864 |
#else //_WIN64 |
|
1865 |
/* |
|
1866 |
On Windows, the mxcsr control bits are non-volatile across calls |
|
1867 |
See also CR 6192333 |
|
1868 |
If EXCEPTION_FLT_* happened after some native method modified |
|
1869 |
mxcsr - it is not a jvm fault. |
|
1870 |
However, should we decide to restore mxcsr after a faulty |
|
1871 |
native method, we can uncomment the following code |
|
1872 |
jint MxCsr = INITIAL_MXCSR; |
|
1873 |
// we can't use StubRoutines::addr_mxcsr_std() |
|
1874 |
// because in Win64 mxcsr is not saved there |
|
1875 |
if (MxCsr != ctx->MxCsr) { |
|
1876 |
ctx->MxCsr = MxCsr; |
|
1877 |
return EXCEPTION_CONTINUE_EXECUTION; |
|
1878 |
} |
|
1879 |
||
1880 |
*/ |
|
1881 |
#endif //_WIN64 |
|
1882 |
||
1883 |
||
1884 |
// Fatal error reporting is single threaded, so we can make this a |
|
1885 |
// static, preallocated buffer. If the path is longer than MAX_PATH, silently |
|
1886 |
// ignore it. |
|
1887 |
static char saved_error_file[MAX_PATH] = {0}; |
|
1888 |
||
1889 |
void os::set_error_file(const char *logfile) { |
|
1890 |
if (strlen(logfile) <= MAX_PATH) { |
|
1891 |
strncpy(saved_error_file, logfile, MAX_PATH); |
|
1892 |
} |
|
1893 |
} |
|
1894 |
||
1895 |
static inline void report_error(Thread* t, DWORD exception_code, |
|
1896 |
address addr, void* siginfo, void* context) { |
|
1897 |
VMError err(t, exception_code, addr, siginfo, context); |
|
1898 |
err.report_and_die(); |
|
1899 |
||
1900 |
// If UseOsErrorReporting, this will return here and save the error file |
|
1901 |
// somewhere where we can find it in the minidump. |
|
1902 |
} |
|
1903 |
||
1904 |
//----------------------------------------------------------------------------- |
|
1905 |
LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) { |
|
1906 |
if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH; |
|
1907 |
DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode; |
|
1908 |
#ifdef _M_IA64 |
|
1909 |
address pc = (address) exceptionInfo->ContextRecord->StIIP; |
|
1910 |
#elif _M_AMD64 |
|
1911 |
address pc = (address) exceptionInfo->ContextRecord->Rip; |
|
1912 |
#else |
|
1913 |
address pc = (address) exceptionInfo->ContextRecord->Eip; |
|
1914 |
#endif |
|
1915 |
Thread* t = ThreadLocalStorage::get_thread_slow(); // slow & steady |
|
1916 |
||
1917 |
#ifndef _WIN64 |
|
1918 |
// Execution protection violation - win32 running on AMD64 only |
|
1919 |
// Handled first to avoid misdiagnosis as a "normal" access violation; |
|
1920 |
// This is safe to do because we have a new/unique ExceptionInformation |
|
1921 |
// code for this condition. |
|
1922 |
if (exception_code == EXCEPTION_ACCESS_VIOLATION) { |
|
1923 |
PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; |
|
1924 |
int exception_subcode = (int) exceptionRecord->ExceptionInformation[0]; |
|
1925 |
address addr = (address) exceptionRecord->ExceptionInformation[1]; |
|
1926 |
||
1927 |
if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) { |
|
1928 |
int page_size = os::vm_page_size(); |
|
1929 |
||
1930 |
// Make sure the pc and the faulting address are sane. |
|
1931 |
// |
|
1932 |
// If an instruction spans a page boundary, and the page containing |
|
1933 |
// the beginning of the instruction is executable but the following |
|
1934 |
// page is not, the pc and the faulting address might be slightly |
|
1935 |
// different - we still want to unguard the 2nd page in this case. |
|
1936 |
// |
|
1937 |
// 15 bytes seems to be a (very) safe value for max instruction size. |
|
1938 |
bool pc_is_near_addr = |
|
1939 |
(pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15); |
|
1940 |
bool instr_spans_page_boundary = |
|
1941 |
(align_size_down((intptr_t) pc ^ (intptr_t) addr, |
|
1942 |
(intptr_t) page_size) > 0); |
|
1943 |
||
1944 |
if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) { |
|
1945 |
static volatile address last_addr = |
|
1946 |
(address) os::non_memory_address_word(); |
|
1947 |
||
1948 |
// In conservative mode, don't unguard unless the address is in the VM |
|
1949 |
if (UnguardOnExecutionViolation > 0 && addr != last_addr && |
|
1950 |
(UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) { |
|
1951 |
||
1952 |
// Unguard and retry |
|
1953 |
address page_start = |
|
1954 |
(address) align_size_down((intptr_t) addr, (intptr_t) page_size); |
|
1955 |
bool res = os::unguard_memory((char*) page_start, page_size); |
|
1956 |
||
1957 |
if (PrintMiscellaneous && Verbose) { |
|
1958 |
char buf[256]; |
|
1959 |
jio_snprintf(buf, sizeof(buf), "Execution protection violation " |
|
1960 |
"at " INTPTR_FORMAT |
|
1961 |
", unguarding " INTPTR_FORMAT ": %s", addr, |
|
1962 |
page_start, (res ? "success" : strerror(errno))); |
|
1963 |
tty->print_raw_cr(buf); |
|
1964 |
} |
|
1965 |
||
1966 |
// Set last_addr so if we fault again at the same address, we don't |
|
1967 |
// end up in an endless loop. |
|
1968 |
// |
|
1969 |
// There are two potential complications here. Two threads trapping |
|
1970 |
// at the same address at the same time could cause one of the |
|
1971 |
// threads to think it already unguarded, and abort the VM. Likely |
|
1972 |
// very rare. |
|
1973 |
// |
|
1974 |
// The other race involves two threads alternately trapping at |
|
1975 |
// different addresses and failing to unguard the page, resulting in |
|
1976 |
// an endless loop. This condition is probably even more unlikely |
|
1977 |
// than the first. |
|
1978 |
// |
|
1979 |
// Although both cases could be avoided by using locks or thread |
|
1980 |
// local last_addr, these solutions are unnecessary complication: |
|
1981 |
// this handler is a best-effort safety net, not a complete solution. |
|
1982 |
// It is disabled by default and should only be used as a workaround |
|
1983 |
// in case we missed any no-execute-unsafe VM code. |
|
1984 |
||
1985 |
last_addr = addr; |
|
1986 |
||
1987 |
return EXCEPTION_CONTINUE_EXECUTION; |
|
1988 |
} |
|
1989 |
} |
|
1990 |
||
1991 |
// Last unguard failed or not unguarding |
|
1992 |
tty->print_raw_cr("Execution protection violation"); |
|
1993 |
report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord, |
|
1994 |
exceptionInfo->ContextRecord); |
|
1995 |
return EXCEPTION_CONTINUE_SEARCH; |
|
1996 |
} |
|
1997 |
} |
|
1998 |
#endif // _WIN64 |
|
1999 |
||
2000 |
// Check to see if we caught the safepoint code in the |
|
2001 |
// process of write protecting the memory serialization page. |
|
2002 |
// It write enables the page immediately after protecting it |
|
2003 |
// so just return. |
|
2004 |
if ( exception_code == EXCEPTION_ACCESS_VIOLATION ) { |
|
2005 |
JavaThread* thread = (JavaThread*) t; |
|
2006 |
PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; |
|
2007 |
address addr = (address) exceptionRecord->ExceptionInformation[1]; |
|
2008 |
if ( os::is_memory_serialize_page(thread, addr) ) { |
|
2009 |
// Block the current thread until the memory serialize page permission is restored. |
|
2010 |
os::block_on_serialize_page_trap(); |
|
2011 |
return EXCEPTION_CONTINUE_EXECUTION; |
|
2012 |
} |
|
2013 |
} |
|
2014 |
||
2015 |
||
2016 |
if (t != NULL && t->is_Java_thread()) { |
|
2017 |
JavaThread* thread = (JavaThread*) t; |
|
2018 |
bool in_java = thread->thread_state() == _thread_in_Java; |
|
2019 |
||
2020 |
// Handle potential stack overflows up front. |
|
2021 |
if (exception_code == EXCEPTION_STACK_OVERFLOW) { |
|
2022 |
if (os::uses_stack_guard_pages()) { |
|
2023 |
#ifdef _M_IA64 |
|
2024 |
// |
|
2025 |
// If it's a legal stack address continue, Windows will map it in. |
|
2026 |
// |
|
2027 |
PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; |
|
2028 |
address addr = (address) exceptionRecord->ExceptionInformation[1]; |
|
2029 |
if (addr > thread->stack_yellow_zone_base() && addr < thread->stack_base() ) |
|
2030 |
return EXCEPTION_CONTINUE_EXECUTION; |
|
2031 |
||
2032 |
// The register save area is the same size as the memory stack |
|
2033 |
// and starts at the page just above the start of the memory stack. |
|
2034 |
// If we get a fault in this area, we've run out of register |
|
2035 |
// stack. If we are in java, try throwing a stack overflow exception. |
|
2036 |
if (addr > thread->stack_base() && |
|
2037 |
addr <= (thread->stack_base()+thread->stack_size()) ) { |
|
2038 |
char buf[256]; |
|
2039 |
jio_snprintf(buf, sizeof(buf), |
|
2040 |
"Register stack overflow, addr:%p, stack_base:%p\n", |
|
2041 |
addr, thread->stack_base() ); |
|
2042 |
tty->print_raw_cr(buf); |
|
2043 |
// If not in java code, return and hope for the best. |
|
2044 |
return in_java ? Handle_Exception(exceptionInfo, |
|
2045 |
SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW)) |
|
2046 |
: EXCEPTION_CONTINUE_EXECUTION; |
|
2047 |
} |
|
2048 |
#endif |
|
2049 |
if (thread->stack_yellow_zone_enabled()) { |
|
2050 |
// Yellow zone violation. The o/s has unprotected the first yellow |
|
2051 |
// zone page for us. Note: must call disable_stack_yellow_zone to |
|
2052 |
// update the enabled status, even if the zone contains only one page. |
|
2053 |
thread->disable_stack_yellow_zone(); |
|
2054 |
// If not in java code, return and hope for the best. |
|
2055 |
return in_java ? Handle_Exception(exceptionInfo, |
|
2056 |
SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW)) |
|
2057 |
: EXCEPTION_CONTINUE_EXECUTION; |
|
2058 |
} else { |
|
2059 |
// Fatal red zone violation. |
|
2060 |
thread->disable_stack_red_zone(); |
|
2061 |
tty->print_raw_cr("An unrecoverable stack overflow has occurred."); |
|
2062 |
report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, |
|
2063 |
exceptionInfo->ContextRecord); |
|
2064 |
return EXCEPTION_CONTINUE_SEARCH; |
|
2065 |
} |
|
2066 |
} else if (in_java) { |
|
2067 |
// JVM-managed guard pages cannot be used on win95/98. The o/s provides |
|
2068 |
// a one-time-only guard page, which it has released to us. The next |
|
2069 |
// stack overflow on this thread will result in an ACCESS_VIOLATION. |
|
2070 |
return Handle_Exception(exceptionInfo, |
|
2071 |
SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW)); |
|
2072 |
} else { |
|
2073 |
// Can only return and hope for the best. Further stack growth will |
|
2074 |
// result in an ACCESS_VIOLATION. |
|
2075 |
return EXCEPTION_CONTINUE_EXECUTION; |
|
2076 |
} |
|
2077 |
} else if (exception_code == EXCEPTION_ACCESS_VIOLATION) { |
|
2078 |
// Either stack overflow or null pointer exception. |
|
2079 |
if (in_java) { |
|
2080 |
PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; |
|
2081 |
address addr = (address) exceptionRecord->ExceptionInformation[1]; |
|
2082 |
address stack_end = thread->stack_base() - thread->stack_size(); |
|
2083 |
if (addr < stack_end && addr >= stack_end - os::vm_page_size()) { |
|
2084 |
// Stack overflow. |
|
2085 |
assert(!os::uses_stack_guard_pages(), |
|
2086 |
"should be caught by red zone code above."); |
|
2087 |
return Handle_Exception(exceptionInfo, |
|
2088 |
SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW)); |
|
2089 |
} |
|
2090 |
// |
|
2091 |
// Check for safepoint polling and implicit null |
|
2092 |
// We only expect null pointers in the stubs (vtable) |
|
2093 |
// the rest are checked explicitly now. |
|
2094 |
// |
|
2095 |
CodeBlob* cb = CodeCache::find_blob(pc); |
|
2096 |
if (cb != NULL) { |
|
2097 |
if (os::is_poll_address(addr)) { |
|
2098 |
address stub = SharedRuntime::get_poll_stub(pc); |
|
2099 |
return Handle_Exception(exceptionInfo, stub); |
|
2100 |
} |
|
2101 |
} |
|
2102 |
{ |
|
2103 |
#ifdef _WIN64 |
|
2104 |
// |
|
2105 |
// If it's a legal stack address map the entire region in |
|
2106 |
// |
|
2107 |
PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; |
|
2108 |
address addr = (address) exceptionRecord->ExceptionInformation[1]; |
|
2109 |
if (addr > thread->stack_yellow_zone_base() && addr < thread->stack_base() ) { |
|
2110 |
addr = (address)((uintptr_t)addr & |
|
2111 |
(~((uintptr_t)os::vm_page_size() - (uintptr_t)1))); |
|
2112 |
os::commit_memory( (char *)addr, thread->stack_base() - addr ); |
|
2113 |
return EXCEPTION_CONTINUE_EXECUTION; |
|
2114 |
} |
|
2115 |
else |
|
2116 |
#endif |
|
2117 |
{ |
|
2118 |
// Null pointer exception. |
|
2119 |
#ifdef _M_IA64 |
|
2120 |
// We catch register stack overflows in compiled code by doing |
|
2121 |
// an explicit compare and executing a st8(G0, G0) if the |
|
2122 |
// BSP enters into our guard area. We test for the overflow |
|
2123 |
// condition and fall into the normal null pointer exception |
|
2124 |
// code if BSP hasn't overflowed. |
|
2125 |
if ( in_java ) { |
|
2126 |
if(thread->register_stack_overflow()) { |
|
2127 |
assert((address)exceptionInfo->ContextRecord->IntS3 == |
|
2128 |
thread->register_stack_limit(), |
|
2129 |
"GR7 doesn't contain register_stack_limit"); |
|
2130 |
// Disable the yellow zone which sets the state that |
|
2131 |
// we've got a stack overflow problem. |
|
2132 |
if (thread->stack_yellow_zone_enabled()) { |
|
2133 |
thread->disable_stack_yellow_zone(); |
|
2134 |
} |
|
2135 |
// Give us some room to process the exception |
|
2136 |
thread->disable_register_stack_guard(); |
|
2137 |
// Update GR7 with the new limit so we can continue running |
|
2138 |
// compiled code. |
|
2139 |
exceptionInfo->ContextRecord->IntS3 = |
|
2140 |
(ULONGLONG)thread->register_stack_limit(); |
|
2141 |
return Handle_Exception(exceptionInfo, |
|
2142 |
SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW)); |
|
2143 |
} else { |
|
2144 |
// |
|
2145 |
// Check for implicit null |
|
2146 |
// We only expect null pointers in the stubs (vtable) |
|
2147 |
// the rest are checked explicitly now. |
|
2148 |
// |
|
2149 |
CodeBlob* cb = CodeCache::find_blob(pc); |
|
2150 |
if (cb != NULL) { |
|
2151 |
if (VtableStubs::stub_containing(pc) != NULL) { |
|
2152 |
if (((uintptr_t)addr) < os::vm_page_size() ) { |
|
2153 |
// an access to the first page of VM--assume it is a null pointer |
|
2154 |
return Handle_Exception(exceptionInfo, |
|
2155 |
SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL)); |
|
2156 |
} |
|
2157 |
} |
|
2158 |
} |
|
2159 |
} |
|
2160 |
} // in_java |
|
2161 |
||
2162 |
// IA64 doesn't use implicit null checking yet. So we shouldn't |
|
2163 |
// get here. |
|
2164 |
tty->print_raw_cr("Access violation, possible null pointer exception"); |
|
2165 |
report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, |
|
2166 |
exceptionInfo->ContextRecord); |
|
2167 |
return EXCEPTION_CONTINUE_SEARCH; |
|
2168 |
#else /* !IA64 */ |
|
2169 |
||
2170 |
// Windows 98 reports faulting addresses incorrectly |
|
2171 |
if (!MacroAssembler::needs_explicit_null_check((intptr_t)addr) || |
|
2172 |
!os::win32::is_nt()) { |
|
2173 |
return Handle_Exception(exceptionInfo, |
|
2174 |
SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL)); |
|
2175 |
} |
|
2176 |
report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, |
|
2177 |
exceptionInfo->ContextRecord); |
|
2178 |
return EXCEPTION_CONTINUE_SEARCH; |
|
2179 |
#endif |
|
2180 |
} |
|
2181 |
} |
|
2182 |
} |
|
2183 |
||
2184 |
#ifdef _WIN64 |
|
2185 |
// Special care for fast JNI field accessors. |
|
2186 |
// jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks |
|
2187 |
// in and the heap gets shrunk before the field access. |
|
2188 |
if (exception_code == EXCEPTION_ACCESS_VIOLATION) { |
|
2189 |
address addr = JNI_FastGetField::find_slowcase_pc(pc); |
|
2190 |
if (addr != (address)-1) { |
|
2191 |
return Handle_Exception(exceptionInfo, addr); |
|
2192 |
} |
|
2193 |
} |
|
2194 |
#endif |
|
2195 |
||
2196 |
#ifdef _WIN64 |
|
2197 |
// Windows will sometimes generate an access violation |
|
2198 |
// when we call malloc. Since we use VectoredExceptions |
|
2199 |
// on 64 bit platforms, we see this exception. We must |
|
2200 |
// pass this exception on so Windows can recover. |
|
2201 |
// We check to see if the pc of the fault is in NTDLL.DLL |
|
2202 |
// if so, we pass control on to Windows for handling. |
|
2203 |
if (UseVectoredExceptions && _addr_in_ntdll(pc)) return EXCEPTION_CONTINUE_SEARCH; |
|
2204 |
#endif |
|
2205 |
||
2206 |
// Stack overflow or null pointer exception in native code. |
|
2207 |
report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, |
|
2208 |
exceptionInfo->ContextRecord); |
|
2209 |
return EXCEPTION_CONTINUE_SEARCH; |
|
2210 |
} |
|
2211 |
||
2212 |
if (in_java) { |
|
2213 |
switch (exception_code) { |
|
2214 |
case EXCEPTION_INT_DIVIDE_BY_ZERO: |
|
2215 |
return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO)); |
|
2216 |
||
2217 |
case EXCEPTION_INT_OVERFLOW: |
|
2218 |
return Handle_IDiv_Exception(exceptionInfo); |
|
2219 |
||
2220 |
} // switch |
|
2221 |
} |
|
2222 |
#ifndef _WIN64 |
|
2223 |
if ((thread->thread_state() == _thread_in_Java) || |
|
2224 |
(thread->thread_state() == _thread_in_native) ) |
|
2225 |
{ |
|
2226 |
LONG result=Handle_FLT_Exception(exceptionInfo); |
|
2227 |
if (result==EXCEPTION_CONTINUE_EXECUTION) return result; |
|
2228 |
} |
|
2229 |
#endif //_WIN64 |
|
2230 |
} |
|
2231 |
||
2232 |
if (exception_code != EXCEPTION_BREAKPOINT) { |
|
2233 |
#ifndef _WIN64 |
|
2234 |
report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, |
|
2235 |
exceptionInfo->ContextRecord); |
|
2236 |
#else |
|
2237 |
// Itanium Windows uses a VectoredExceptionHandler |
|
2238 |
// Which means that C++ programmatic exception handlers (try/except) |
|
2239 |
// will get here. Continue the search for the right except block if |
|
2240 |
// the exception code is not a fatal code. |
|
2241 |
switch ( exception_code ) { |
|
2242 |
case EXCEPTION_ACCESS_VIOLATION: |
|
2243 |
case EXCEPTION_STACK_OVERFLOW: |
|
2244 |
case EXCEPTION_ILLEGAL_INSTRUCTION: |
|
2245 |
case EXCEPTION_ILLEGAL_INSTRUCTION_2: |
|
2246 |
case EXCEPTION_INT_OVERFLOW: |
|
2247 |
case EXCEPTION_INT_DIVIDE_BY_ZERO: |
|
2248 |
{ report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, |
|
2249 |
exceptionInfo->ContextRecord); |
|
2250 |
} |
|
2251 |
break; |
|
2252 |
default: |
|
2253 |
break; |
|
2254 |
} |
|
2255 |
#endif |
|
2256 |
} |
|
2257 |
return EXCEPTION_CONTINUE_SEARCH; |
|
2258 |
} |
|
2259 |
||
2260 |
#ifndef _WIN64 |
|
2261 |
// Special care for fast JNI accessors. |
|
2262 |
// jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and |
|
2263 |
// the heap gets shrunk before the field access. |
|
2264 |
// Need to install our own structured exception handler since native code may |
|
2265 |
// install its own. |
|
2266 |
LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) { |
|
2267 |
DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode; |
|
2268 |
if (exception_code == EXCEPTION_ACCESS_VIOLATION) { |
|
2269 |
address pc = (address) exceptionInfo->ContextRecord->Eip; |
|
2270 |
address addr = JNI_FastGetField::find_slowcase_pc(pc); |
|
2271 |
if (addr != (address)-1) { |
|
2272 |
return Handle_Exception(exceptionInfo, addr); |
|
2273 |
} |
|
2274 |
} |
|
2275 |
return EXCEPTION_CONTINUE_SEARCH; |
|
2276 |
} |
|
2277 |
||
2278 |
#define DEFINE_FAST_GETFIELD(Return,Fieldname,Result) \ |
|
2279 |
Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env, jobject obj, jfieldID fieldID) { \ |
|
2280 |
__try { \ |
|
2281 |
return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env, obj, fieldID); \ |
|
2282 |
} __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)_exception_info())) { \ |
|
2283 |
} \ |
|
2284 |
return 0; \ |
|
2285 |
} |
|
2286 |
||
2287 |
DEFINE_FAST_GETFIELD(jboolean, bool, Boolean) |
|
2288 |
DEFINE_FAST_GETFIELD(jbyte, byte, Byte) |
|
2289 |
DEFINE_FAST_GETFIELD(jchar, char, Char) |
|
2290 |
DEFINE_FAST_GETFIELD(jshort, short, Short) |
|
2291 |
DEFINE_FAST_GETFIELD(jint, int, Int) |
|
2292 |
DEFINE_FAST_GETFIELD(jlong, long, Long) |
|
2293 |
DEFINE_FAST_GETFIELD(jfloat, float, Float) |
|
2294 |
DEFINE_FAST_GETFIELD(jdouble, double, Double) |
|
2295 |
||
2296 |
address os::win32::fast_jni_accessor_wrapper(BasicType type) { |
|
2297 |
switch (type) { |
|
2298 |
case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper; |
|
2299 |
case T_BYTE: return (address)jni_fast_GetByteField_wrapper; |
|
2300 |
case T_CHAR: return (address)jni_fast_GetCharField_wrapper; |
|
2301 |
case T_SHORT: return (address)jni_fast_GetShortField_wrapper; |
|
2302 |
case T_INT: return (address)jni_fast_GetIntField_wrapper; |
|
2303 |
case T_LONG: return (address)jni_fast_GetLongField_wrapper; |
|
2304 |
case T_FLOAT: return (address)jni_fast_GetFloatField_wrapper; |
|
2305 |
case T_DOUBLE: return (address)jni_fast_GetDoubleField_wrapper; |
|
2306 |
default: ShouldNotReachHere(); |
|
2307 |
} |
|
2308 |
return (address)-1; |
|
2309 |
} |
|
2310 |
#endif |
|
2311 |
||
2312 |
// Virtual Memory |
|
2313 |
||
2314 |
int os::vm_page_size() { return os::win32::vm_page_size(); } |
|
2315 |
int os::vm_allocation_granularity() { |
|
2316 |
return os::win32::vm_allocation_granularity(); |
|
2317 |
} |
|
2318 |
||
2319 |
// Windows large page support is available on Windows 2003. In order to use |
|
2320 |
// large page memory, the administrator must first assign additional privilege |
|
2321 |
// to the user: |
|
2322 |
// + select Control Panel -> Administrative Tools -> Local Security Policy |
|
2323 |
// + select Local Policies -> User Rights Assignment |
|
2324 |
// + double click "Lock pages in memory", add users and/or groups |
|
2325 |
// + reboot |
|
2326 |
// Note that the above steps are needed for administrators as well, because administrators |
|
2327 |
// by default do not have the privilege to lock pages in memory. |
|
2328 |
// |
|
2329 |
// Note about Windows 2003: although the API supports committing large page |
|
2330 |
// memory on a page-by-page basis and VirtualAlloc() returns success under this |
|
2331 |
// scenario, I found through experimentation that it only uses large pages if the entire |
|
2332 |
// memory region is reserved and committed in a single VirtualAlloc() call. |
|
2333 |
// This makes Windows large page support more or less like Solaris ISM, in |
|
2334 |
// that the entire heap must be committed upfront. This probably will change |
|
2335 |
// in the future, if so the code below needs to be revisited. |
|
2336 |
||
2337 |
#ifndef MEM_LARGE_PAGES |
|
2338 |
#define MEM_LARGE_PAGES 0x20000000 |
|
2339 |
#endif |
|
2340 |
||
2341 |
// GetLargePageMinimum is only available on Windows 2003. The other functions |
|
2342 |
// are available on NT but not on Windows 98/Me. We have to resolve them at |
|
2343 |
// runtime. |
|
2344 |
typedef SIZE_T (WINAPI *GetLargePageMinimum_func_type) (void); |
|
2345 |
typedef BOOL (WINAPI *AdjustTokenPrivileges_func_type) |
|
2346 |
(HANDLE, BOOL, PTOKEN_PRIVILEGES, DWORD, PTOKEN_PRIVILEGES, PDWORD); |
|
2347 |
typedef BOOL (WINAPI *OpenProcessToken_func_type) (HANDLE, DWORD, PHANDLE); |
|
2348 |
typedef BOOL (WINAPI *LookupPrivilegeValue_func_type) (LPCTSTR, LPCTSTR, PLUID); |
|
2349 |
||
2350 |
static GetLargePageMinimum_func_type _GetLargePageMinimum; |
|
2351 |
static AdjustTokenPrivileges_func_type _AdjustTokenPrivileges; |
|
2352 |
static OpenProcessToken_func_type _OpenProcessToken; |
|
2353 |
static LookupPrivilegeValue_func_type _LookupPrivilegeValue; |
|
2354 |
||
2355 |
static HINSTANCE _kernel32; |
|
2356 |
static HINSTANCE _advapi32; |
|
2357 |
static HANDLE _hProcess; |
|
2358 |
static HANDLE _hToken; |
|
2359 |
||
2360 |
static size_t _large_page_size = 0; |
|
2361 |
||
2362 |
static bool resolve_functions_for_large_page_init() { |
|
2363 |
_kernel32 = LoadLibrary("kernel32.dll"); |
|
2364 |
if (_kernel32 == NULL) return false; |
|
2365 |
||
2366 |
_GetLargePageMinimum = CAST_TO_FN_PTR(GetLargePageMinimum_func_type, |
|
2367 |
GetProcAddress(_kernel32, "GetLargePageMinimum")); |
|
2368 |
if (_GetLargePageMinimum == NULL) return false; |
|
2369 |
||
2370 |
_advapi32 = LoadLibrary("advapi32.dll"); |
|
2371 |
if (_advapi32 == NULL) return false; |
|
2372 |
||
2373 |
_AdjustTokenPrivileges = CAST_TO_FN_PTR(AdjustTokenPrivileges_func_type, |
|
2374 |
GetProcAddress(_advapi32, "AdjustTokenPrivileges")); |
|
2375 |
_OpenProcessToken = CAST_TO_FN_PTR(OpenProcessToken_func_type, |
|
2376 |
GetProcAddress(_advapi32, "OpenProcessToken")); |
|
2377 |
_LookupPrivilegeValue = CAST_TO_FN_PTR(LookupPrivilegeValue_func_type, |
|
2378 |
GetProcAddress(_advapi32, "LookupPrivilegeValueA")); |
|
2379 |
return _AdjustTokenPrivileges != NULL && |
|
2380 |
_OpenProcessToken != NULL && |
|
2381 |
_LookupPrivilegeValue != NULL; |
|
2382 |
} |
|
2383 |
||
2384 |
static bool request_lock_memory_privilege() { |
|
2385 |
_hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE, |
|
2386 |
os::current_process_id()); |
|
2387 |
||
2388 |
LUID luid; |
|
2389 |
if (_hProcess != NULL && |
|
2390 |
_OpenProcessToken(_hProcess, TOKEN_ADJUST_PRIVILEGES, &_hToken) && |
|
2391 |
_LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) { |
|
2392 |
||
2393 |
TOKEN_PRIVILEGES tp; |
|
2394 |
tp.PrivilegeCount = 1; |
|
2395 |
tp.Privileges[0].Luid = luid; |
|
2396 |
tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED; |
|
2397 |
||
2398 |
// AdjustTokenPrivileges() may return TRUE even when it couldn't change the |
|
2399 |
// privilege. Check GetLastError() too. See MSDN document. |
|
2400 |
if (_AdjustTokenPrivileges(_hToken, false, &tp, sizeof(tp), NULL, NULL) && |
|
2401 |
(GetLastError() == ERROR_SUCCESS)) { |
|
2402 |
return true; |
|
2403 |
} |
|
2404 |
} |
|
2405 |
||
2406 |
return false; |
|
2407 |
} |
|
2408 |
||
2409 |
static void cleanup_after_large_page_init() { |
|
2410 |
_GetLargePageMinimum = NULL; |
|
2411 |
_AdjustTokenPrivileges = NULL; |
|
2412 |
_OpenProcessToken = NULL; |
|
2413 |
_LookupPrivilegeValue = NULL; |
|
2414 |
if (_kernel32) FreeLibrary(_kernel32); |
|
2415 |
_kernel32 = NULL; |
|
2416 |
if (_advapi32) FreeLibrary(_advapi32); |
|
2417 |
_advapi32 = NULL; |
|
2418 |
if (_hProcess) CloseHandle(_hProcess); |
|
2419 |
_hProcess = NULL; |
|
2420 |
if (_hToken) CloseHandle(_hToken); |
|
2421 |
_hToken = NULL; |
|
2422 |
} |
|
2423 |
||
2424 |
bool os::large_page_init() { |
|
2425 |
if (!UseLargePages) return false; |
|
2426 |
||
2427 |
// print a warning if any large page related flag is specified on command line |
|
2428 |
bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) || |
|
2429 |
!FLAG_IS_DEFAULT(LargePageSizeInBytes); |
|
2430 |
bool success = false; |
|
2431 |
||
2432 |
# define WARN(msg) if (warn_on_failure) { warning(msg); } |
|
2433 |
if (resolve_functions_for_large_page_init()) { |
|
2434 |
if (request_lock_memory_privilege()) { |
|
2435 |
size_t s = _GetLargePageMinimum(); |
|
2436 |
if (s) { |
|
2437 |
#if defined(IA32) || defined(AMD64) |
|
2438 |
if (s > 4*M || LargePageSizeInBytes > 4*M) { |
|
2439 |
WARN("JVM cannot use large pages bigger than 4mb."); |
|
2440 |
} else { |
|
2441 |
#endif |
|
2442 |
if (LargePageSizeInBytes && LargePageSizeInBytes % s == 0) { |
|
2443 |
_large_page_size = LargePageSizeInBytes; |
|
2444 |
} else { |
|
2445 |
_large_page_size = s; |
|
2446 |
} |
|
2447 |
success = true; |
|
2448 |
#if defined(IA32) || defined(AMD64) |
|
2449 |
} |
|
2450 |
#endif |
|
2451 |
} else { |
|
2452 |
WARN("Large page is not supported by the processor."); |
|
2453 |
} |
|
2454 |
} else { |
|
2455 |
WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory."); |
|
2456 |
} |
|
2457 |
} else { |
|
2458 |
WARN("Large page is not supported by the operating system."); |
|
2459 |
} |
|
2460 |
#undef WARN |
|
2461 |
||
2462 |
const size_t default_page_size = (size_t) vm_page_size(); |
|
2463 |
if (success && _large_page_size > default_page_size) { |
|
2464 |
_page_sizes[0] = _large_page_size; |
|
2465 |
_page_sizes[1] = default_page_size; |
|
2466 |
_page_sizes[2] = 0; |
|
2467 |
} |
|
2468 |
||
2469 |
cleanup_after_large_page_init(); |
|
2470 |
return success; |
|
2471 |
} |
|
2472 |
||
2473 |
// On win32, one cannot release just a part of reserved memory, it's an |
|
2474 |
// all or nothing deal. When we split a reservation, we must break the |
|
2475 |
// reservation into two reservations. |
|
2476 |
void os::split_reserved_memory(char *base, size_t size, size_t split, |
|
2477 |
bool realloc) { |
|
2478 |
if (size > 0) { |
|
2479 |
release_memory(base, size); |
|
2480 |
if (realloc) { |
|
2481 |
reserve_memory(split, base); |
|
2482 |
} |
|
2483 |
if (size != split) { |
|
2484 |
reserve_memory(size - split, base + split); |
|
2485 |
} |
|
2486 |
} |
|
2487 |
} |
|
2488 |
||
2489 |
char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint) { |
|
2490 |
assert((size_t)addr % os::vm_allocation_granularity() == 0, |
|
2491 |
"reserve alignment"); |
|
2492 |
assert(bytes % os::vm_allocation_granularity() == 0, "reserve block size"); |
|
2493 |
char* res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, |
|
2494 |
PAGE_EXECUTE_READWRITE); |
|
2495 |
assert(res == NULL || addr == NULL || addr == res, |
|
2496 |
"Unexpected address from reserve."); |
|
2497 |
return res; |
|
2498 |
} |
|
2499 |
||
2500 |
// Reserve memory at an arbitrary address, only if that area is |
|
2501 |
// available (and not reserved for something else). |
|
2502 |
char* os::attempt_reserve_memory_at(size_t bytes, char* requested_addr) { |
|
2503 |
// Windows os::reserve_memory() fails if the requested address range is |
|
2504 |
// not available. |
|
2505 |
return reserve_memory(bytes, requested_addr); |
|
2506 |
} |
|
2507 |
||
2508 |
size_t os::large_page_size() { |
|
2509 |
return _large_page_size; |
|
2510 |
} |
|
2511 |
||
2512 |
bool os::can_commit_large_page_memory() { |
|
2513 |
// Windows only uses large page memory when the entire region is reserved |
|
2514 |
// and committed in a single VirtualAlloc() call. This may change in the |
|
2515 |
// future, but with Windows 2003 it's not possible to commit on demand. |
|
2516 |
return false; |
|
2517 |
} |
|
2518 |
||
2519 |
bool os::can_execute_large_page_memory() { |
|
2520 |
return true; |
|
2521 |
} |
|
2522 |
||
2523 |
char* os::reserve_memory_special(size_t bytes) { |
|
2524 |
DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES; |
|
2525 |
char * res = (char *)VirtualAlloc(NULL, bytes, flag, PAGE_EXECUTE_READWRITE); |
|
2526 |
return res; |
|
2527 |
} |
|
2528 |
||
2529 |
bool os::release_memory_special(char* base, size_t bytes) { |
|
2530 |
return release_memory(base, bytes); |
|
2531 |
} |
|
2532 |
||
2533 |
void os::print_statistics() { |
|
2534 |
} |
|
2535 |
||
2536 |
bool os::commit_memory(char* addr, size_t bytes) { |
|
2537 |
if (bytes == 0) { |
|
2538 |
// Don't bother the OS with noops. |
|
2539 |
return true; |
|
2540 |
} |
|
2541 |
assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries"); |
|
2542 |
assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks"); |
|
2543 |
// Don't attempt to print anything if the OS call fails. We're |
|
2544 |
// probably low on resources, so the print itself may cause crashes. |
|
2545 |
return VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_EXECUTE_READWRITE) != NULL; |
|
2546 |
} |
|
2547 |
||
2548 |
bool os::commit_memory(char* addr, size_t size, size_t alignment_hint) { |
|
2549 |
return commit_memory(addr, size); |
|
2550 |
} |
|
2551 |
||
2552 |
bool os::uncommit_memory(char* addr, size_t bytes) { |
|
2553 |
if (bytes == 0) { |
|
2554 |
// Don't bother the OS with noops. |
|
2555 |
return true; |
|
2556 |
} |
|
2557 |
assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries"); |
|
2558 |
assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks"); |
|
2559 |
return VirtualFree(addr, bytes, MEM_DECOMMIT) != 0; |
|
2560 |
} |
|
2561 |
||
2562 |
bool os::release_memory(char* addr, size_t bytes) { |
|
2563 |
return VirtualFree(addr, 0, MEM_RELEASE) != 0; |
|
2564 |
} |
|
2565 |
||
2566 |
bool os::protect_memory(char* addr, size_t bytes) { |
|
2567 |
DWORD old_status; |
|
2568 |
return VirtualProtect(addr, bytes, PAGE_READONLY, &old_status) != 0; |
|
2569 |
} |
|
2570 |
||
2571 |
bool os::guard_memory(char* addr, size_t bytes) { |
|
2572 |
DWORD old_status; |
|
2573 |
return VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE | PAGE_GUARD, &old_status) != 0; |
|
2574 |
} |
|
2575 |
||
2576 |
bool os::unguard_memory(char* addr, size_t bytes) { |
|
2577 |
DWORD old_status; |
|
2578 |
return VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &old_status) != 0; |
|
2579 |
} |
|
2580 |
||
2581 |
void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) { } |
|
2582 |
void os::free_memory(char *addr, size_t bytes) { } |
|
2583 |
void os::numa_make_global(char *addr, size_t bytes) { } |
|
2584 |
void os::numa_make_local(char *addr, size_t bytes) { } |
|
2585 |
bool os::numa_topology_changed() { return false; } |
|
2586 |
size_t os::numa_get_groups_num() { return 1; } |
|
2587 |
int os::numa_get_group_id() { return 0; } |
|
2588 |
size_t os::numa_get_leaf_groups(int *ids, size_t size) { |
|
2589 |
if (size > 0) { |
|
2590 |
ids[0] = 0; |
|
2591 |
return 1; |
|
2592 |
} |
|
2593 |
return 0; |
|
2594 |
} |
|
2595 |
||
2596 |
bool os::get_page_info(char *start, page_info* info) { |
|
2597 |
return false; |
|
2598 |
} |
|
2599 |
||
2600 |
char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) { |
|
2601 |
return end; |
|
2602 |
} |
|
2603 |
||
2604 |
char* os::non_memory_address_word() { |
|
2605 |
// Must never look like an address returned by reserve_memory, |
|
2606 |
// even in its subfields (as defined by the CPU immediate fields, |
|
2607 |
// if the CPU splits constants across multiple instructions). |
|
2608 |
return (char*)-1; |
|
2609 |
} |
|
2610 |
||
2611 |
#define MAX_ERROR_COUNT 100 |
|
2612 |
#define SYS_THREAD_ERROR 0xffffffffUL |
|
2613 |
||
2614 |
void os::pd_start_thread(Thread* thread) { |
|
2615 |
DWORD ret = ResumeThread(thread->osthread()->thread_handle()); |
|
2616 |
// Returns previous suspend state: |
|
2617 |
// 0: Thread was not suspended |
|
2618 |
// 1: Thread is running now |
|
2619 |
// >1: Thread is still suspended. |
|
2620 |
assert(ret != SYS_THREAD_ERROR, "StartThread failed"); // should propagate back |
|
2621 |
} |
|
2622 |
||
2623 |
size_t os::read(int fd, void *buf, unsigned int nBytes) { |
|
2624 |
return ::read(fd, buf, nBytes); |
|
2625 |
} |
|
2626 |
||
2627 |
class HighResolutionInterval { |
|
2628 |
// The default timer resolution seems to be 10 milliseconds. |
|
2629 |
// (Where is this written down?) |
|
2630 |
// If someone wants to sleep for only a fraction of the default, |
|
2631 |
// then we set the timer resolution down to 1 millisecond for |
|
2632 |
// the duration of their interval. |
|
2633 |
// We carefully set the resolution back, since otherwise we |
|
2634 |
// seem to incur an overhead (3%?) that we don't need. |
|
2635 |
// CONSIDER: if ms is small, say 3, then we should run with a high resolution timer. |
|
2636 |
// But if ms is large, say 500 or 503, we should avoid the call to timeBeginPeriod(). |
|
2637 |
// Alternatively, we could compute the relative error (503/500 = .6%) and only use |
|
2638 |
// timeBeginPeriod() if the relative error exceeded some threshold. |
|
2639 |
// timeBeginPeriod() has been linked to problems with clock drift on win32 systems and |
|
2640 |
// to decreased efficiency related to increased timer "tick" rates. We want to minimize |
|
2641 |
// (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high |
|
2642 |
// resolution timers running. |
|
2643 |
private: |
|
2644 |
jlong resolution; |
|
2645 |
public: |
|
2646 |
HighResolutionInterval(jlong ms) { |
|
2647 |
resolution = ms % 10L; |
|
2648 |
if (resolution != 0) { |
|
2649 |
MMRESULT result = timeBeginPeriod(1L); |
|
2650 |
} |
|
2651 |
} |
|
2652 |
~HighResolutionInterval() { |
|
2653 |
if (resolution != 0) { |
|
2654 |
MMRESULT result = timeEndPeriod(1L); |
|
2655 |
} |
|
2656 |
resolution = 0L; |
|
2657 |
} |
|
2658 |
}; |
|
2659 |
||
2660 |
int os::sleep(Thread* thread, jlong ms, bool interruptable) { |
|
2661 |
jlong limit = (jlong) MAXDWORD; |
|
2662 |
||
2663 |
while(ms > limit) { |
|
2664 |
int res; |
|
2665 |
if ((res = sleep(thread, limit, interruptable)) != OS_TIMEOUT) |
|
2666 |
return res; |
|
2667 |
ms -= limit; |
|
2668 |
} |
|
2669 |
||
2670 |
assert(thread == Thread::current(), "thread consistency check"); |
|
2671 |
OSThread* osthread = thread->osthread(); |
|
2672 |
OSThreadWaitState osts(osthread, false /* not Object.wait() */); |
|
2673 |
int result; |
|
2674 |
if (interruptable) { |
|
2675 |
assert(thread->is_Java_thread(), "must be java thread"); |
|
2676 |
JavaThread *jt = (JavaThread *) thread; |
|
2677 |
ThreadBlockInVM tbivm(jt); |
|
2678 |
||
2679 |
jt->set_suspend_equivalent(); |
|
2680 |
// cleared by handle_special_suspend_equivalent_condition() or |
|
2681 |
// java_suspend_self() via check_and_wait_while_suspended() |
|
2682 |
||
2683 |
HANDLE events[1]; |
|
2684 |
events[0] = osthread->interrupt_event(); |
|
2685 |
HighResolutionInterval *phri=NULL; |
|
2686 |
if(!ForceTimeHighResolution) |
|
2687 |
phri = new HighResolutionInterval( ms ); |
|
2688 |
if (WaitForMultipleObjects(1, events, FALSE, (DWORD)ms) == WAIT_TIMEOUT) { |
|
2689 |
result = OS_TIMEOUT; |
|
2690 |
} else { |
|
2691 |
ResetEvent(osthread->interrupt_event()); |
|
2692 |
osthread->set_interrupted(false); |
|
2693 |
result = OS_INTRPT; |
|
2694 |
} |
|
2695 |
delete phri; //if it is NULL, harmless |
|
2696 |
||
2697 |
// were we externally suspended while we were waiting? |
|
2698 |
jt->check_and_wait_while_suspended(); |
|
2699 |
} else { |
|
2700 |
assert(!thread->is_Java_thread(), "must not be java thread"); |
|
2701 |
Sleep((long) ms); |
|
2702 |
result = OS_TIMEOUT; |
|
2703 |
} |
|
2704 |
return result; |
|
2705 |
} |
|
2706 |
||
2707 |
// Sleep forever; naked call to OS-specific sleep; use with CAUTION |
|
2708 |
void os::infinite_sleep() { |
|
2709 |
while (true) { // sleep forever ... |
|
2710 |
Sleep(100000); // ... 100 seconds at a time |
|
2711 |
} |
|
2712 |
} |
|
2713 |
||
2714 |
typedef BOOL (WINAPI * STTSignature)(void) ; |
|
2715 |
||
2716 |
os::YieldResult os::NakedYield() { |
|
2717 |
// Use either SwitchToThread() or Sleep(0) |
|
2718 |
// Consider passing back the return value from SwitchToThread(). |
|
2719 |
// We use GetProcAddress() because ancient Win9X versions of Windows don't support SwitchToThread. |
|
2720 |
// In that case we revert to Sleep(0). |
|
2721 |
static volatile STTSignature stt = (STTSignature) 1 ; |
|
2722 |
||
2723 |
if (stt == ((STTSignature) 1)) { |
|
2724 |
stt = (STTSignature) ::GetProcAddress (LoadLibrary ("Kernel32.dll"), "SwitchToThread") ; |
|
2725 |
// It's OK if threads race during initialization as the operation above is idempotent. |
|
2726 |
} |
|
2727 |
if (stt != NULL) { |
|
2728 |
return (*stt)() ? os::YIELD_SWITCHED : os::YIELD_NONEREADY ; |
|
2729 |
} else { |
|
2730 |
Sleep (0) ; |
|
2731 |
} |
|
2732 |
return os::YIELD_UNKNOWN ; |
|
2733 |
} |
|
2734 |
||
2735 |
void os::yield() { os::NakedYield(); } |
|
2736 |
||
2737 |
void os::yield_all(int attempts) { |
|
2738 |
// Yields to all threads, including threads with lower priorities |
|
2739 |
Sleep(1); |
|
2740 |
} |
|
2741 |
||
2742 |
// Win32 only gives you access to seven real priorities at a time, |
|
2743 |
// so we compress Java's ten down to seven. It would be better |
|
2744 |
// if we dynamically adjusted relative priorities. |
|
2745 |
||
2746 |
int os::java_to_os_priority[MaxPriority + 1] = { |
|
2747 |
THREAD_PRIORITY_IDLE, // 0 Entry should never be used |
|
2748 |
THREAD_PRIORITY_LOWEST, // 1 MinPriority |
|
2749 |
THREAD_PRIORITY_LOWEST, // 2 |
|
2750 |
THREAD_PRIORITY_BELOW_NORMAL, // 3 |
|
2751 |
THREAD_PRIORITY_BELOW_NORMAL, // 4 |
|
2752 |
THREAD_PRIORITY_NORMAL, // 5 NormPriority |
|
2753 |
THREAD_PRIORITY_NORMAL, // 6 |
|
2754 |
THREAD_PRIORITY_ABOVE_NORMAL, // 7 |
|
2755 |
THREAD_PRIORITY_ABOVE_NORMAL, // 8 |
|
2756 |
THREAD_PRIORITY_HIGHEST, // 9 NearMaxPriority |
|
2757 |
THREAD_PRIORITY_HIGHEST // 10 MaxPriority |
|
2758 |
}; |
|
2759 |
||
2760 |
int prio_policy1[MaxPriority + 1] = { |
|
2761 |
THREAD_PRIORITY_IDLE, // 0 Entry should never be used |
|
2762 |
THREAD_PRIORITY_LOWEST, // 1 MinPriority |
|
2763 |
THREAD_PRIORITY_LOWEST, // 2 |
|
2764 |
THREAD_PRIORITY_BELOW_NORMAL, // 3 |
|
2765 |
THREAD_PRIORITY_BELOW_NORMAL, // 4 |
|
2766 |
THREAD_PRIORITY_NORMAL, // 5 NormPriority |
|
2767 |
THREAD_PRIORITY_ABOVE_NORMAL, // 6 |
|
2768 |
THREAD_PRIORITY_ABOVE_NORMAL, // 7 |
|
2769 |
THREAD_PRIORITY_HIGHEST, // 8 |
|
2770 |
THREAD_PRIORITY_HIGHEST, // 9 NearMaxPriority |
|
2771 |
THREAD_PRIORITY_TIME_CRITICAL // 10 MaxPriority |
|
2772 |
}; |
|
2773 |
||
2774 |
static int prio_init() { |
|
2775 |
// If ThreadPriorityPolicy is 1, switch tables |
|
2776 |
if (ThreadPriorityPolicy == 1) { |
|
2777 |
int i; |
|
2778 |
for (i = 0; i < MaxPriority + 1; i++) { |
|
2779 |
os::java_to_os_priority[i] = prio_policy1[i]; |
|
2780 |
} |
|
2781 |
} |
|
2782 |
return 0; |
|
2783 |
} |
|
2784 |
||
2785 |
OSReturn os::set_native_priority(Thread* thread, int priority) { |
|
2786 |
if (!UseThreadPriorities) return OS_OK; |
|
2787 |
bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0; |
|
2788 |
return ret ? OS_OK : OS_ERR; |
|
2789 |
} |
|
2790 |
||
2791 |
OSReturn os::get_native_priority(const Thread* const thread, int* priority_ptr) { |
|
2792 |
if ( !UseThreadPriorities ) { |
|
2793 |
*priority_ptr = java_to_os_priority[NormPriority]; |
|
2794 |
return OS_OK; |
|
2795 |
} |
|
2796 |
int os_prio = GetThreadPriority(thread->osthread()->thread_handle()); |
|
2797 |
if (os_prio == THREAD_PRIORITY_ERROR_RETURN) { |
|
2798 |
assert(false, "GetThreadPriority failed"); |
|
2799 |
return OS_ERR; |
|
2800 |
} |
|
2801 |
*priority_ptr = os_prio; |
|
2802 |
return OS_OK; |
|
2803 |
} |
|
2804 |
||
2805 |
||
2806 |
// Hint to the underlying OS that a task switch would not be good. |
|
2807 |
// Void return because it's a hint and can fail. |
|
2808 |
void os::hint_no_preempt() {} |
|
2809 |
||
2810 |
void os::interrupt(Thread* thread) { |
|
2811 |
assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(), |
|
2812 |
"possibility of dangling Thread pointer"); |
|
2813 |
||
2814 |
OSThread* osthread = thread->osthread(); |
|
2815 |
osthread->set_interrupted(true); |
|
2816 |
// More than one thread can get here with the same value of osthread, |
|
2817 |
// resulting in multiple notifications. We do, however, want the store |
|
2818 |
// to interrupted() to be visible to other threads before we post |
|
2819 |
// the interrupt event. |
|
2820 |
OrderAccess::release(); |
|
2821 |
SetEvent(osthread->interrupt_event()); |
|
2822 |
// For JSR166: unpark after setting status |
|
2823 |
if (thread->is_Java_thread()) |
|
2824 |
((JavaThread*)thread)->parker()->unpark(); |
|
2825 |
||
2826 |
ParkEvent * ev = thread->_ParkEvent ; |
|
2827 |
if (ev != NULL) ev->unpark() ; |
|
2828 |
||
2829 |
} |
|
2830 |
||
2831 |
||
2832 |
bool os::is_interrupted(Thread* thread, bool clear_interrupted) { |
|
2833 |
assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(), |
|
2834 |
"possibility of dangling Thread pointer"); |
|
2835 |
||
2836 |
OSThread* osthread = thread->osthread(); |
|
2837 |
bool interrupted; |
|
2838 |
interrupted = osthread->interrupted(); |
|
2839 |
if (clear_interrupted == true) { |
|
2840 |
osthread->set_interrupted(false); |
|
2841 |
ResetEvent(osthread->interrupt_event()); |
|
2842 |
} // Otherwise leave the interrupted state alone |
|
2843 |
||
2844 |
return interrupted; |
|
2845 |
} |
|
2846 |
||
2847 |
// Gets a pc (hint) for a running thread. Currently used only for profiling. |
|
2848 |
ExtendedPC os::get_thread_pc(Thread* thread) { |
|
2849 |
CONTEXT context; |
|
2850 |
context.ContextFlags = CONTEXT_CONTROL; |
|
2851 |
HANDLE handle = thread->osthread()->thread_handle(); |
|
2852 |
#ifdef _M_IA64 |
|
2853 |
assert(0, "Fix get_thread_pc"); |
|
2854 |
return ExtendedPC(NULL); |
|
2855 |
#else |
|
2856 |
if (GetThreadContext(handle, &context)) { |
|
2857 |
#ifdef _M_AMD64 |
|
2858 |
return ExtendedPC((address) context.Rip); |
|
2859 |
#else |
|
2860 |
return ExtendedPC((address) context.Eip); |
|
2861 |
#endif |
|
2862 |
} else { |
|
2863 |
return ExtendedPC(NULL); |
|
2864 |
} |
|
2865 |
#endif |
|
2866 |
} |
|
2867 |
||
2868 |
// GetCurrentThreadId() returns DWORD |
|
2869 |
intx os::current_thread_id() { return GetCurrentThreadId(); } |
|
2870 |
||
2871 |
static int _initial_pid = 0; |
|
2872 |
||
2873 |
int os::current_process_id() |
|
2874 |
{ |
|
2875 |
return (_initial_pid ? _initial_pid : _getpid()); |
|
2876 |
} |
|
2877 |
||
2878 |
int os::win32::_vm_page_size = 0; |
|
2879 |
int os::win32::_vm_allocation_granularity = 0; |
|
2880 |
int os::win32::_processor_type = 0; |
|
2881 |
// Processor level is not available on non-NT systems, use vm_version instead |
|
2882 |
int os::win32::_processor_level = 0; |
|
2883 |
julong os::win32::_physical_memory = 0; |
|
2884 |
size_t os::win32::_default_stack_size = 0; |
|
2885 |
||
2886 |
intx os::win32::_os_thread_limit = 0; |
|
2887 |
volatile intx os::win32::_os_thread_count = 0; |
|
2888 |
||
2889 |
bool os::win32::_is_nt = false; |
|
2890 |
||
2891 |
||
2892 |
void os::win32::initialize_system_info() { |
|
2893 |
SYSTEM_INFO si; |
|
2894 |
GetSystemInfo(&si); |
|
2895 |
_vm_page_size = si.dwPageSize; |
|
2896 |
_vm_allocation_granularity = si.dwAllocationGranularity; |
|
2897 |
_processor_type = si.dwProcessorType; |
|
2898 |
_processor_level = si.wProcessorLevel; |
|
2899 |
_processor_count = si.dwNumberOfProcessors; |
|
2900 |
||
2901 |
MEMORYSTATUS ms; |
|
2902 |
// also returns dwAvailPhys (free physical memory bytes), dwTotalVirtual, dwAvailVirtual, |
|
2903 |
// dwMemoryLoad (% of memory in use) |
|
2904 |
GlobalMemoryStatus(&ms); |
|
2905 |
_physical_memory = ms.dwTotalPhys; |
|
2906 |
||
2907 |
OSVERSIONINFO oi; |
|
2908 |
oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFO); |
|
2909 |
GetVersionEx(&oi); |
|
2910 |
switch(oi.dwPlatformId) { |
|
2911 |
case VER_PLATFORM_WIN32_WINDOWS: _is_nt = false; break; |
|
2912 |
case VER_PLATFORM_WIN32_NT: _is_nt = true; break; |
|
2913 |
default: fatal("Unknown platform"); |
|
2914 |
} |
|
2915 |
||
2916 |
_default_stack_size = os::current_stack_size(); |
|
2917 |
assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size"); |
|
2918 |
assert((_default_stack_size & (_vm_page_size - 1)) == 0, |
|
2919 |
"stack size not a multiple of page size"); |
|
2920 |
||
2921 |
initialize_performance_counter(); |
|
2922 |
||
2923 |
// Win95/Win98 scheduler bug work-around. The Win95/98 scheduler is |
|
2924 |
// known to deadlock the system if the VM issues thread operations with |
|
2925 |
// too high a frequency, e.g., when changing thread priorities. |
|
2926 |
// The 6000 seems to work well - no deadlocks have been noticed on the test |
|
2927 |
// programs that we have seen experience this problem. |
|
2928 |
if (!os::win32::is_nt()) { |
|
2929 |
StarvationMonitorInterval = 6000; |
|
2930 |
} |
|
2931 |
} |
|
2932 |
||
2933 |
||
2934 |
void os::win32::setmode_streams() { |
|
2935 |
_setmode(_fileno(stdin), _O_BINARY); |
|
2936 |
_setmode(_fileno(stdout), _O_BINARY); |
|
2937 |
_setmode(_fileno(stderr), _O_BINARY); |
|
2938 |
} |
|
2939 |
||
2940 |
||
2941 |
int os::message_box(const char* title, const char* message) { |
|
2942 |
int result = MessageBox(NULL, message, title, |
|
2943 |
MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY); |
|
2944 |
return result == IDYES; |
|
2945 |
} |
|
2946 |
||
2947 |
int os::allocate_thread_local_storage() { |
|
2948 |
return TlsAlloc(); |
|
2949 |
} |
|
2950 |
||
2951 |
||
2952 |
void os::free_thread_local_storage(int index) { |
|
2953 |
TlsFree(index); |
|
2954 |
} |
|
2955 |
||
2956 |
||
2957 |
void os::thread_local_storage_at_put(int index, void* value) { |
|
2958 |
TlsSetValue(index, value); |
|
2959 |
assert(thread_local_storage_at(index) == value, "Just checking"); |
|
2960 |
} |
|
2961 |
||
2962 |
||
2963 |
void* os::thread_local_storage_at(int index) { |
|
2964 |
return TlsGetValue(index); |
|
2965 |
} |
|
2966 |
||
2967 |
||
2968 |
#ifndef PRODUCT |
|
2969 |
#ifndef _WIN64 |
|
2970 |
// Helpers to check whether NX protection is enabled |
|
2971 |
int nx_exception_filter(_EXCEPTION_POINTERS *pex) { |
|
2972 |
if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION && |
|
2973 |
pex->ExceptionRecord->NumberParameters > 0 && |
|
2974 |
pex->ExceptionRecord->ExceptionInformation[0] == |
|
2975 |
EXCEPTION_INFO_EXEC_VIOLATION) { |
|
2976 |
return EXCEPTION_EXECUTE_HANDLER; |
|
2977 |
} |
|
2978 |
return EXCEPTION_CONTINUE_SEARCH; |
|
2979 |
} |
|
2980 |
||
2981 |
void nx_check_protection() { |
|
2982 |
// If NX is enabled we'll get an exception calling into code on the stack |
|
2983 |
char code[] = { (char)0xC3 }; // ret |
|
2984 |
void *code_ptr = (void *)code; |
|
2985 |
__try { |
|
2986 |
__asm call code_ptr |
|
2987 |
} __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) { |
|
2988 |
tty->print_raw_cr("NX protection detected."); |
|
2989 |
} |
|
2990 |
} |
|
2991 |
#endif // _WIN64 |
|
2992 |
#endif // PRODUCT |
|
2993 |
||
2994 |
// this is called _before_ the global arguments have been parsed |
|
2995 |
void os::init(void) { |
|
2996 |
_initial_pid = _getpid(); |
|
2997 |
||
2998 |
init_random(1234567); |
|
2999 |
||
3000 |
win32::initialize_system_info(); |
|
3001 |
win32::setmode_streams(); |
|
3002 |
init_page_sizes((size_t) win32::vm_page_size()); |
|
3003 |
||
3004 |
// For better scalability on MP systems (must be called after initialize_system_info) |
|
3005 |
#ifndef PRODUCT |
|
3006 |
if (is_MP()) { |
|
3007 |
NoYieldsInMicrolock = true; |
|
3008 |
} |
|
3009 |
#endif |
|
3010 |
// Initialize main_process and main_thread |
|
3011 |
main_process = GetCurrentProcess(); // Remember main_process is a pseudo handle |
|
3012 |
if (!DuplicateHandle(main_process, GetCurrentThread(), main_process, |
|
3013 |
&main_thread, THREAD_ALL_ACCESS, false, 0)) { |
|
3014 |
fatal("DuplicateHandle failed\n"); |
|
3015 |
} |
|
3016 |
main_thread_id = (int) GetCurrentThreadId(); |
|
3017 |
} |
|
3018 |
||
3019 |
// To install functions for atexit processing |
|
3020 |
extern "C" { |
|
3021 |
static void perfMemory_exit_helper() { |
|
3022 |
perfMemory_exit(); |
|
3023 |
} |
|
3024 |
} |
|
3025 |
||
3026 |
||
3027 |
// this is called _after_ the global arguments have been parsed |
|
3028 |
jint os::init_2(void) { |
|
3029 |
// Allocate a single page and mark it as readable for safepoint polling |
|
3030 |
address polling_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READONLY); |
|
3031 |
guarantee( polling_page != NULL, "Reserve Failed for polling page"); |
|
3032 |
||
3033 |
address return_page = (address)VirtualAlloc(polling_page, os::vm_page_size(), MEM_COMMIT, PAGE_READONLY); |
|
3034 |
guarantee( return_page != NULL, "Commit Failed for polling page"); |
|
3035 |
||
3036 |
os::set_polling_page( polling_page ); |
|
3037 |
||
3038 |
#ifndef PRODUCT |
|
3039 |
if( Verbose && PrintMiscellaneous ) |
|
3040 |
tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page); |
|
3041 |
#endif |
|
3042 |
||
3043 |
if (!UseMembar) { |
|
3044 |
address mem_serialize_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_EXECUTE_READWRITE); |
|
3045 |
guarantee( mem_serialize_page != NULL, "Reserve Failed for memory serialize page"); |
|
3046 |
||
3047 |
return_page = (address)VirtualAlloc(mem_serialize_page, os::vm_page_size(), MEM_COMMIT, PAGE_EXECUTE_READWRITE); |
|
3048 |
guarantee( return_page != NULL, "Commit Failed for memory serialize page"); |
|
3049 |
||
3050 |
os::set_memory_serialize_page( mem_serialize_page ); |
|
3051 |
||
3052 |
#ifndef PRODUCT |
|
3053 |
if(Verbose && PrintMiscellaneous) |
|
3054 |
tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page); |
|
3055 |
#endif |
|
3056 |
} |
|
3057 |
||
3058 |
FLAG_SET_DEFAULT(UseLargePages, os::large_page_init()); |
|
3059 |
||
3060 |
// Setup Windows Exceptions |
|
3061 |
||
3062 |
// On Itanium systems, Structured Exception Handling does not |
|
3063 |
// work since stack frames must be walkable by the OS. Since |
|
3064 |
// much of our code is dynamically generated, and we do not have |
|
3065 |
// proper unwind .xdata sections, the system simply exits |
|
3066 |
// rather than delivering the exception. To work around |
|
3067 |
// this we use Vectored Exceptions instead. |
|
3068 |
#ifdef _WIN64 |
|
3069 |
if (UseVectoredExceptions) { |
|
3070 |
topLevelVectoredExceptionHandler = AddVectoredExceptionHandler( 1, topLevelExceptionFilter); |
|
3071 |
} |
|
3072 |
#endif |
|
3073 |
||
3074 |
// for debugging float code generation bugs |
|
3075 |
if (ForceFloatExceptions) { |
|
3076 |
#ifndef _WIN64 |
|
3077 |
static long fp_control_word = 0; |
|
3078 |
__asm { fstcw fp_control_word } |
|
3079 |
// see Intel PPro Manual, Vol. 2, p 7-16 |
|
3080 |
const long precision = 0x20; |
|
3081 |
const long underflow = 0x10; |
|
3082 |
const long overflow = 0x08; |
|
3083 |
const long zero_div = 0x04; |
|
3084 |
const long denorm = 0x02; |
|
3085 |
const long invalid = 0x01; |
|
3086 |
fp_control_word |= invalid; |
|
3087 |
__asm { fldcw fp_control_word } |
|
3088 |
#endif |
|
3089 |
} |
|
3090 |
||
3091 |
// Initialize HPI. |
|
3092 |
jint hpi_result = hpi::initialize(); |
|
3093 |
if (hpi_result != JNI_OK) { return hpi_result; } |
|
3094 |
||
3095 |
// If stack_commit_size is 0, windows will reserve the default size, |
|
3096 |
// but only commit a small portion of it. |
|
3097 |
size_t stack_commit_size = round_to(ThreadStackSize*K, os::vm_page_size()); |
|
3098 |
size_t default_reserve_size = os::win32::default_stack_size(); |
|
3099 |
size_t actual_reserve_size = stack_commit_size; |
|
3100 |
if (stack_commit_size < default_reserve_size) { |
|
3101 |
// If stack_commit_size == 0, we want this too |
|
3102 |
actual_reserve_size = default_reserve_size; |
|
3103 |
} |
|
3104 |
||
3105 |
JavaThread::set_stack_size_at_create(stack_commit_size); |
|
3106 |
||
3107 |
// Calculate theoretical max. size of Threads to guard against artificial |
|
3108 |
// out-of-memory situations, where all available address-space has been |
|
3109 |
// reserved by thread stacks. |
|
3110 |
assert(actual_reserve_size != 0, "Must have a stack"); |
|
3111 |
||
3112 |
// Calculate the thread limit when we should start doing Virtual Memory |
|
3113 |
// banging. Currently this is when the threads will have used all but 200Mb of space. |
|
3114 |
// |
|
3115 |
// TODO: consider performing a similar calculation for commit size instead |
|
3116 |
// of reserve size, since on a 64-bit platform we'll run into that more |
|
3117 |
// often than running out of virtual memory space. We can use the |
|
3118 |
// lower value of the two calculations as the os_thread_limit. |
|
3119 |
size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K); |
3120 |
win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size); |
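// Worked example for the calculation above (illustrative numbers only): on a
// 32-bit VM, BitsPerWord is 32, so max_address_space is 2GB - 200MB, roughly
// 1848MB.  With a 1MB per-thread reserve size that gives an _os_thread_limit of
// about 1848 threads before the virtual-memory banging kicks in.  On a 64-bit VM
// the same formula is so large that the commit-size variant suggested in the
// TODO above would be the binding limit in practice.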
3121 |
||
3122 |
// at exit methods are called in the reverse order of their registration. |
|
3123 |
// there is no limit to the number of functions registered. atexit does |
|
3124 |
// not set errno. |
|
3125 |
||
3126 |
if (PerfAllowAtExitRegistration) { |
|
3127 |
// only register atexit functions if PerfAllowAtExitRegistration is set. |
|
3128 |
// atexit functions can be delayed until process exit time, which |
|
3129 |
// can be problematic for embedded VM situations. Embedded VMs should |
|
3130 |
// call DestroyJavaVM() to assure that VM resources are released. |
|
3131 |
||
3132 |
// note: perfMemory_exit_helper atexit function may be removed in |
|
3133 |
// the future if the appropriate cleanup code can be added to the |
|
3134 |
// VM_Exit VMOperation's doit method. |
|
3135 |
if (atexit(perfMemory_exit_helper) != 0) { |
|
3136 |
warning("os::init_2 atexit(perfMemory_exit_helper) failed"); |
|
3137 |
} |
|
3138 |
} |
|
3139 |
||
3140 |
// initialize PSAPI or ToolHelp for fatal error handler |
|
3141 |
if (win32::is_nt()) _init_psapi(); |
|
3142 |
else _init_toolhelp(); |
|
3143 |
||
3144 |
#ifndef _WIN64 |
|
3145 |
// Print something if NX is enabled (win32 on AMD64) |
|
3146 |
NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection()); |
|
3147 |
#endif |
|
3148 |
||
3149 |
// initialize thread priority policy |
|
3150 |
prio_init(); |
|
3151 |
||
3152 |
return JNI_OK; |
|
3153 |
} |
|
3154 |
||
3155 |
||
3156 |
// Mark the polling page as unreadable |
|
3157 |
void os::make_polling_page_unreadable(void) { |
|
3158 |
DWORD old_status; |
|
3159 |
if( !VirtualProtect((char *)_polling_page, os::vm_page_size(), PAGE_NOACCESS, &old_status) ) |
|
3160 |
fatal("Could not disable polling page"); |
|
3161 |
}; |
|
3162 |
||
3163 |
// Mark the polling page as readable |
|
3164 |
void os::make_polling_page_readable(void) { |
|
3165 |
DWORD old_status; |
|
3166 |
if( !VirtualProtect((char *)_polling_page, os::vm_page_size(), PAGE_READONLY, &old_status) ) |
|
3167 |
fatal("Could not enable polling page"); |
|
3168 |
}; |
|
3169 |
||
3170 |
||
3171 |
int os::stat(const char *path, struct stat *sbuf) { |
|
3172 |
char pathbuf[MAX_PATH]; |
|
3173 |
if (strlen(path) > MAX_PATH - 1) { |
|
3174 |
errno = ENAMETOOLONG; |
|
3175 |
return -1; |
|
3176 |
} |
|
3177 |
hpi::native_path(strcpy(pathbuf, path)); |
|
3178 |
int ret = ::stat(pathbuf, sbuf); |
|
3179 |
if (sbuf != NULL && UseUTCFileTimestamp) { |
|
3180 |
// Fix for 6539723. st_mtime returned from stat() is dependent on |
|
3181 |
// the system timezone and so can return different values for the |
|
3182 |
// same file if/when daylight savings time changes. This adjustment |
|
3183 |
// makes sure the same timestamp is returned regardless of the TZ. |
|
3184 |
// |
|
3185 |
// See: |
|
3186 |
// http://msdn.microsoft.com/library/ |
|
3187 |
// default.asp?url=/library/en-us/sysinfo/base/ |
|
3188 |
// time_zone_information_str.asp |
|
3189 |
// and |
|
3190 |
// http://msdn.microsoft.com/library/default.asp?url= |
|
3191 |
// /library/en-us/sysinfo/base/settimezoneinformation.asp |
|
3192 |
// |
|
3193 |
// NOTE: there is an insidious bug here: If the timezone is changed |
|
3194 |
// after the call to stat() but before 'GetTimeZoneInformation()', then |
|
3195 |
// the adjustment we do here will be wrong and we'll return the wrong |
|
3196 |
// value (which will likely end up creating an invalid class data |
|
3197 |
// archive). Absent a better API for this, or some time zone locking |
|
3198 |
// mechanism, we'll have to live with this risk. |
|
3199 |
TIME_ZONE_INFORMATION tz; |
|
3200 |
DWORD tzid = GetTimeZoneInformation(&tz); |
|
3201 |
int daylightBias = |
|
3202 |
(tzid == TIME_ZONE_ID_DAYLIGHT) ? tz.DaylightBias : tz.StandardBias; |
|
3203 |
sbuf->st_mtime += (tz.Bias + daylightBias) * 60; |
|
3204 |
} |
|
3205 |
return ret; |
|
3206 |
} |
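// Worked example for the adjustment above (illustrative values): for US Eastern
// time GetTimeZoneInformation() reports Bias = 300 minutes (UTC = local + 5h) and
// DaylightBias = -60.  While daylight saving is in effect the correction added to
// st_mtime is (300 + (-60)) * 60 = 14400 seconds, i.e. the 4-hour offset between
// local daylight time and UTC; outside daylight saving StandardBias (normally 0)
// is used instead and the correction is 300 * 60 = 18000 seconds.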
|
3207 |
||
3208 |
||
3209 |
#define FT2INT64(ft) \ |
|
3210 |
((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime)) |
|
3211 |
||
3212 |
||
3213 |
// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool) |
|
3214 |
// are used by JVM M&M and JVMTI to get user+sys or user CPU time |
|
3215 |
// of a thread. |
|
3216 |
// |
|
3217 |
// current_thread_cpu_time() and thread_cpu_time(Thread*) returns |
|
3218 |
// the fast estimate available on the platform. |
|
3219 |
||
3220 |
// current_thread_cpu_time() is not optimized for Windows yet |
|
3221 |
jlong os::current_thread_cpu_time() { |
|
3222 |
// return user + sys since the cost is the same |
|
3223 |
return os::thread_cpu_time(Thread::current(), true /* user+sys */); |
|
3224 |
} |
|
3225 |
||
3226 |
jlong os::thread_cpu_time(Thread* thread) { |
|
3227 |
// consistent with what current_thread_cpu_time() returns. |
|
3228 |
return os::thread_cpu_time(thread, true /* user+sys */); |
|
3229 |
} |
|
3230 |
||
3231 |
jlong os::current_thread_cpu_time(bool user_sys_cpu_time) { |
|
3232 |
return os::thread_cpu_time(Thread::current(), user_sys_cpu_time); |
|
3233 |
} |
|
3234 |
||
3235 |
jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) { |
|
3236 |
// This code is copied from the classic VM -> hpi::sysThreadCPUTime |
|
3237 |
// If this function changes, os::is_thread_cpu_time_supported() should too |
|
3238 |
if (os::win32::is_nt()) { |
|
3239 |
FILETIME CreationTime; |
|
3240 |
FILETIME ExitTime; |
|
3241 |
FILETIME KernelTime; |
|
3242 |
FILETIME UserTime; |
|
3243 |
||
3244 |
if ( GetThreadTimes(thread->osthread()->thread_handle(), |
|
3245 |
&CreationTime, &ExitTime, &KernelTime, &UserTime) == 0) |
|
3246 |
return -1; |
|
3247 |
else |
|
3248 |
if (user_sys_cpu_time) { |
|
3249 |
return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100; |
|
3250 |
} else { |
|
3251 |
return FT2INT64(UserTime) * 100; |
|
3252 |
} |
|
3253 |
} else { |
|
3254 |
return (jlong) timeGetTime() * 1000000; |
|
3255 |
} |
|
3256 |
} |
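// Unit note with a worked example (illustrative numbers): GetThreadTimes() reports
// UserTime/KernelTime as FILETIMEs, i.e. counts of 100-nanosecond intervals, so the
// "* 100" above converts them to the nanoseconds this interface returns.  For
// instance UserTime = { dwHighDateTime = 0, dwLowDateTime = 150000 } gives
// FT2INT64 == 150000 ticks == 15ms of CPU time, returned as 15000000 ns.  The
// non-NT fallback scales timeGetTime()'s milliseconds by 1000000 for the same reason.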
|
3257 |
||
3258 |
void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) { |
|
3259 |
info_ptr->max_value = ALL_64_BITS; // the max value -- all 64 bits |
|
3260 |
info_ptr->may_skip_backward = false; // GetThreadTimes returns absolute time |
|
3261 |
info_ptr->may_skip_forward = false; // GetThreadTimes returns absolute time |
|
3262 |
info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned |
|
3263 |
} |
|
3264 |
||
3265 |
void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) { |
|
3266 |
info_ptr->max_value = ALL_64_BITS; // the max value -- all 64 bits |
|
3267 |
info_ptr->may_skip_backward = false; // GetThreadTimes returns absolute time |
|
3268 |
info_ptr->may_skip_forward = false; // GetThreadTimes returns absolute time |
|
3269 |
info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned |
|
3270 |
} |
|
3271 |
||
3272 |
bool os::is_thread_cpu_time_supported() { |
|
3273 |
// see os::thread_cpu_time |
|
3274 |
if (os::win32::is_nt()) { |
|
3275 |
FILETIME CreationTime; |
|
3276 |
FILETIME ExitTime; |
|
3277 |
FILETIME KernelTime; |
|
3278 |
FILETIME UserTime; |
|
3279 |
||
3280 |
if ( GetThreadTimes(GetCurrentThread(), |
|
3281 |
&CreationTime, &ExitTime, &KernelTime, &UserTime) == 0) |
|
3282 |
return false; |
|
3283 |
else |
|
3284 |
return true; |
|
3285 |
} else { |
|
3286 |
return false; |
|
3287 |
} |
|
3288 |
} |
|
3289 |
||
3290 |
// Windows doesn't provide a loadavg primitive so this is stubbed out for now. |
|
3291 |
// It does have primitives (PDH API) to get CPU usage and run queue length. |
|
3292 |
// "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length" |
|
3293 |
// If we wanted to implement loadavg on Windows, we have a few options: |
|
3294 |
// |
|
3295 |
// a) Query CPU usage and run queue length and "fake" an answer by |
|
3296 |
// returning the CPU usage if it's under 100%, and the run queue |
|
3297 |
// length otherwise. It turns out that querying is pretty slow |
|
3298 |
// on Windows, on the order of 200 microseconds on a fast machine. |
|
3299 |
// Note that on Windows the CPU usage value is the % usage |
|
3300 |
// since the last time the API was called (and the first call |
|
3301 |
// returns 100%), so we'd have to deal with that as well. |
|
3302 |
// |
|
3303 |
// b) Sample the "fake" answer using a sampling thread and store |
|
3304 |
// the answer in a global variable. The call to loadavg would |
|
3305 |
// just return the value of the global, avoiding the slow query. |
|
3306 |
// |
|
3307 |
// c) Sample a better answer using exponential decay to smooth the |
|
3308 |
// value. This is basically the algorithm used by UNIX kernels. |
|
3309 |
// |
|
3310 |
// Note that sampling thread starvation could affect both (b) and (c). |
|
3311 |
int os::loadavg(double loadavg[], int nelem) { |
|
3312 |
return -1; |
|
3313 |
} |
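// Sketch of option (a) above using the PDH API (illustrative only, not compiled in,
// and not linked against pdh.lib here).  Note that "% Processor Time" only becomes
// meaningful after two PdhCollectQueryData() samples taken some interval apart,
// which is part of why the query is slow.
#if 0
#include <pdh.h>
static double sample_cpu_usage_sketch() {
  PDH_HQUERY query;
  PDH_HCOUNTER counter;
  PDH_FMT_COUNTERVALUE value;
  if (PdhOpenQuery(NULL, 0, &query) != ERROR_SUCCESS) return -1.0;
  PdhAddCounter(query, "\\Processor(_Total)\\% Processor Time", 0, &counter);
  PdhCollectQueryData(query);             // first sample establishes the baseline
  Sleep(100);                             // wait so the second sample has a delta
  PdhCollectQueryData(query);
  PdhGetFormattedCounterValue(counter, PDH_FMT_DOUBLE, NULL, &value);
  PdhCloseQuery(query);
  return value.doubleValue;               // % CPU busy over the sampling interval
}
#endif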
|
3314 |
||
3315 |
||
3316 |
// DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield() |
|
3317 |
bool os::dont_yield() { |
|
3318 |
return DontYieldALot; |
|
3319 |
} |
|
3320 |
||
3321 |
// Is a (classpath) directory empty? |
|
3322 |
bool os::dir_is_empty(const char* path) { |
|
3323 |
WIN32_FIND_DATA fd; |
|
3324 |
HANDLE f = FindFirstFile(path, &fd); |
|
3325 |
if (f == INVALID_HANDLE_VALUE) { |
|
3326 |
return true; |
|
3327 |
} |
|
3328 |
FindClose(f); |
|
3329 |
return false; |
|
3330 |
} |
|
3331 |
||
3332 |
// create binary file, rewriting existing file if required |
|
3333 |
int os::create_binary_file(const char* path, bool rewrite_existing) { |
|
3334 |
int oflags = _O_CREAT | _O_WRONLY | _O_BINARY; |
|
3335 |
if (!rewrite_existing) { |
|
3336 |
oflags |= _O_EXCL; |
|
3337 |
} |
|
3338 |
return ::open(path, oflags, _S_IREAD | _S_IWRITE); |
|
3339 |
} |
|
3340 |
||
3341 |
// return current position of file pointer |
|
3342 |
jlong os::current_file_offset(int fd) { |
|
3343 |
return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR); |
|
3344 |
} |
|
3345 |
||
3346 |
// move file pointer to the specified offset |
|
3347 |
jlong os::seek_to_file_offset(int fd, jlong offset) { |
|
3348 |
return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET); |
|
3349 |
} |
|
3350 |
||
3351 |
||
3352 |
// Map a block of memory. |
|
3353 |
char* os::map_memory(int fd, const char* file_name, size_t file_offset, |
|
3354 |
char *addr, size_t bytes, bool read_only, |
|
3355 |
bool allow_exec) { |
|
3356 |
HANDLE hFile; |
|
3357 |
char* base; |
|
3358 |
||
3359 |
hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL, |
|
3360 |
OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL); |
|
3361 |
if (hFile == INVALID_HANDLE_VALUE) {  // CreateFile returns INVALID_HANDLE_VALUE, not NULL, on failure |
|
3362 |
if (PrintMiscellaneous && Verbose) { |
|
3363 |
DWORD err = GetLastError(); |
|
3364 |
tty->print_cr("CreateFile() failed: GetLastError->%ld."); |
|
3365 |
} |
|
3366 |
return NULL; |
|
3367 |
} |
|
3368 |
||
3369 |
if (allow_exec) { |
|
3370 |
// CreateFileMapping/MapViewOfFileEx can't map executable memory |
|
3371 |
// unless it comes from a PE image (which the shared archive is not.) |
|
3372 |
// Even VirtualProtect refuses to give execute access to mapped memory |
|
3373 |
// that was not previously executable. |
|
3374 |
// |
|
3375 |
// Instead, stick the executable region in anonymous memory. Yuck. |
|
3376 |
// Penalty is that ~4 pages will not be shareable - in the future |
|
3377 |
// we might consider DLLizing the shared archive with a proper PE |
|
3378 |
// header so that mapping executable + sharing is possible. |
|
3379 |
||
3380 |
base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE, |
|
3381 |
PAGE_READWRITE); |
|
3382 |
if (base == NULL) { |
|
3383 |
if (PrintMiscellaneous && Verbose) { |
|
3384 |
DWORD err = GetLastError(); |
|
3385 |
tty->print_cr("VirtualAlloc() failed: GetLastError->%ld.", err); |
|
3386 |
} |
|
3387 |
CloseHandle(hFile); |
|
3388 |
return NULL; |
|
3389 |
} |
|
3390 |
||
3391 |
DWORD bytes_read; |
|
3392 |
OVERLAPPED overlapped; |
|
3393 |
overlapped.Offset = (DWORD)file_offset; |
|
3394 |
overlapped.OffsetHigh = 0; |
|
3395 |
overlapped.hEvent = NULL; |
|
3396 |
// ReadFile guarantees that if the return value is true, the requested |
|
3397 |
// number of bytes were read before returning. |
|
3398 |
bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0; |
|
3399 |
if (!res) { |
|
3400 |
if (PrintMiscellaneous && Verbose) { |
|
3401 |
DWORD err = GetLastError(); |
|
3402 |
tty->print_cr("ReadFile() failed: GetLastError->%ld.", err); |
|
3403 |
} |
|
3404 |
release_memory(base, bytes); |
|
3405 |
CloseHandle(hFile); |
|
3406 |
return NULL; |
|
3407 |
} |
|
3408 |
} else { |
|
3409 |
HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0, |
|
3410 |
NULL /*file_name*/); |
|
3411 |
if (hMap == NULL) { |
|
3412 |
if (PrintMiscellaneous && Verbose) { |
|
3413 |
DWORD err = GetLastError(); |
|
3414 |
tty->print_cr("CreateFileMapping() failed: GetLastError->%ld."); |
|
3415 |
} |
|
3416 |
CloseHandle(hFile); |
|
3417 |
return NULL; |
|
3418 |
} |
|
3419 |
||
3420 |
DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY; |
|
3421 |
base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset, |
|
3422 |
(DWORD)bytes, addr); |
|
3423 |
if (base == NULL) { |
|
3424 |
if (PrintMiscellaneous && Verbose) { |
|
3425 |
DWORD err = GetLastError(); |
|
3426 |
tty->print_cr("MapViewOfFileEx() failed: GetLastError->%ld.", err); |
|
3427 |
} |
|
3428 |
CloseHandle(hMap); |
|
3429 |
CloseHandle(hFile); |
|
3430 |
return NULL; |
|
3431 |
} |
|
3432 |
||
3433 |
if (CloseHandle(hMap) == 0) { |
|
3434 |
if (PrintMiscellaneous && Verbose) { |
|
3435 |
DWORD err = GetLastError(); |
|
3436 |
tty->print_cr("CloseHandle(hMap) failed: GetLastError->%ld.", err); |
|
3437 |
} |
|
3438 |
CloseHandle(hFile); |
|
3439 |
return base; |
|
3440 |
} |
|
3441 |
} |
|
3442 |
||
3443 |
if (allow_exec) { |
|
3444 |
DWORD old_protect; |
|
3445 |
DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE; |
|
3446 |
bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0; |
|
3447 |
||
3448 |
if (!res) { |
|
3449 |
if (PrintMiscellaneous && Verbose) { |
|
3450 |
DWORD err = GetLastError(); |
|
3451 |
tty->print_cr("VirtualProtect() failed: GetLastError->%ld.", err); |
|
3452 |
} |
|
3453 |
// Don't consider this a hard error; on IA32, even if the |
|
3454 |
// VirtualProtect fails, we should still be able to execute |
|
3455 |
CloseHandle(hFile); |
|
3456 |
return base; |
|
3457 |
} |
|
3458 |
} |
|
3459 |
||
3460 |
if (CloseHandle(hFile) == 0) { |
|
3461 |
if (PrintMiscellaneous && Verbose) { |
|
3462 |
DWORD err = GetLastError(); |
|
3463 |
tty->print_cr("CloseHandle(hFile) failed: GetLastError->%ld.", err); |
|
3464 |
} |
|
3465 |
return base; |
|
3466 |
} |
|
3467 |
||
3468 |
return base; |
|
3469 |
} |
|
3470 |
||
3471 |
||
3472 |
// Remap a block of memory. |
|
3473 |
char* os::remap_memory(int fd, const char* file_name, size_t file_offset, |
|
3474 |
char *addr, size_t bytes, bool read_only, |
|
3475 |
bool allow_exec) { |
|
3476 |
// This OS does not allow existing memory maps to be remapped so we |
|
3477 |
// have to unmap the memory before we remap it. |
|
3478 |
if (!os::unmap_memory(addr, bytes)) { |
|
3479 |
return NULL; |
|
3480 |
} |
|
3481 |
||
3482 |
// There is a very small theoretical window between the unmap_memory() |
|
3483 |
// call above and the map_memory() call below where a thread in native |
|
3484 |
// code may be able to access an address that is no longer mapped. |
|
3485 |
||
3486 |
return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only, |
|
3487 |
allow_exec); |
|
3488 |
} |
|
3489 |
||
3490 |
||
3491 |
// Unmap a block of memory. |
|
3492 |
// Returns true=success, otherwise false. |
|
3493 |
||
3494 |
bool os::unmap_memory(char* addr, size_t bytes) { |
|
3495 |
BOOL result = UnmapViewOfFile(addr); |
|
3496 |
if (result == 0) { |
|
3497 |
if (PrintMiscellaneous && Verbose) { |
|
3498 |
DWORD err = GetLastError(); |
|
3499 |
tty->print_cr("UnmapViewOfFile() failed: GetLastError->%ld.", err); |
|
3500 |
} |
|
3501 |
return false; |
|
3502 |
} |
|
3503 |
return true; |
|
3504 |
} |
|
3505 |
||
3506 |
void os::pause() { |
|
3507 |
char filename[MAX_PATH]; |
|
3508 |
if (PauseAtStartupFile && PauseAtStartupFile[0]) { |
|
3509 |
jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile); |
|
3510 |
} else { |
|
3511 |
jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id()); |
|
3512 |
} |
|
3513 |
||
3514 |
int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666); |
|
3515 |
if (fd != -1) { |
|
3516 |
struct stat buf; |
|
3517 |
close(fd); |
|
3518 |
while (::stat(filename, &buf) == 0) { |
|
3519 |
Sleep(100); |
|
3520 |
} |
|
3521 |
} else { |
|
3522 |
jio_fprintf(stderr, |
|
3523 |
"Could not open pause file '%s', continuing immediately.\n", filename); |
|
3524 |
} |
|
3525 |
} |
|
3526 |
||
3527 |
// An Event wraps a win32 "CreateEvent" kernel handle. |
|
3528 |
// |
|
3529 |
// We have a number of choices regarding "CreateEvent" win32 handle leakage: |
|
3530 |
// |
|
3531 |
// 1: When a thread dies return the Event to the EventFreeList, clear the ParkHandle |
|
3532 |
// field, and call CloseHandle() on the win32 event handle. Unpark() would |
|
3533 |
// need to be modified to tolerate finding a NULL (invalid) win32 event handle. |
|
3534 |
// In addition, an unpark() operation might fetch the handle field, but the |
|
3535 |
// event could recycle between the fetch and the SetEvent() operation. |
|
3536 |
// SetEvent() would either fail because the handle was invalid, or inadvertently work, |
|
3537 |
// as the win32 handle value had been recycled. In an ideal world calling SetEvent() |
|
3538 |
// on a stale but recycled handle would be harmless, but in practice this might |
|
3539 |
// confuse other non-Sun code, so it's not a viable approach. |
|
3540 |
// |
|
3541 |
// 2: Once a win32 event handle is associated with an Event, it remains associated |
|
3542 |
// with the Event. The event handle is never closed. This could be construed |
|
3543 |
// as handle leakage, but only up to the maximum # of threads that have been extant |
|
3544 |
// at any one time. This shouldn't be an issue, as windows platforms typically |
|
3545 |
// permit a process to have hundreds of thousands of open handles. |
|
3546 |
// |
|
3547 |
// 3: Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList |
|
3548 |
// and release unused handles. |
|
3549 |
// |
|
3550 |
// 4: Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle. |
|
3551 |
// It's not clear, however, that we wouldn't be trading one type of leak for another. |
|
3552 |
// |
|
3553 |
// 5. Use an RCU-like mechanism (Read-Copy Update). |
|
3554 |
// Or perhaps something similar to Maged Michael's "Hazard pointers". |
|
3555 |
// |
|
3556 |
// We use (2). |
|
3557 |
// |
|
3558 |
// TODO-FIXME: |
|
3559 |
// 1. Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation. |
|
3560 |
// 2. Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks |
|
3561 |
// to recover from (or at least detect) the dreaded Windows 841176 bug. |
|
3562 |
// 3. Collapse the interrupt_event, the JSR166 parker event, and the objectmonitor ParkEvent |
|
3563 |
// into a single win32 CreateEvent() handle. |
|
3564 |
// |
|
3565 |
// _Event transitions in park() |
|
3566 |
// -1 => -1 : illegal |
|
3567 |
// 1 => 0 : pass - return immediately |
|
3568 |
// 0 => -1 : block |
|
3569 |
// |
|
3570 |
// _Event serves as a restricted-range semaphore : |
|
3571 |
// -1 : thread is blocked |
|
3572 |
// 0 : neutral - thread is running or ready |
|
3573 |
// 1 : signaled - thread is running or ready |
|
3574 |
// |
|
3575 |
// Another possible encoding of _Event would be |
|
3576 |
// with explicit "PARKED" and "SIGNALED" bits. |
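// Illustrative walk-through of the encoding above (not code from this file):
//   initial state        _Event == 0   (neutral)
//   unpark()             _Event: 0 -> 1;  SetEvent() is skipped since v >= 0
//   park()               _Event: 1 -> 0;  returns immediately (permit consumed)
//   park()               _Event: 0 -> -1; blocks in WaitForSingleObject()
//   unpark()             _Event: -1 -> 0; SetEvent() wakes the parked thread
// The value never goes above 1 or below -1, hence "restricted-range semaphore".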
|
3577 |
||
3578 |
int os::PlatformEvent::park (jlong Millis) { |
|
3579 |
guarantee (_ParkHandle != NULL , "Invariant") ; |
|
3580 |
guarantee (Millis > 0 , "Invariant") ; |
|
3581 |
int v ; |
|
3582 |
||
3583 |
// CONSIDER: defer assigning a CreateEvent() handle to the Event until |
|
3584 |
// the initial park() operation. |
|
3585 |
||
3586 |
for (;;) { |
|
3587 |
v = _Event ; |
|
3588 |
if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ; |
|
3589 |
} |
|
3590 |
guarantee ((v == 0) || (v == 1), "invariant") ; |
|
3591 |
if (v != 0) return OS_OK ; |
|
3592 |
||
3593 |
// Do this the hard way by blocking ... |
|
3594 |
// TODO: consider a brief spin here, gated on the success of recent |
|
3595 |
// spin attempts by this thread. |
|
3596 |
// |
|
3597 |
// We decompose long timeouts into series of shorter timed waits. |
|
3598 |
// Evidently large timo values passed in WaitForSingleObject() are problematic on some |
|
3599 |
// versions of Windows. See EventWait() for details. This may be superstition. Or not. |
|
3600 |
// We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time |
|
3601 |
// with os::javaTimeNanos(). Furthermore, we assume that spurious returns from |
|
3602 |
// ::WaitForSingleObject() caused by latent ::SetEvent() operations will tend |
|
3603 |
// to happen early in the wait interval. Specifically, after a spurious wakeup (rv == |
|
3604 |
// WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate |
|
3605 |
// for the already waited time. This policy does not admit any new outcomes. |
|
3606 |
// In the future, however, we might want to track the accumulated wait time and |
|
3607 |
// adjust Millis accordingly if we encounter a spurious wakeup. |
|
3608 |
||
3609 |
const int MAXTIMEOUT = 0x10000000 ; |
|
3610 |
DWORD rv = WAIT_TIMEOUT ; |
|
3611 |
while (_Event < 0 && Millis > 0) { |
|
3612 |
DWORD prd = Millis ; // set prd = MIN (Millis, MAXTIMEOUT) |
|
3613 |
if (Millis > MAXTIMEOUT) { |
|
3614 |
prd = MAXTIMEOUT ; |
|
3615 |
} |
|
3616 |
rv = ::WaitForSingleObject (_ParkHandle, prd) ; |
|
3617 |
assert (rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed") ; |
|
3618 |
if (rv == WAIT_TIMEOUT) { |
|
3619 |
Millis -= prd ; |
|
3620 |
} |
|
3621 |
} |
|
3622 |
v = _Event ; |
|
3623 |
_Event = 0 ; |
|
3624 |
OrderAccess::fence() ; |
|
3625 |
// If we encounter a nearly simultaneous timeout expiry and unpark() |
|
3626 |
// we return OS_OK indicating we awoke via unpark(). |
|
3627 |
// Implementor's license -- returning OS_TIMEOUT would be equally valid, however. |
|
3628 |
return (v >= 0) ? OS_OK : OS_TIMEOUT ; |
|
3629 |
} |
|
3630 |
||
3631 |
void os::PlatformEvent::park () { |
|
3632 |
guarantee (_ParkHandle != NULL, "Invariant") ; |
|
3633 |
// Invariant: Only the thread associated with the Event/PlatformEvent |
|
3634 |
// may call park(). |
|
3635 |
int v ; |
|
3636 |
for (;;) { |
|
3637 |
v = _Event ; |
|
3638 |
if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ; |
|
3639 |
} |
|
3640 |
guarantee ((v == 0) || (v == 1), "invariant") ; |
|
3641 |
if (v != 0) return ; |
|
3642 |
||
3643 |
// Do this the hard way by blocking ... |
|
3644 |
// TODO: consider a brief spin here, gated on the success of recent |
|
3645 |
// spin attempts by this thread. |
|
3646 |
while (_Event < 0) { |
|
3647 |
DWORD rv = ::WaitForSingleObject (_ParkHandle, INFINITE) ; |
|
3648 |
assert (rv == WAIT_OBJECT_0, "WaitForSingleObject failed") ; |
|
3649 |
} |
|
3650 |
||
3651 |
// Usually we'll find _Event == 0 at this point, but as |
|
3652 |
// an optional optimization we clear it, just in case |
|
3653 |
// multiple unpark() operations drove _Event up to 1. |
|
3654 |
_Event = 0 ; |
|
3655 |
OrderAccess::fence() ; |
|
3656 |
guarantee (_Event >= 0, "invariant") ; |
|
3657 |
} |
|
3658 |
||
3659 |
void os::PlatformEvent::unpark() { |
|
3660 |
guarantee (_ParkHandle != NULL, "Invariant") ; |
|
3661 |
int v ; |
|
3662 |
for (;;) { |
|
3663 |
v = _Event ; // Increment _Event if it's < 1. |
|
3664 |
if (v > 0) { |
|
3665 |
// If it's already signaled just return. |
|
3666 |
// The LD of _Event could have reordered or be satisfied |
|
3667 |
// by a read-aside from this processor's write buffer. |
|
3668 |
// To avoid problems execute a barrier and then |
|
3669 |
// ratify the value. A degenerate CAS() would also work. |
|
3670 |
// Viz., CAS (v+0, &_Event, v) == v). |
|
3671 |
OrderAccess::fence() ; |
|
3672 |
if (_Event == v) return ; |
|
3673 |
continue ; |
|
3674 |
} |
|
3675 |
if (Atomic::cmpxchg (v+1, &_Event, v) == v) break ; |
|
3676 |
} |
|
3677 |
if (v < 0) { |
|
3678 |
::SetEvent (_ParkHandle) ; |
|
3679 |
} |
|
3680 |
} |
|
3681 |
||
3682 |
||
3683 |
// JSR166 |
|
3684 |
// ------------------------------------------------------- |
|
3685 |
||
3686 |
/* |
|
3687 |
* The Windows implementation of Park is very straightforward: Basic |
|
3688 |
* operations on Win32 Events turn out to have the right semantics to |
|
3689 |
* use them directly. We opportunistically reuse the event inherited |
|
3690 |
* from Monitor. |
|
3691 |
*/ |
|
3692 |
||
3693 |
||
3694 |
void Parker::park(bool isAbsolute, jlong time) { |
|
3695 |
guarantee (_ParkEvent != NULL, "invariant") ; |
|
3696 |
// First, demultiplex/decode time arguments |
|
3697 |
if (time < 0) { // don't wait |
|
3698 |
return; |
|
3699 |
} |
|
3700 |
else if (time == 0) { |
|
3701 |
time = INFINITE; |
|
3702 |
} |
|
3703 |
else if (isAbsolute) { |
|
3704 |
time -= os::javaTimeMillis(); // convert to relative time |
|
3705 |
if (time <= 0) // already elapsed |
|
3706 |
return; |
|
3707 |
} |
|
3708 |
else { // relative |
|
3709 |
time /= 1000000; // Must coarsen from nanos to millis |
|
3710 |
if (time == 0) // Wait for the minimal time unit if zero |
|
3711 |
time = 1; |
|
3712 |
} |
|
3713 |
||
3714 |
JavaThread* thread = (JavaThread*)(Thread::current()); |
|
3715 |
assert(thread->is_Java_thread(), "Must be JavaThread"); |
|
3716 |
JavaThread *jt = (JavaThread *)thread; |
|
3717 |
||
3718 |
// Don't wait if interrupted or already triggered |
|
3719 |
if (Thread::is_interrupted(thread, false) || |
|
3720 |
WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) { |
|
3721 |
ResetEvent(_ParkEvent); |
|
3722 |
return; |
|
3723 |
} |
|
3724 |
else { |
|
3725 |
ThreadBlockInVM tbivm(jt); |
|
3726 |
OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */); |
|
3727 |
jt->set_suspend_equivalent(); |
|
3728 |
||
3729 |
WaitForSingleObject(_ParkEvent, time); |
|
3730 |
ResetEvent(_ParkEvent); |
|
3731 |
||
3732 |
// If externally suspended while waiting, re-suspend |
|
3733 |
if (jt->handle_special_suspend_equivalent_condition()) { |
|
3734 |
jt->java_suspend_self(); |
|
3735 |
} |
|
3736 |
} |
|
3737 |
} |
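// Worked examples of the time decoding at the top of Parker::park() (illustrative
// values only):
//   time == -1                    -> return immediately, no wait
//   time == 0                     -> WaitForSingleObject with an INFINITE timeout
//   isAbsolute, deadline in past  -> time - javaTimeMillis() <= 0, return at once
//   relative time == 2500000 ns   -> 2500000 / 1000000 == 2 ms wait
//   relative time == 500000 ns    -> coarsens to 0 ms, bumped to the 1 ms minimum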
|
3738 |
||
3739 |
void Parker::unpark() { |
|
3740 |
guarantee (_ParkEvent != NULL, "invariant") ; |
|
3741 |
SetEvent(_ParkEvent); |
|
3742 |
} |
|
3743 |
||
3744 |
// Run the specified command in a separate process. Return its exit value, |
|
3745 |
// or -1 on failure (e.g. can't create a new process). |
|
3746 |
int os::fork_and_exec(char* cmd) { |
|
3747 |
STARTUPINFO si; |
|
3748 |
PROCESS_INFORMATION pi; |
|
3749 |
||
3750 |
memset(&si, 0, sizeof(si)); |
|
3751 |
si.cb = sizeof(si); |
|
3752 |
memset(&pi, 0, sizeof(pi)); |
|
3753 |
BOOL rslt = CreateProcess(NULL, // executable name - use command line |
|
3754 |
cmd, // command line |
|
3755 |
NULL, // process security attribute |
|
3756 |
NULL, // thread security attribute |
|
3757 |
TRUE, // inherits system handles |
|
3758 |
0, // no creation flags |
|
3759 |
NULL, // use parent's environment block |
|
3760 |
NULL, // use parent's starting directory |
|
3761 |
&si, // (in) startup information |
|
3762 |
&pi); // (out) process information |
|
3763 |
||
3764 |
if (rslt) { |
|
3765 |
// Wait until child process exits. |
|
3766 |
WaitForSingleObject(pi.hProcess, INFINITE); |
|
3767 |
||
3768 |
DWORD exit_code; |
|
3769 |
GetExitCodeProcess(pi.hProcess, &exit_code); |
|
3770 |
||
3771 |
// Close process and thread handles. |
|
3772 |
CloseHandle(pi.hProcess); |
|
3773 |
CloseHandle(pi.hThread); |
|
3774 |
||
3775 |
return (int)exit_code; |
|
3776 |
} else { |
|
3777 |
return -1; |
|
3778 |
} |
|
3779 |
} |
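// Minimal usage sketch for os::fork_and_exec() (illustrative only; the command
// string is hypothetical).  A writable buffer is used because some forms of
// CreateProcess may modify the command-line string they are given.
#if 0
static void fork_and_exec_usage_sketch() {
  char cmd[] = "cmd.exe /c echo hello";
  int exit_code = os::fork_and_exec(cmd);   // -1 if the process could not be created
  tty->print_cr("child exited with %d", exit_code);
}
#endif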
|
3780 |
||
3781 |
//-------------------------------------------------------------------------------------------------- |
|
3782 |
// Non-product code |
|
3783 |
||
3784 |
static int mallocDebugIntervalCounter = 0; |
|
3785 |
static int mallocDebugCounter = 0; |
|
3786 |
bool os::check_heap(bool force) { |
|
3787 |
if (++mallocDebugCounter < MallocVerifyStart && !force) return true; |
|
3788 |
if (++mallocDebugIntervalCounter >= MallocVerifyInterval || force) { |
|
3789 |
// Note: HeapValidate executes two hardware breakpoints when it finds something |
|
3790 |
// wrong; at these points, eax contains the address of the offending block (I think). |
|
3791 |
// To get to the explicit error message(s) below, just continue twice. |
|
3792 |
HANDLE heap = GetProcessHeap(); |
|
3793 |
{ HeapLock(heap); |
|
3794 |
PROCESS_HEAP_ENTRY phe; |
|
3795 |
phe.lpData = NULL; |
|
3796 |
while (HeapWalk(heap, &phe) != 0) { |
|
3797 |
if ((phe.wFlags & PROCESS_HEAP_ENTRY_BUSY) && |
|
3798 |
!HeapValidate(heap, 0, phe.lpData)) { |
|
3799 |
tty->print_cr("C heap has been corrupted (time: %d allocations)", mallocDebugCounter); |
|
3800 |
tty->print_cr("corrupted block near address %#x, length %d", phe.lpData, phe.cbData); |
|
3801 |
fatal("corrupted C heap"); |
|
3802 |
} |
|
3803 |
} |
|
3804 |
int err = GetLastError(); |
|
3805 |
if (err != ERROR_NO_MORE_ITEMS && err != ERROR_CALL_NOT_IMPLEMENTED) { |
|
3806 |
fatal1("heap walk aborted with error %d", err); |
|
3807 |
} |
|
3808 |
HeapUnlock(heap); |
|
3809 |
} |
|
3810 |
mallocDebugIntervalCounter = 0; |
|
3811 |
} |
|
3812 |
return true; |
|
3813 |
} |
|
3814 |
||
3815 |
||
3816 |
#ifndef PRODUCT |
|
3817 |
bool os::find(address addr) { |
|
3818 |
// Nothing yet |
|
3819 |
return false; |
|
3820 |
} |
|
3821 |
#endif |
|
3822 |
||
3823 |
LONG WINAPI os::win32::serialize_fault_filter(struct _EXCEPTION_POINTERS* e) { |
|
3824 |
DWORD exception_code = e->ExceptionRecord->ExceptionCode; |
|
3825 |
||
3826 |
if ( exception_code == EXCEPTION_ACCESS_VIOLATION ) { |
|
3827 |
JavaThread* thread = (JavaThread*)ThreadLocalStorage::get_thread_slow(); |
|
3828 |
PEXCEPTION_RECORD exceptionRecord = e->ExceptionRecord; |
|
3829 |
address addr = (address) exceptionRecord->ExceptionInformation[1]; |
|
3830 |
||
3831 |
if (os::is_memory_serialize_page(thread, addr)) |
|
3832 |
return EXCEPTION_CONTINUE_EXECUTION; |
|
3833 |
} |
|
3834 |
||
3835 |
return EXCEPTION_CONTINUE_SEARCH; |
|
3836 |
} |
|
3837 |
||
3838 |
static int getLastErrorString(char *buf, size_t len) |
|
3839 |
{ |
|
3840 |
long errval; |
|
3841 |
||
3842 |
if ((errval = GetLastError()) != 0) |
|
3843 |
{ |
|
3844 |
/* DOS error */ |
|
3845 |
size_t n = (size_t)FormatMessage( |
|
3846 |
FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS, |
|
3847 |
NULL, |
|
3848 |
errval, |
|
3849 |
0, |
|
3850 |
buf, |
|
3851 |
(DWORD)len, |
|
3852 |
NULL); |
|
3853 |
if (n > 3) { |
|
3854 |
/* Drop final '.', CR, LF */ |
|
3855 |
if (buf[n - 1] == '\n') n--; |
|
3856 |
if (buf[n - 1] == '\r') n--; |
|
3857 |
if (buf[n - 1] == '.') n--; |
|
3858 |
buf[n] = '\0'; |
|
3859 |
} |
|
3860 |
return (int)n; |
|
3861 |
} |
|
3862 |
||
3863 |
if (errno != 0) |
|
3864 |
{ |
|
3865 |
/* C runtime error that has no corresponding DOS error code */ |
|
3866 |
const char *s = strerror(errno); |
|
3867 |
size_t n = strlen(s); |
|
3868 |
if (n >= len) n = len - 1; |
|
3869 |
strncpy(buf, s, n); |
|
3870 |
buf[n] = '\0'; |
|
3871 |
return (int)n; |
|
3872 |
} |
|
3873 |
return 0; |
|
3874 |
} |
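// Minimal usage sketch for getLastErrorString() (illustrative only): formatting
// the most recent Win32 (or C-runtime) error after a failed call.
#if 0
static void last_error_sketch() {
  HANDLE h = CreateFile("does_not_exist.tmp", GENERIC_READ, 0, NULL,
                        OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
  if (h == INVALID_HANDLE_VALUE) {
    char msg[256];
    getLastErrorString(msg, sizeof(msg));
    tty->print_cr("CreateFile failed: %s", msg);   // e.g. "The system cannot find the file specified"
  } else {
    CloseHandle(h);
  }
}
#endif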