2591 bool os::can_execute_large_page_memory() { |
2591 bool os::can_execute_large_page_memory() { |
2592 return true; |
2592 return true; |
2593 } |
2593 } |
2594 |
2594 |
// Reserve and commit 'bytes' of memory backed by large pages, mapped
// PAGE_EXECUTE_READWRITE.  Returns the base address on success, NULL on any
// failure (overflow, reservation failure, or a large-page commit failure).
// When UseLargePagesIndividualAllocation is set (the WS2003 workaround), the
// region is committed one large page at a time; otherwise a single
// VirtualAlloc with MEM_LARGE_PAGES covers the whole request.
char* os::reserve_memory_special(size_t bytes) {

  if (UseLargePagesIndividualAllocation) {
    if (TracePageSizes && Verbose) {
       tty->print_cr("Reserving large pages individually.");
    }
    char * p_buf;
    // first reserve enough address space in advance since we want to be
    // able to break a single contiguous virtual address range into multiple
    // large page commits but WS2003 does not allow reserving large page space
    // so we just use 4K pages for reserve, this gives us a legal contiguous
    // address space. then we will deallocate that reservation, and re alloc
    // using large pages
    const size_t size_of_reserve = bytes + _large_page_size;
    if (bytes > size_of_reserve) {
      // Overflowed: bytes + _large_page_size wrapped around size_t.
      warning("Individually allocated large pages failed, "
        "use -XX:-UseLargePagesIndividualAllocation to turn off");
      return NULL;
    }
    // Probe reservation with small pages to obtain a contiguous address range
    // big enough to hold 'bytes' at large-page alignment.
    p_buf = (char *) VirtualAlloc(NULL,
                                 size_of_reserve,  // size of Reserve
                                 MEM_RESERVE,
                                 PAGE_EXECUTE_READWRITE);
    // If reservation failed, return NULL
    if (p_buf == NULL) return NULL;

    // NOTE(review): between this release and the per-page re-allocation below
    // another thread could map into the freed range, making the VirtualAlloc
    // calls at fixed addresses fail — presumably an accepted race here; confirm.
    release_memory(p_buf, bytes + _large_page_size);
    // round up to page boundary.  If the size_of_reserve did not
    // overflow and the reservation did not fail, this align up
    // should not overflow.
    p_buf = (char *) align_size_up((size_t)p_buf, _large_page_size);

    // now go through and allocate one page at a time until all bytes are
    // allocated
    size_t  bytes_remaining = align_size_up(bytes, _large_page_size);
    // An overflow of align_size_up() would have been caught above
    // in the calculation of size_of_reserve.
    char * next_alloc_addr = p_buf;

#ifdef ASSERT
    // Variable for the failure injection: pick a random point in the region
    // after which commits are forced to fail.
    // NOTE(review): assumes os::random() is non-negative so the % yields a
    // valid size_t offset — TODO confirm.
    long ran_num = os::random();
    size_t fail_after = ran_num % bytes;
#endif

    while (bytes_remaining) {
      // Commit at most one large page per iteration.
      size_t bytes_to_rq = MIN2(bytes_remaining, _large_page_size);
      // Note allocate and commit
      char * p_new;

#ifdef ASSERT
      bool inject_error = LargePagesIndividualAllocationInjectError &&
          (bytes_remaining <= fail_after);
#else
      const bool inject_error = false;
#endif

      if (inject_error) {
        p_new = NULL;
      } else {
        p_new = (char *) VirtualAlloc(next_alloc_addr,
                                    bytes_to_rq,
                                    MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES,
                                    PAGE_EXECUTE_READWRITE);
      }

      if (p_new == NULL) {
        // Free any allocated pages
        if (next_alloc_addr > p_buf) {
          // Some memory was committed so release it.
          // NOTE(review): this releases the committed prefix as one range
          // starting at p_buf, but each large page above was a separate
          // VirtualAlloc reservation — verify release_memory can undo them
          // in a single call on Windows.
          size_t bytes_to_release = bytes - bytes_remaining;
          release_memory(p_buf, bytes_to_release);
        }
#ifdef ASSERT
        if (UseLargePagesIndividualAllocation &&
            LargePagesIndividualAllocationInjectError) {
          if (TracePageSizes && Verbose) {
             tty->print_cr("Reserving large pages individually failed.");
          }
        }
#endif
        return NULL;
      }
      bytes_remaining -= bytes_to_rq;
      next_alloc_addr += bytes_to_rq;
    }

    // All pages committed; the region starts at the aligned probe address.
    return p_buf;

  } else {
    // normal policy just allocate it all at once
    DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
    char * res = (char *)VirtualAlloc(NULL,
                                     bytes,
                                     flag,
                                     PAGE_EXECUTE_READWRITE);
    return res;
  }
}
2600 |
2695 |
2601 bool os::release_memory_special(char* base, size_t bytes) { |
2696 bool os::release_memory_special(char* base, size_t bytes) { |
2602 return release_memory(base, bytes); |
2697 return release_memory(base, bytes); |
2603 } |
2698 } |
3003 OSVERSIONINFO oi; |
3099 OSVERSIONINFO oi; |
3004 oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFO); |
3100 oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFO); |
3005 GetVersionEx(&oi); |
3101 GetVersionEx(&oi); |
3006 switch(oi.dwPlatformId) { |
3102 switch(oi.dwPlatformId) { |
3007 case VER_PLATFORM_WIN32_WINDOWS: _is_nt = false; break; |
3103 case VER_PLATFORM_WIN32_WINDOWS: _is_nt = false; break; |
3008 case VER_PLATFORM_WIN32_NT: _is_nt = true; break; |
3104 case VER_PLATFORM_WIN32_NT: |
|
3105 _is_nt = true; |
|
3106 { |
|
3107 int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion; |
|
3108 if (os_vers == 5002) { |
|
3109 _is_windows_2003 = true; |
|
3110 } |
|
3111 } |
|
3112 break; |
3009 default: fatal("Unknown platform"); |
3113 default: fatal("Unknown platform"); |
3010 } |
3114 } |
3011 |
3115 |
3012 _default_stack_size = os::current_stack_size(); |
3116 _default_stack_size = os::current_stack_size(); |
3013 assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size"); |
3117 assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size"); |
3101 #ifndef PRODUCT |
3205 #ifndef PRODUCT |
3102 if (is_MP()) { |
3206 if (is_MP()) { |
3103 NoYieldsInMicrolock = true; |
3207 NoYieldsInMicrolock = true; |
3104 } |
3208 } |
3105 #endif |
3209 #endif |
|
3210 // This may be overridden later when argument processing is done. |
|
3211 FLAG_SET_ERGO(bool, UseLargePagesIndividualAllocation, |
|
3212 os::win32::is_windows_2003()); |
|
3213 |
3106 // Initialize main_process and main_thread |
3214 // Initialize main_process and main_thread |
3107 main_process = GetCurrentProcess(); // Remember main_process is a pseudo handle |
3215 main_process = GetCurrentProcess(); // Remember main_process is a pseudo handle |
3108 if (!DuplicateHandle(main_process, GetCurrentThread(), main_process, |
3216 if (!DuplicateHandle(main_process, GetCurrentThread(), main_process, |
3109 &main_thread, THREAD_ALL_ACCESS, false, 0)) { |
3217 &main_thread, THREAD_ALL_ACCESS, false, 0)) { |
3110 fatal("DuplicateHandle failed\n"); |
3218 fatal("DuplicateHandle failed\n"); |
3111 } |
3219 } |
3112 main_thread_id = (int) GetCurrentThreadId(); |
3220 main_thread_id = (int) GetCurrentThreadId(); |
3113 } |
3221 } |