--- a/hotspot/make/linux/makefiles/vm.make Mon Jul 15 15:14:58 2013 -0700
+++ b/hotspot/make/linux/makefiles/vm.make Thu Jul 18 00:52:54 2013 -0400
@@ -46,6 +46,7 @@
include $(MAKEFILES_DIR)/zeroshark.make
else
include $(MAKEFILES_DIR)/$(BUILDARCH).make
+ -include $(HS_ALT_MAKE)/$(Platform_os_family)/makefiles/$(BUILDARCH).make
endif
# set VPATH so make knows where to look for source files
@@ -380,4 +381,4 @@
install: install_jvm install_jsig install_saproc
-.PHONY: default build install install_jvm
+.PHONY: default build install install_jvm $(HS_ALT_MAKE)/$(Platform_os_family)/makefiles/$(BUILDARCH).make
--- a/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp Mon Jul 15 15:14:58 2013 -0700
+++ b/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp Thu Jul 18 00:52:54 2013 -0400
@@ -410,6 +410,51 @@
return start;
}
+ // Safefetch stubs.
+ void generate_safefetch(const char* name, int size, address* entry,
+ address* fault_pc, address* continuation_pc) {
+ // safefetch signatures:
+ // int SafeFetch32(int* adr, int errValue);
+ // intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue);
+ //
+ // arguments:
+ // o0 = adr
+ // o1 = errValue
+ //
+ // result:
+ // o0 = *adr or errValue
+
+ StubCodeMark mark(this, "StubRoutines", name);
+
+ // Entry point, pc or function descriptor.
+ __ align(CodeEntryAlignment);
+ *entry = __ pc();
+
+ __ mov(O0, G1); // g1 = o0
+ __ mov(O1, O0); // o0 = o1
+ // Load *adr into o0, may fault.
+ *fault_pc = __ pc();
+ switch (size) {
+ case 4:
+ // int32_t
+ __ ldsw(G1, 0, O0); // o0 = [g1]
+ break;
+ case 8:
+ // int64_t
+ __ ldx(G1, 0, O0); // o0 = [g1]
+ break;
+ default:
+ ShouldNotReachHere();
+ }
+
+ // return errValue or *adr
+ *continuation_pc = __ pc();
+ // By convention with the trap handler we ensure there is a non-CTI
+ // instruction in the trap shadow.
+ __ nop();
+ __ retl();
+ __ delayed()->nop();
+ }
//------------------------------------------------------------------------------------------------------------------------
// Continuation point for throwing of implicit exceptions that are not handled in
@@ -3315,6 +3360,14 @@
// Don't initialize the platform math functions since sparc
// doesn't have intrinsics for these operations.
+
+ // Safefetch stubs.
+ generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry,
+ &StubRoutines::_safefetch32_fault_pc,
+ &StubRoutines::_safefetch32_continuation_pc);
+ generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
+ &StubRoutines::_safefetchN_fault_pc,
+ &StubRoutines::_safefetchN_continuation_pc);
}
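
The SPARC stub above replaces the hand-written SafeFetch32/SafeFetchN assembly removed from linux_sparc.s and solaris_sparc.s later in this patch. It is safe because errValue is staged in the result register before the one load that can fault; if the load traps, the signal handler resumes at *continuation_pc and the caller simply sees errValue. A rough C rendering of the generated code, as a sketch only (the helper name is made up and no fault handling is shown):

    // Sketch: what the generated SafeFetch32 stub computes.
    // errValue is in place before the only potentially faulting access;
    // on a fault the handler resumes past the load, so errValue is returned.
    static int safefetch32_equivalent(int* adr, int errValue) {
      int result = errValue;  // __ mov(O1, O0)  -- stage errValue first
      result = *adr;          // __ ldsw(...)    -- the only instruction that may fault
      return result;          // __ retl         -- resumption point after a fault
    }
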
--- a/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp Mon Jul 15 15:14:58 2013 -0700
+++ b/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp Thu Jul 18 00:52:54 2013 -0400
@@ -2766,6 +2766,39 @@
return start;
}
+ // Safefetch stubs.
+ void generate_safefetch(const char* name, int size, address* entry,
+ address* fault_pc, address* continuation_pc) {
+ // safefetch signatures:
+ // int SafeFetch32(int* adr, int errValue);
+ // intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue);
+
+ StubCodeMark mark(this, "StubRoutines", name);
+
+ // Entry point, pc or function descriptor.
+ *entry = __ pc();
+
+ __ movl(rax, Address(rsp, 0x8)); // rax = errValue (second stack argument)
+ __ movl(rcx, Address(rsp, 0x4)); // rcx = adr (first stack argument)
+ // Load *adr into eax, may fault.
+ *fault_pc = __ pc();
+ switch (size) {
+ case 4:
+ // int32_t
+ __ movl(rax, Address(rcx, 0));
+ break;
+ case 8:
+ // int64_t
+ Unimplemented();
+ break;
+ default:
+ ShouldNotReachHere();
+ }
+
+ // Return errValue or *adr.
+ *continuation_pc = __ pc();
+ __ ret(0);
+ }
public:
// Information about frame layout at time of blocking runtime call.
@@ -2978,6 +3011,14 @@
StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt();
StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt();
}
+
+ // Safefetch stubs.
+ generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry,
+ &StubRoutines::_safefetch32_fault_pc,
+ &StubRoutines::_safefetch32_continuation_pc);
+ StubRoutines::_safefetchN_entry = StubRoutines::_safefetch32_entry;
+ StubRoutines::_safefetchN_fault_pc = StubRoutines::_safefetch32_fault_pc;
+ StubRoutines::_safefetchN_continuation_pc = StubRoutines::_safefetch32_continuation_pc;
}
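
On the 32-bit x86 port no separate SafeFetchN stub is generated: in an ILP32 VM int and intptr_t have the same width, so the SafeFetchN entry, fault pc, and continuation pc simply alias the SafeFetch32 ones. A compile-time restatement of that assumption (illustration only, not part of the patch; it holds only for a 32-bit build, which is what this file targets):

    #include <stdint.h>
    static_assert(sizeof(intptr_t) == sizeof(int),
                  "SafeFetchN can reuse the SafeFetch32 stub on 32-bit x86");
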
--- a/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp Mon Jul 15 15:14:58 2013 -0700
+++ b/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp Thu Jul 18 00:52:54 2013 -0400
@@ -3357,7 +3357,45 @@
return start;
}
-
+ // Safefetch stubs.
+ void generate_safefetch(const char* name, int size, address* entry,
+ address* fault_pc, address* continuation_pc) {
+ // safefetch signatures:
+ // int SafeFetch32(int* adr, int errValue);
+ // intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue);
+ //
+ // arguments:
+ // c_rarg0 = adr
+ // c_rarg1 = errValue
+ //
+ // result:
+ // rax = *adr or errValue
+
+ StubCodeMark mark(this, "StubRoutines", name);
+
+ // Entry point, pc or function descriptor.
+ *entry = __ pc();
+
+ // Load *adr into c_rarg1, may fault.
+ *fault_pc = __ pc();
+ switch (size) {
+ case 4:
+ // int32_t
+ __ movl(c_rarg1, Address(c_rarg0, 0));
+ break;
+ case 8:
+ // int64_t
+ __ movq(c_rarg1, Address(c_rarg0, 0));
+ break;
+ default:
+ ShouldNotReachHere();
+ }
+
+ // return errValue or *adr
+ *continuation_pc = __ pc();
+ __ movq(rax, c_rarg1);
+ __ ret(0);
+ }
// This is a version of CBC/AES Decrypt which does 4 blocks in a loop at a time
// to hide instruction latency
@@ -3833,6 +3871,14 @@
StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt();
StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel();
}
+
+ // Safefetch stubs.
+ generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry,
+ &StubRoutines::_safefetch32_fault_pc,
+ &StubRoutines::_safefetch32_continuation_pc);
+ generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
+ &StubRoutines::_safefetchN_fault_pc,
+ &StubRoutines::_safefetchN_continuation_pc);
}
public:
--- a/hotspot/src/os/windows/vm/os_windows.cpp Mon Jul 15 15:14:58 2013 -0700
+++ b/hotspot/src/os/windows/vm/os_windows.cpp Thu Jul 18 00:52:54 2013 -0400
@@ -2323,6 +2323,11 @@
#endif
Thread* t = ThreadLocalStorage::get_thread_slow(); // slow & steady
+ // Handle SafeFetch32 and SafeFetchN exceptions.
+ if (StubRoutines::is_safefetch_fault(pc)) {
+ return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc));
+ }
+
#ifndef _WIN64
// Execution protection violation - win32 running on AMD64 only
// Handled first to avoid misdiagnosis as a "normal" access violation;
--- a/hotspot/src/os_cpu/bsd_x86/vm/bsd_x86_32.s Mon Jul 15 15:14:58 2013 -0700
+++ b/hotspot/src/os_cpu/bsd_x86/vm/bsd_x86_32.s Thu Jul 18 00:52:54 2013 -0400
@@ -63,24 +63,6 @@
popl %eax
ret
- .globl SYMBOL(SafeFetch32), SYMBOL(Fetch32PFI), SYMBOL(Fetch32Resume)
- .globl SYMBOL(SafeFetchN)
- ## TODO: avoid exposing Fetch32PFI and Fetch32Resume.
- ## Instead, the signal handler would call a new SafeFetchTriage(FaultingEIP)
- ## routine to vet the address. If the address is the faulting LD then
- ## SafeFetchTriage() would return the resume-at EIP, otherwise null.
- ELF_TYPE(SafeFetch32,@function)
- .p2align 4,,15
-SYMBOL(SafeFetch32):
-SYMBOL(SafeFetchN):
- movl 0x8(%esp), %eax
- movl 0x4(%esp), %ecx
-SYMBOL(Fetch32PFI):
- movl (%ecx), %eax
-SYMBOL(Fetch32Resume):
- ret
-
-
.globl SYMBOL(SpinPause)
ELF_TYPE(SpinPause,@function)
.p2align 4,,15
--- a/hotspot/src/os_cpu/bsd_x86/vm/bsd_x86_64.s Mon Jul 15 15:14:58 2013 -0700
+++ b/hotspot/src/os_cpu/bsd_x86/vm/bsd_x86_64.s Thu Jul 18 00:52:54 2013 -0400
@@ -46,28 +46,6 @@
.text
- .globl SYMBOL(SafeFetch32), SYMBOL(Fetch32PFI), SYMBOL(Fetch32Resume)
- .p2align 4,,15
- ELF_TYPE(SafeFetch32,@function)
- // Prototype: int SafeFetch32 (int * Adr, int ErrValue)
-SYMBOL(SafeFetch32):
- movl %esi, %eax
-SYMBOL(Fetch32PFI):
- movl (%rdi), %eax
-SYMBOL(Fetch32Resume):
- ret
-
- .globl SYMBOL(SafeFetchN), SYMBOL(FetchNPFI), SYMBOL(FetchNResume)
- .p2align 4,,15
- ELF_TYPE(SafeFetchN,@function)
- // Prototype: intptr_t SafeFetchN (intptr_t * Adr, intptr_t ErrValue)
-SYMBOL(SafeFetchN):
- movq %rsi, %rax
-SYMBOL(FetchNPFI):
- movq (%rdi), %rax
-SYMBOL(FetchNResume):
- ret
-
.globl SYMBOL(SpinPause)
.p2align 4,,15
ELF_TYPE(SpinPause,@function)
--- a/hotspot/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp Mon Jul 15 15:14:58 2013 -0700
+++ b/hotspot/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp Thu Jul 18 00:52:54 2013 -0400
@@ -385,13 +385,6 @@
trap_page_fault = 0xE
};
-extern "C" void Fetch32PFI () ;
-extern "C" void Fetch32Resume () ;
-#ifdef AMD64
-extern "C" void FetchNPFI () ;
-extern "C" void FetchNResume () ;
-#endif // AMD64
-
extern "C" JNIEXPORT int
JVM_handle_bsd_signal(int sig,
siginfo_t* info,
@@ -454,16 +447,10 @@
if (info != NULL && uc != NULL && thread != NULL) {
pc = (address) os::Bsd::ucontext_get_pc(uc);
- if (pc == (address) Fetch32PFI) {
- uc->context_pc = intptr_t(Fetch32Resume) ;
- return 1 ;
+ if (StubRoutines::is_safefetch_fault(pc)) {
+ uc->context_pc = intptr_t(StubRoutines::continuation_for_safefetch_fault(pc));
+ return 1;
}
-#ifdef AMD64
- if (pc == (address) FetchNPFI) {
- uc->context_pc = intptr_t (FetchNResume) ;
- return 1 ;
- }
-#endif // AMD64
// Handle ALL stack overflow variations here
if (sig == SIGSEGV || sig == SIGBUS) {
--- a/hotspot/src/os_cpu/linux_sparc/vm/linux_sparc.s Mon Jul 15 15:14:58 2013 -0700
+++ b/hotspot/src/os_cpu/linux_sparc/vm/linux_sparc.s Thu Jul 18 00:52:54 2013 -0400
@@ -21,42 +21,6 @@
# questions.
#
- # Prototype: int SafeFetch32 (int * adr, int ErrValue)
- # The "ld" at Fetch32 is potentially faulting instruction.
- # If the instruction traps the trap handler will arrange
- # for control to resume at Fetch32Resume.
- # By convention with the trap handler we ensure there is a non-CTI
- # instruction in the trap shadow.
-
-
- .globl SafeFetch32, Fetch32PFI, Fetch32Resume
- .globl SafeFetchN
- .align 32
- .type SafeFetch32,@function
-SafeFetch32:
- mov %o0, %g1
- mov %o1, %o0
-Fetch32PFI:
- # <-- Potentially faulting instruction
- ld [%g1], %o0
-Fetch32Resume:
- nop
- retl
- nop
-
- .globl SafeFetchN, FetchNPFI, FetchNResume
- .type SafeFetchN,@function
- .align 32
-SafeFetchN:
- mov %o0, %g1
- mov %o1, %o0
-FetchNPFI:
- ldn [%g1], %o0
-FetchNResume:
- nop
- retl
- nop
-
# Possibilities:
# -- membar
# -- CAS (SP + BIAS, G0, G0)
--- a/hotspot/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp Mon Jul 15 15:14:58 2013 -0700
+++ b/hotspot/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp Thu Jul 18 00:52:54 2013 -0400
@@ -366,18 +366,9 @@
// Utility functions
-extern "C" void Fetch32PFI();
-extern "C" void Fetch32Resume();
-extern "C" void FetchNPFI();
-extern "C" void FetchNResume();
-
inline static bool checkPrefetch(sigcontext* uc, address pc) {
- if (pc == (address) Fetch32PFI) {
- set_cont_address(uc, address(Fetch32Resume));
- return true;
- }
- if (pc == (address) FetchNPFI) {
- set_cont_address(uc, address(FetchNResume));
+ if (StubRoutines::is_safefetch_fault(pc)) {
+ set_cont_address(uc, address(StubRoutines::continuation_for_safefetch_fault(pc)));
return true;
}
return false;
--- a/hotspot/src/os_cpu/linux_x86/vm/linux_x86_32.s Mon Jul 15 15:14:58 2013 -0700
+++ b/hotspot/src/os_cpu/linux_x86/vm/linux_x86_32.s Thu Jul 18 00:52:54 2013 -0400
@@ -42,24 +42,6 @@
.text
- .globl SafeFetch32, Fetch32PFI, Fetch32Resume
- .globl SafeFetchN
- ## TODO: avoid exposing Fetch32PFI and Fetch32Resume.
- ## Instead, the signal handler would call a new SafeFetchTriage(FaultingEIP)
- ## routine to vet the address. If the address is the faulting LD then
- ## SafeFetchTriage() would return the resume-at EIP, otherwise null.
- .type SafeFetch32,@function
- .p2align 4,,15
-SafeFetch32:
-SafeFetchN:
- movl 0x8(%esp), %eax
- movl 0x4(%esp), %ecx
-Fetch32PFI:
- movl (%ecx), %eax
-Fetch32Resume:
- ret
-
-
.globl SpinPause
.type SpinPause,@function
.p2align 4,,15
--- a/hotspot/src/os_cpu/linux_x86/vm/linux_x86_64.s Mon Jul 15 15:14:58 2013 -0700
+++ b/hotspot/src/os_cpu/linux_x86/vm/linux_x86_64.s Thu Jul 18 00:52:54 2013 -0400
@@ -38,28 +38,6 @@
.text
- .globl SafeFetch32, Fetch32PFI, Fetch32Resume
- .align 16
- .type SafeFetch32,@function
- // Prototype: int SafeFetch32 (int * Adr, int ErrValue)
-SafeFetch32:
- movl %esi, %eax
-Fetch32PFI:
- movl (%rdi), %eax
-Fetch32Resume:
- ret
-
- .globl SafeFetchN, FetchNPFI, FetchNResume
- .align 16
- .type SafeFetchN,@function
- // Prototype: intptr_t SafeFetchN (intptr_t * Adr, intptr_t ErrValue)
-SafeFetchN:
- movq %rsi, %rax
-FetchNPFI:
- movq (%rdi), %rax
-FetchNResume:
- ret
-
.globl SpinPause
.align 16
.type SpinPause,@function
--- a/hotspot/src/os_cpu/linux_x86/vm/os_linux_x86.cpp Mon Jul 15 15:14:58 2013 -0700
+++ b/hotspot/src/os_cpu/linux_x86/vm/os_linux_x86.cpp Thu Jul 18 00:52:54 2013 -0400
@@ -209,13 +209,6 @@
trap_page_fault = 0xE
};
-extern "C" void Fetch32PFI () ;
-extern "C" void Fetch32Resume () ;
-#ifdef AMD64
-extern "C" void FetchNPFI () ;
-extern "C" void FetchNResume () ;
-#endif // AMD64
-
extern "C" JNIEXPORT int
JVM_handle_linux_signal(int sig,
siginfo_t* info,
@@ -278,16 +271,10 @@
if (info != NULL && uc != NULL && thread != NULL) {
pc = (address) os::Linux::ucontext_get_pc(uc);
- if (pc == (address) Fetch32PFI) {
- uc->uc_mcontext.gregs[REG_PC] = intptr_t(Fetch32Resume) ;
- return 1 ;
+ if (StubRoutines::is_safefetch_fault(pc)) {
+ uc->uc_mcontext.gregs[REG_PC] = intptr_t(StubRoutines::continuation_for_safefetch_fault(pc));
+ return 1;
}
-#ifdef AMD64
- if (pc == (address) FetchNPFI) {
- uc->uc_mcontext.gregs[REG_PC] = intptr_t (FetchNResume) ;
- return 1 ;
- }
-#endif // AMD64
#ifndef AMD64
// Halt if SI_KERNEL before more crashes get misdiagnosed as Java bugs
--- a/hotspot/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp Mon Jul 15 15:14:58 2013 -0700
+++ b/hotspot/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp Thu Jul 18 00:52:54 2013 -0400
@@ -303,11 +303,6 @@
#endif
}
-extern "C" void Fetch32PFI () ;
-extern "C" void Fetch32Resume () ;
-extern "C" void FetchNPFI () ;
-extern "C" void FetchNResume () ;
-
extern "C" JNIEXPORT int
JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid,
int abort_if_unrecognized) {
@@ -379,17 +374,10 @@
npc = (address) uc->uc_mcontext.gregs[REG_nPC];
// SafeFetch() support
- // Implemented with either a fixed set of addresses such
- // as Fetch32*, or with Thread._OnTrap.
- if (uc->uc_mcontext.gregs[REG_PC] == intptr_t(Fetch32PFI)) {
- uc->uc_mcontext.gregs [REG_PC] = intptr_t(Fetch32Resume) ;
- uc->uc_mcontext.gregs [REG_nPC] = intptr_t(Fetch32Resume) + 4 ;
- return true ;
- }
- if (uc->uc_mcontext.gregs[REG_PC] == intptr_t(FetchNPFI)) {
- uc->uc_mcontext.gregs [REG_PC] = intptr_t(FetchNResume) ;
- uc->uc_mcontext.gregs [REG_nPC] = intptr_t(FetchNResume) + 4 ;
- return true ;
+ if (StubRoutines::is_safefetch_fault(pc)) {
+ uc->uc_mcontext.gregs[REG_PC] = intptr_t(StubRoutines::continuation_for_safefetch_fault(pc));
+ uc->uc_mcontext.gregs[REG_nPC] = uc->uc_mcontext.gregs[REG_PC] + 4;
+ return 1;
}
// Handle ALL stack overflow variations here
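
Unlike the x86 handlers, the SPARC handler must fix up two registers: the context carries a PC/nPC pair because of delayed control transfer, so after redirecting the fault both must point into the continuation code, with nPC one instruction (4 bytes) past PC. A hypothetical helper capturing that pattern, with made-up naming and Solaris/SPARC headers assumed:

    #include <stdint.h>
    #include <ucontext.h>

    // Redirect a SPARC ucontext to a continuation address; PC and its
    // delay-slot successor nPC must be rewritten together.
    static void redirect_to_continuation(ucontext_t* uc, unsigned char* cont) {
      uc->uc_mcontext.gregs[REG_PC]  = (intptr_t)cont;
      uc->uc_mcontext.gregs[REG_nPC] = (intptr_t)cont + 4;
    }
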
--- a/hotspot/src/os_cpu/solaris_sparc/vm/solaris_sparc.s Mon Jul 15 15:14:58 2013 -0700
+++ b/hotspot/src/os_cpu/solaris_sparc/vm/solaris_sparc.s Thu Jul 18 00:52:54 2013 -0400
@@ -21,47 +21,6 @@
!! questions.
!!
- !! Prototype: int SafeFetch32 (int * adr, int ErrValue)
- !! The "ld" at Fetch32 is potentially faulting instruction.
- !! If the instruction traps the trap handler will arrange
- !! for control to resume at Fetch32Resume.
- !! By convention with the trap handler we ensure there is a non-CTI
- !! instruction in the trap shadow.
- !!
- !! The reader might be tempted to move this service to .il.
- !! Don't. Sun's CC back-end reads and optimize code emitted
- !! by the .il "call", in some cases optimizing the code, completely eliding it,
- !! or by moving the code from the "call site".
-
- !! ASM better know we may use G6 for our own purposes
- .register %g6, #ignore
-
- .globl SafeFetch32
- .align 32
- .global Fetch32PFI, Fetch32Resume
-SafeFetch32:
- mov %o0, %g1
- mov %o1, %o0
-Fetch32PFI:
- ld [%g1], %o0 !! <-- Potentially faulting instruction
-Fetch32Resume:
- nop
- retl
- nop
-
- .globl SafeFetchN
- .align 32
- .globl FetchNPFI, FetchNResume
-SafeFetchN:
- mov %o0, %g1
- mov %o1, %o0
-FetchNPFI:
- ldn [%g1], %o0
-FetchNResume:
- nop
- retl
- nop
-
!! Possibilities:
!! -- membar
!! -- CAS (SP + BIAS, G0, G0)
--- a/hotspot/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp Mon Jul 15 15:14:58 2013 -0700
+++ b/hotspot/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp Thu Jul 18 00:52:54 2013 -0400
@@ -352,13 +352,6 @@
}
-extern "C" void Fetch32PFI () ;
-extern "C" void Fetch32Resume () ;
-#ifdef AMD64
-extern "C" void FetchNPFI () ;
-extern "C" void FetchNResume () ;
-#endif // AMD64
-
extern "C" JNIEXPORT int
JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid,
int abort_if_unrecognized) {
@@ -436,17 +429,10 @@
// factor me: getPCfromContext
pc = (address) uc->uc_mcontext.gregs[REG_PC];
- // SafeFetch32() support
- if (pc == (address) Fetch32PFI) {
- uc->uc_mcontext.gregs[REG_PC] = intptr_t(Fetch32Resume) ;
- return true ;
+ if (StubRoutines::is_safefetch_fault(pc)) {
+ uc->uc_mcontext.gregs[REG_PC] = intptr_t(StubRoutines::continuation_for_safefetch_fault(pc));
+ return true;
}
-#ifdef AMD64
- if (pc == (address) FetchNPFI) {
- uc->uc_mcontext.gregs [REG_PC] = intptr_t(FetchNResume) ;
- return true ;
- }
-#endif // AMD64
// Handle ALL stack overflow variations here
if (sig == SIGSEGV && info->si_code == SEGV_ACCERR) {
--- a/hotspot/src/os_cpu/solaris_x86/vm/solaris_x86_32.s Mon Jul 15 15:14:58 2013 -0700
+++ b/hotspot/src/os_cpu/solaris_x86/vm/solaris_x86_32.s Thu Jul 18 00:52:54 2013 -0400
@@ -54,20 +54,6 @@
popl %eax
ret
- .align 16
- .globl SafeFetch32
- .globl SafeFetchN
- .globl Fetch32PFI, Fetch32Resume
-SafeFetch32:
-SafeFetchN:
- movl 0x8(%esp), %eax
- movl 0x4(%esp), %ecx
-Fetch32PFI:
- movl (%ecx), %eax
-Fetch32Resume:
- ret
-
-
.align 16
.globl SpinPause
SpinPause:
--- a/hotspot/src/os_cpu/solaris_x86/vm/solaris_x86_64.s Mon Jul 15 15:14:58 2013 -0700
+++ b/hotspot/src/os_cpu/solaris_x86/vm/solaris_x86_64.s Thu Jul 18 00:52:54 2013 -0400
@@ -21,54 +21,34 @@
/ questions.
/
- .globl fs_load
- .globl fs_thread
+ .globl fs_load
+ .globl fs_thread
// NOTE WELL! The _Copy functions are called directly
- // from server-compiler-generated code via CallLeafNoFP,
- // which means that they *must* either not use floating
- // point or use it in the same manner as does the server
- // compiler.
+ // from server-compiler-generated code via CallLeafNoFP,
+ // which means that they *must* either not use floating
+ // point or use it in the same manner as does the server
+ // compiler.
.globl _Copy_arrayof_conjoint_bytes
.globl _Copy_conjoint_jshorts_atomic
- .globl _Copy_arrayof_conjoint_jshorts
+ .globl _Copy_arrayof_conjoint_jshorts
.globl _Copy_conjoint_jints_atomic
.globl _Copy_arrayof_conjoint_jints
- .globl _Copy_conjoint_jlongs_atomic
+ .globl _Copy_conjoint_jlongs_atomic
.globl _Copy_arrayof_conjoint_jlongs
- .section .text,"ax"
+ .section .text,"ax"
/ Fast thread accessors, used by threadLS_solaris_amd64.cpp
- .align 16
+ .align 16
fs_load:
- movq %fs:(%rdi),%rax
- ret
-
- .align 16
-fs_thread:
- movq %fs:0x0,%rax
- ret
-
- .globl SafeFetch32, Fetch32PFI, Fetch32Resume
- .align 16
- // Prototype: int SafeFetch32 (int * Adr, int ErrValue)
-SafeFetch32:
- movl %esi, %eax
-Fetch32PFI:
- movl (%rdi), %eax
-Fetch32Resume:
+ movq %fs:(%rdi),%rax
ret
- .globl SafeFetchN, FetchNPFI, FetchNResume
- .align 16
- // Prototype: intptr_t SafeFetchN (intptr_t * Adr, intptr_t ErrValue)
-SafeFetchN:
- movq %rsi, %rax
-FetchNPFI:
- movq (%rdi), %rax
-FetchNResume:
+ .align 16
+fs_thread:
+ movq %fs:0x0,%rax
ret
.globl SpinPause
@@ -78,7 +58,7 @@
nop
movq $1, %rax
ret
-
+
/ Support for void Copy::arrayof_conjoint_bytes(void* from,
/ void* to,
@@ -340,7 +320,7 @@
addq $4,%rdx
jg 1b
ret
-
+
/ Support for void Copy::arrayof_conjoint_jlongs(jlong* from,
/ jlong* to,
/ size_t count)
--- a/hotspot/src/os_cpu/windows_x86/vm/os_windows_x86.cpp Mon Jul 15 15:14:58 2013 -0700
+++ b/hotspot/src/os_cpu/windows_x86/vm/os_windows_x86.cpp Thu Jul 18 00:52:54 2013 -0400
@@ -518,24 +518,6 @@
st->cr();
}
-extern "C" int SafeFetch32 (int * adr, int Err) {
- int rv = Err ;
- _try {
- rv = *((volatile int *) adr) ;
- } __except(EXCEPTION_EXECUTE_HANDLER) {
- }
- return rv ;
-}
-
-extern "C" intptr_t SafeFetchN (intptr_t * adr, intptr_t Err) {
- intptr_t rv = Err ;
- _try {
- rv = *((volatile intptr_t *) adr) ;
- } __except(EXCEPTION_EXECUTE_HANDLER) {
- }
- return rv ;
-}
-
extern "C" int SpinPause () {
#ifdef AMD64
return 0 ;
--- a/hotspot/src/share/vm/runtime/os.hpp Mon Jul 15 15:14:58 2013 -0700
+++ b/hotspot/src/share/vm/runtime/os.hpp Thu Jul 18 00:52:54 2013 -0400
@@ -915,8 +915,6 @@
// of the global SpinPause() with C linkage.
// It'd also be eligible for inlining on many platforms.
-extern "C" int SpinPause () ;
-extern "C" int SafeFetch32 (int * adr, int errValue) ;
-extern "C" intptr_t SafeFetchN (intptr_t * adr, intptr_t errValue) ;
+extern "C" int SpinPause();
#endif // SHARE_VM_RUNTIME_OS_HPP
--- a/hotspot/src/share/vm/runtime/stubRoutines.cpp Mon Jul 15 15:14:58 2013 -0700
+++ b/hotspot/src/share/vm/runtime/stubRoutines.cpp Thu Jul 18 00:52:54 2013 -0400
@@ -136,6 +136,13 @@
double (* StubRoutines::_intrinsic_cos )(double) = NULL;
double (* StubRoutines::_intrinsic_tan )(double) = NULL;
+address StubRoutines::_safefetch32_entry = NULL;
+address StubRoutines::_safefetch32_fault_pc = NULL;
+address StubRoutines::_safefetch32_continuation_pc = NULL;
+address StubRoutines::_safefetchN_entry = NULL;
+address StubRoutines::_safefetchN_fault_pc = NULL;
+address StubRoutines::_safefetchN_continuation_pc = NULL;
+
// Initialization
//
// Note: to break cycle with universe initialization, stubs are generated in two phases.
--- a/hotspot/src/share/vm/runtime/stubRoutines.hpp Mon Jul 15 15:14:58 2013 -0700
+++ b/hotspot/src/share/vm/runtime/stubRoutines.hpp Thu Jul 18 00:52:54 2013 -0400
@@ -221,6 +221,14 @@
static double (*_intrinsic_cos)(double);
static double (*_intrinsic_tan)(double);
+ // Safefetch stubs.
+ static address _safefetch32_entry;
+ static address _safefetch32_fault_pc;
+ static address _safefetch32_continuation_pc;
+ static address _safefetchN_entry;
+ static address _safefetchN_fault_pc;
+ static address _safefetchN_continuation_pc;
+
public:
// Initialization/Testing
static void initialize1(); // must happen before universe::genesis
@@ -382,6 +390,34 @@
}
//
+ // Safefetch stub support
+ //
+
+ typedef int (*SafeFetch32Stub)(int* adr, int errValue);
+ typedef intptr_t (*SafeFetchNStub) (intptr_t* adr, intptr_t errValue);
+
+ static SafeFetch32Stub SafeFetch32_stub() { return CAST_TO_FN_PTR(SafeFetch32Stub, _safefetch32_entry); }
+ static SafeFetchNStub SafeFetchN_stub() { return CAST_TO_FN_PTR(SafeFetchNStub, _safefetchN_entry); }
+
+ static bool is_safefetch_fault(address pc) {
+ return pc != NULL &&
+ (pc == _safefetch32_fault_pc ||
+ pc == _safefetchN_fault_pc);
+ }
+
+ static address continuation_for_safefetch_fault(address pc) {
+ assert(_safefetch32_continuation_pc != NULL &&
+ _safefetchN_continuation_pc != NULL,
+ "not initialized");
+
+ if (pc == _safefetch32_fault_pc) return _safefetch32_continuation_pc;
+ if (pc == _safefetchN_fault_pc) return _safefetchN_continuation_pc;
+
+ ShouldNotReachHere();
+ return NULL;
+ }
+
+ //
// Default versions of the above arraycopy functions for platforms which do
// not have specialized versions
//
@@ -400,4 +436,15 @@
static void arrayof_oop_copy_uninit(HeapWord* src, HeapWord* dest, size_t count);
};
+// SafeFetch allows loading a value from a location that is not known
+// to be valid. If the load causes a fault, the error value is returned.
+inline int SafeFetch32(int* adr, int errValue) {
+ assert(StubRoutines::SafeFetch32_stub(), "stub not yet generated");
+ return StubRoutines::SafeFetch32_stub()(adr, errValue);
+}
+inline intptr_t SafeFetchN(intptr_t* adr, intptr_t errValue) {
+ assert(StubRoutines::SafeFetchN_stub(), "stub not yet generated");
+ return StubRoutines::SafeFetchN_stub()(adr, errValue);
+}
+
#endif // SHARE_VM_RUNTIME_STUBROUTINES_HPP
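
With the stubs in place, SafeFetch32/SafeFetchN give VM code a crash-proof probe of arbitrary memory once initialize2() has run. A minimal usage sketch, not part of the patch; the helper name and the double-probe idiom for disambiguating the sentinel are illustrative only:

    #include "runtime/stubRoutines.hpp"

    // Returns true if *p can be read without faulting. Probing twice with two
    // different sentinels distinguishes "unmapped" from "mapped and happens to
    // contain the sentinel value".
    static bool is_readable_word(intptr_t* p) {
      const intptr_t sentinel = 0x5afe5afe;
      if (SafeFetchN(p, sentinel) != sentinel)   return true;
      if (SafeFetchN(p, ~sentinel) != ~sentinel) return true;
      return false;  // both probes returned their errValue => the load faulted
    }
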
--- a/hotspot/src/share/vm/services/memTracker.cpp Mon Jul 15 15:14:58 2013 -0700
+++ b/hotspot/src/share/vm/services/memTracker.cpp Thu Jul 18 00:52:54 2013 -0400
@@ -81,13 +81,13 @@
} else if (strcmp(option_line, "=detail") == 0) {
// detail relies on a stack-walking ability that may not
// be available depending on platform and/or compiler flags
- if (PLATFORM_NMT_DETAIL_SUPPORTED) {
+#if PLATFORM_NATIVE_STACK_WALKING_SUPPORTED
_tracking_level = NMT_detail;
- } else {
+#else
jio_fprintf(defaultStream::error_stream(),
- "NMT detail is not supported on this platform. Using NMT summary instead.");
+ "NMT detail is not supported on this platform. Using NMT summary instead.\n");
_tracking_level = NMT_summary;
- }
+#endif
} else if (strcmp(option_line, "=off") != 0) {
vm_exit_during_initialization("Syntax error, expecting -XX:NativeMemoryTracking=[off|summary|detail]", NULL);
}
--- a/hotspot/src/share/vm/utilities/globalDefinitions.hpp Mon Jul 15 15:14:58 2013 -0700
+++ b/hotspot/src/share/vm/utilities/globalDefinitions.hpp Thu Jul 18 00:52:54 2013 -0400
@@ -381,12 +381,12 @@
#endif
/*
- * If a platform does not support NMT_detail
+ * If a platform does not support native stack walking
* the platform specific globalDefinitions (above)
- * can set PLATFORM_NMT_DETAIL_SUPPORTED to false
+ * can set PLATFORM_NATIVE_STACK_WALKING_SUPPORTED to 0
*/
-#ifndef PLATFORM_NMT_DETAIL_SUPPORTED
-#define PLATFORM_NMT_DETAIL_SUPPORTED true
+#ifndef PLATFORM_NATIVE_STACK_WALKING_SUPPORTED
+#define PLATFORM_NATIVE_STACK_WALKING_SUPPORTED 1
#endif
// The byte alignment to be used by Arena::Amalloc. See bugid 4169348.
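
PLATFORM_NATIVE_STACK_WALKING_SUPPORTED defaults to 1 here; a port whose compiler or ABI cannot walk native frames opts out in its platform-specific globalDefinitions header before this point, which makes NMT detail fall back to summary as the memTracker.cpp hunk above shows. Sketch of such an opt-out (the exact file it would live in is port-specific and not part of this patch):

    // In the port's globalDefinitions_<toolchain>.hpp:
    #define PLATFORM_NATIVE_STACK_WALKING_SUPPORTED 0
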