--- a/.hgtags-top-repo Thu Nov 13 10:22:24 2014 -0800
+++ b/.hgtags-top-repo Wed Jul 05 20:07:30 2017 +0200
@@ -281,3 +281,4 @@
201d4e235d597a25a2d3ee1404394789ba386119 jdk9-b36
723a67b0c442391447b1d8aad8b249d06d1032e8 jdk9-b37
d42c0a90afc3c66ca87543076ec9aafd4b4680de jdk9-b38
+512dbbeb1730edcebfec873fc3f1455660b32000 jdk9-b39
--- a/hotspot/.hgtags Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/.hgtags Wed Jul 05 20:07:30 2017 +0200
@@ -441,3 +441,4 @@
464ab653fbb17eb518d8ef60f8df301de7ef00d0 jdk9-b36
b1c2dd843f247a1db19e1e85eb62ca405f72dc26 jdk9-b37
c363a8b87e477ee45d6d3cb2a36cb365141bc596 jdk9-b38
+9cb75e5e394827ccbaf2e15524108a412dc4ddc5 jdk9-b39
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/CodeCacheSweeperThread.java Wed Jul 05 20:07:30 2017 +0200
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.runtime;
+
+import java.io.*;
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.types.*;
+
+public class CodeCacheSweeperThread extends JavaThread {
+ public CodeCacheSweeperThread(Address addr) {
+ super(addr);
+ }
+
+ public boolean isJavaThread() { return false; }
+ public boolean isHiddenFromExternalView() { return true; }
+ public boolean isCodeCacheSweeperThread() { return true; }
+
+}
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/JavaThread.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/JavaThread.java Wed Jul 05 20:07:30 2017 +0200
@@ -118,9 +118,10 @@
return VM.getVM().getThreads().createJavaThreadWrapper(threadAddr);
}
- /** NOTE: for convenience, this differs in definition from the
- underlying VM. Only "pure" JavaThreads return true;
- CompilerThreads and JVMDIDebuggerThreads return false. FIXME:
+ /** NOTE: for convenience, this differs in definition from the underlying VM.
+ Only "pure" JavaThreads return true; CompilerThreads, the CodeCacheSweeperThread,
+ and JVMDIDebuggerThreads return false.
+ FIXME:
consider encapsulating platform-specific functionality in an
object instead of using inheritance (which is the primary reason
we can't traverse CompilerThreads, etc; didn't want to have, for
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/Thread.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/Thread.java Wed Jul 05 20:07:30 2017 +0200
@@ -111,14 +111,15 @@
return allocatedBytesField.getValue(addr);
}
- public boolean isVMThread() { return false; }
- public boolean isJavaThread() { return false; }
- public boolean isCompilerThread() { return false; }
- public boolean isHiddenFromExternalView() { return false; }
- public boolean isJvmtiAgentThread() { return false; }
- public boolean isWatcherThread() { return false; }
+ public boolean isVMThread() { return false; }
+ public boolean isJavaThread() { return false; }
+ public boolean isCompilerThread() { return false; }
+ public boolean isCodeCacheSweeperThread() { return false; }
+ public boolean isHiddenFromExternalView() { return false; }
+ public boolean isJvmtiAgentThread() { return false; }
+ public boolean isWatcherThread() { return false; }
public boolean isConcurrentMarkSweepThread() { return false; }
- public boolean isServiceThread() { return false; }
+ public boolean isServiceThread() { return false; }
/** Memory operations */
public void oopsDo(AddressVisitor oopVisitor) {
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/Threads.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/Threads.java Wed Jul 05 20:07:30 2017 +0200
@@ -120,6 +120,7 @@
virtualConstructor.addMapping("JavaThread", JavaThread.class);
if (!VM.getVM().isCore()) {
virtualConstructor.addMapping("CompilerThread", CompilerThread.class);
+ virtualConstructor.addMapping("CodeCacheSweeperThread", CodeCacheSweeperThread.class);
}
// for now, use JavaThread itself. fix it later with appropriate class if needed
virtualConstructor.addMapping("SurrogateLockerThread", JavaThread.class);
@@ -164,7 +165,7 @@
return thread;
} catch (Exception e) {
throw new RuntimeException("Unable to deduce type of thread from address " + threadAddr +
- " (expected type JavaThread, CompilerThread, ServiceThread, JvmtiAgentThread, or SurrogateLockerThread)", e);
+ " (expected type JavaThread, CompilerThread, ServiceThread, JvmtiAgentThread, SurrogateLockerThread, or CodeCacheSweeperThread)", e);
}
}
@@ -201,7 +202,7 @@
public List getPendingThreads(ObjectMonitor monitor) {
List pendingThreads = new ArrayList();
for (JavaThread thread = first(); thread != null; thread = thread.next()) {
- if (thread.isCompilerThread()) {
+ if (thread.isCompilerThread() || thread.isCodeCacheSweeperThread()) {
continue;
}
ObjectMonitor pending = thread.getCurrentPendingMonitor();
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/soql/sa.js Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/soql/sa.js Wed Jul 05 20:07:30 2017 +0200
@@ -836,6 +836,7 @@
// Java Threads
vmType2Class["JavaThread"] = sapkg.runtime.JavaThread;
vmType2Class["CompilerThread"] = sapkg.runtime.CompilerThread;
+vmType2Class["CodeCacheSweeperThread"] = sapkg.runtime.CodeCacheSweeperThread;
vmType2Class["SurrogateLockerThread"] = sapkg.runtime.JavaThread;
vmType2Class["DebuggerThread"] = sapkg.runtime.DebuggerThread;
--- a/hotspot/make/solaris/makefiles/vm.make Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/make/solaris/makefiles/vm.make Wed Jul 05 20:07:30 2017 +0200
@@ -143,7 +143,7 @@
LIBS += -lsocket -lsched -ldl $(LIBM) -lthread -lc -ldemangle
endif # sparcWorks
-LIBS += -lkstat -lpicl
+LIBS += -lkstat
# By default, link the *.o into the library, not the executable.
LINK_INTO$(LINK_INTO) = LIBJVM
--- a/hotspot/make/windows/makefiles/compile.make Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/make/windows/makefiles/compile.make Wed Jul 05 20:07:30 2017 +0200
@@ -158,7 +158,7 @@
!endif
LD_FLAGS= $(LD_FLAGS) kernel32.lib user32.lib gdi32.lib winspool.lib \
comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib \
- uuid.lib Wsock32.lib winmm.lib /nologo /machine:$(MACHINE) /opt:REF \
+ uuid.lib Wsock32.lib winmm.lib version.lib /nologo /machine:$(MACHINE) /opt:REF \
/opt:ICF,8
!if "$(ENABLE_FULL_DEBUG_SYMBOLS)" == "1"
LD_FLAGS= $(LD_FLAGS) /map /debug
--- a/hotspot/src/cpu/sparc/vm/interpreterRT_sparc.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/cpu/sparc/vm/interpreterRT_sparc.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -237,7 +237,7 @@
// handle arguments
// Warning: We use reg arg slot 00 temporarily to return the RegArgSignature
// back to the code that pops the arguments into the CPU registers
- SlowSignatureHandler(m, (address)from, m->is_static() ? to+2 : to+1, to).iterate(UCONST64(-1));
+ SlowSignatureHandler(m, (address)from, m->is_static() ? to+2 : to+1, to).iterate((uint64_t)CONST64(-1));
// return result handler
return Interpreter::result_handler(m->result_type());
IRT_END
--- a/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -60,10 +60,10 @@
static jlong fp_signmask_pool[(4+1)*2]; // 4*128bits(data) + 128bits(alignment)
// Static initialization during VM startup.
-static jlong *float_signmask_pool = double_quadword(&fp_signmask_pool[1*2], CONST64(0x7FFFFFFF7FFFFFFF), CONST64(0x7FFFFFFF7FFFFFFF));
-static jlong *double_signmask_pool = double_quadword(&fp_signmask_pool[2*2], CONST64(0x7FFFFFFFFFFFFFFF), CONST64(0x7FFFFFFFFFFFFFFF));
-static jlong *float_signflip_pool = double_quadword(&fp_signmask_pool[3*2], CONST64(0x8000000080000000), CONST64(0x8000000080000000));
-static jlong *double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], CONST64(0x8000000000000000), CONST64(0x8000000000000000));
+static jlong *float_signmask_pool = double_quadword(&fp_signmask_pool[1*2], CONST64(0x7FFFFFFF7FFFFFFF), CONST64(0x7FFFFFFF7FFFFFFF));
+static jlong *double_signmask_pool = double_quadword(&fp_signmask_pool[2*2], CONST64(0x7FFFFFFFFFFFFFFF), CONST64(0x7FFFFFFFFFFFFFFF));
+static jlong *float_signflip_pool = double_quadword(&fp_signmask_pool[3*2], (jlong)UCONST64(0x8000000080000000), (jlong)UCONST64(0x8000000080000000));
+static jlong *double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], (jlong)UCONST64(0x8000000000000000), (jlong)UCONST64(0x8000000000000000));
--- a/hotspot/src/cpu/x86/vm/c1_Runtime1_x86.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/cpu/x86/vm/c1_Runtime1_x86.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -1597,7 +1597,7 @@
__ movl(rdx, 0x80000000);
__ xorl(rax, rax);
#else
- __ mov64(rax, CONST64(0x8000000000000000));
+ __ mov64(rax, UCONST64(0x8000000000000000));
#endif // _LP64
__ jmp(do_return);
--- a/hotspot/src/cpu/x86/vm/interpreterRT_x86_32.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/cpu/x86/vm/interpreterRT_x86_32.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -135,7 +135,7 @@
methodHandle m(thread, (Method*)method);
assert(m->is_native(), "sanity check");
// handle arguments
- SlowSignatureHandler(m, (address)from, to + 1).iterate(UCONST64(-1));
+ SlowSignatureHandler(m, (address)from, to + 1).iterate((uint64_t)CONST64(-1));
// return result handler
return Interpreter::result_handler(m->result_type());
IRT_END
--- a/hotspot/src/cpu/x86/vm/interpreterRT_x86_64.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/cpu/x86/vm/interpreterRT_x86_64.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -487,7 +487,7 @@
assert(m->is_native(), "sanity check");
// handle arguments
- SlowSignatureHandler(m, (address)from, to + 1).iterate(UCONST64(-1));
+ SlowSignatureHandler(m, (address)from, to + 1).iterate((uint64_t)CONST64(-1));
// return result handler
return Interpreter::result_handler(m->result_type());
--- a/hotspot/src/cpu/x86/vm/vm_version_x86.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/cpu/x86/vm/vm_version_x86.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -865,14 +865,19 @@
if (supports_bmi1()) {
// tzcnt does not require VEX prefix
if (FLAG_IS_DEFAULT(UseCountTrailingZerosInstruction)) {
- UseCountTrailingZerosInstruction = true;
+ if (!UseBMI1Instructions && !FLAG_IS_DEFAULT(UseBMI1Instructions)) {
+ // Don't use tzcnt if BMI1 is switched off on command line.
+ UseCountTrailingZerosInstruction = false;
+ } else {
+ UseCountTrailingZerosInstruction = true;
+ }
}
} else if (UseCountTrailingZerosInstruction) {
warning("tzcnt instruction is not available on this CPU");
FLAG_SET_DEFAULT(UseCountTrailingZerosInstruction, false);
}
- // BMI instructions use an encoding with VEX prefix.
+ // BMI instructions (except tzcnt) use an encoding with VEX prefix.
// VEX prefix is generated only when AVX > 0.
if (supports_bmi1() && supports_avx()) {
if (FLAG_IS_DEFAULT(UseBMI1Instructions)) {
--- a/hotspot/src/cpu/zero/vm/interpreterRT_zero.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/cpu/zero/vm/interpreterRT_zero.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -155,7 +155,7 @@
intptr_t *buf = (intptr_t *) stack->alloc(required_words * wordSize);
SlowSignatureHandlerGenerator sshg(methodHandle(thread, method), buf);
- sshg.generate(UCONST64(-1));
+ sshg.generate((uint64_t)CONST64(-1));
SignatureHandler *handler = sshg.handler();
handler->finalize();
--- a/hotspot/src/os/aix/vm/os_aix.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/os/aix/vm/os_aix.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -1641,7 +1641,8 @@
char* rp = realpath((char *)dlinfo.dli_fname, buf);
assert(rp != NULL, "error in realpath(): maybe the 'path' argument is too long?");
- strcpy(saved_jvm_path, buf);
+ strncpy(saved_jvm_path, buf, sizeof(saved_jvm_path));
+ saved_jvm_path[sizeof(saved_jvm_path) - 1] = '\0';
}
void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
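The same copy-then-terminate pattern recurs in the os_bsd.cpp, os_linux.cpp, os_solaris.cpp and os_windows.cpp hunks below for saved_jvm_path. A minimal standalone sketch of that pattern, with a hypothetical helper name that is not part of the patch:

    #include <string.h>

    // strncpy() does not NUL-terminate the destination when the source fills
    // the buffer, so the last byte is forced to '\0' explicitly.
    static void copy_path_truncating(char* dst, size_t dst_size, const char* src) {
      strncpy(dst, src, dst_size);
      dst[dst_size - 1] = '\0';
    }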
@@ -3829,11 +3830,6 @@
return JNI_OK;
}
-// this is called at the end of vm_initialization
-void os::init_3(void) {
- return;
-}
-
// Mark the polling page as unreadable
void os::make_polling_page_unreadable(void) {
if (!guard_memory((char*)_polling_page, Aix::page_size())) {
@@ -4137,15 +4133,6 @@
return 1;
}
-int os::socket_available(int fd, jint *pbytes) {
- // Linux doc says EINTR not returned, unlike Solaris
- int ret = ::ioctl(fd, FIONREAD, pbytes);
-
- //%% note ioctl can return 0 when successful, JVM_SocketAvailable
- // is expected to return 0 on failure and 1 on success to the jdk.
- return (ret < 0) ? 0 : 1;
-}
-
// Map a block of memory.
char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
char *addr, size_t bytes, bool read_only,
--- a/hotspot/src/os/aix/vm/os_aix.inline.hpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/os/aix/vm/os_aix.inline.hpp Wed Jul 05 20:07:30 2017 +0200
@@ -178,92 +178,14 @@
return os::send(fd, buf, nBytes, flags);
}
-inline int os::timeout(int fd, long timeout) {
- julong prevtime,newtime;
- struct timeval t;
-
- gettimeofday(&t, NULL);
- prevtime = ((julong)t.tv_sec * 1000) + t.tv_usec / 1000;
-
- for(;;) {
- struct pollfd pfd;
-
- pfd.fd = fd;
- pfd.events = POLLIN | POLLERR;
-
- int res = ::poll(&pfd, 1, timeout);
-
- if (res == OS_ERR && errno == EINTR) {
-
- // On Linux any value < 0 means "forever"
-
- if(timeout >= 0) {
- gettimeofday(&t, NULL);
- newtime = ((julong)t.tv_sec * 1000) + t.tv_usec / 1000;
- timeout -= newtime - prevtime;
- if(timeout <= 0)
- return OS_OK;
- prevtime = newtime;
- }
- } else
- return res;
- }
-}
-
-inline int os::listen(int fd, int count) {
- return ::listen(fd, count);
-}
-
inline int os::connect(int fd, struct sockaddr* him, socklen_t len) {
RESTARTABLE_RETURN_INT(::connect(fd, him, len));
}
-inline int os::accept(int fd, struct sockaddr* him, socklen_t* len) {
- // Linux doc says this can't return EINTR, unlike accept() on Solaris.
- // But see attachListener_linux.cpp, LinuxAttachListener::dequeue().
- return (int)::accept(fd, him, len);
-}
-
-inline int os::recvfrom(int fd, char* buf, size_t nBytes, uint flags,
- sockaddr* from, socklen_t* fromlen) {
- RESTARTABLE_RETURN_INT((int)::recvfrom(fd, buf, nBytes, flags, from, fromlen));
-}
-
-inline int os::sendto(int fd, char* buf, size_t len, uint flags,
- struct sockaddr* to, socklen_t tolen) {
- RESTARTABLE_RETURN_INT((int)::sendto(fd, buf, len, flags, to, tolen));
-}
-
-inline int os::socket_shutdown(int fd, int howto) {
- return ::shutdown(fd, howto);
-}
-
-inline int os::bind(int fd, struct sockaddr* him, socklen_t len) {
- return ::bind(fd, him, len);
-}
-
-inline int os::get_sock_name(int fd, struct sockaddr* him, socklen_t* len) {
- return ::getsockname(fd, him, len);
-}
-
-inline int os::get_host_name(char* name, int namelen) {
- return ::gethostname(name, namelen);
-}
-
inline struct hostent* os::get_host_by_name(char* name) {
return ::gethostbyname(name);
}
-inline int os::get_sock_opt(int fd, int level, int optname,
- char* optval, socklen_t* optlen) {
- return ::getsockopt(fd, level, optname, optval, optlen);
-}
-
-inline int os::set_sock_opt(int fd, int level, int optname,
- const char* optval, socklen_t optlen) {
- return ::setsockopt(fd, level, optname, optval, optlen);
-}
-
inline bool os::supports_monotonic_clock() {
// mread_real_time() is monotonic on AIX (see os::javaTimeNanos() comments)
return true;
--- a/hotspot/src/os/aix/vm/perfMemory_aix.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/os/aix/vm/perfMemory_aix.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -506,6 +506,7 @@
if (!is_directory_secure(dirname)) {
// the directory is not a secure directory
+ os::closedir(dirp);
return;
}
@@ -853,6 +854,9 @@
//
if (!is_directory_secure(dirname)) {
FREE_C_HEAP_ARRAY(char, dirname, mtInternal);
+ if (luser != user) {
+ FREE_C_HEAP_ARRAY(char, luser, mtInternal);
+ }
THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
"Process not found");
}
--- a/hotspot/src/os/bsd/vm/os_bsd.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/os/bsd/vm/os_bsd.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -1875,6 +1875,7 @@
}
strncpy(saved_jvm_path, buf, MAXPATHLEN);
+ saved_jvm_path[MAXPATHLEN - 1] = '\0';
}
void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
@@ -3635,9 +3636,6 @@
return JNI_OK;
}
-// this is called at the end of vm_initialization
-void os::init_3(void) { }
-
// Mark the polling page as unreadable
void os::make_polling_page_unreadable(void) {
if (!guard_memory((char*)_polling_page, Bsd::page_size())) {
@@ -3958,21 +3956,6 @@
return 1;
}
-int os::socket_available(int fd, jint *pbytes) {
- if (fd < 0) {
- return OS_OK;
- }
-
- int ret;
-
- RESTARTABLE(::ioctl(fd, FIONREAD, pbytes), ret);
-
- //%% note ioctl can return 0 when successful, JVM_SocketAvailable
- // is expected to return 0 on failure and 1 on success to the jdk.
-
- return (ret == OS_ERR) ? 0 : 1;
-}
-
// Map a block of memory.
char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
char *addr, size_t bytes, bool read_only,
@@ -4133,7 +4116,18 @@
}
-// Refer to the comments in os_solaris.cpp park-unpark.
+// Refer to the comments in os_solaris.cpp park-unpark. The next two
+// comment paragraphs are worth repeating here:
+//
+// Assumption:
+// Only one parker can exist on an event, which is why we allocate
+// them per-thread. Multiple unparkers can coexist.
+//
+// _Event serves as a restricted-range semaphore.
+// -1 : thread is blocked, i.e. there is a waiter
+// 0 : neutral: thread is running or ready,
+// could have been signaled after a wait started
+// 1 : signaled - thread is running or ready
//
// Beware -- Some versions of NPTL embody a flaw where pthread_cond_timedwait() can
// hang indefinitely. For instance NPTL 0.60 on 2.4.21-4ELsmp is vulnerable.
@@ -4218,6 +4212,11 @@
}
void os::PlatformEvent::park() { // AKA "down()"
+ // Transitions for _Event:
+ // -1 => -1 : illegal
+ // 1 => 0 : pass - return immediately
+ // 0 => -1 : block; then set _Event to 0 before returning
+
// Invariant: Only the thread associated with the Event/PlatformEvent
// may call park().
// TODO: assert that _Assoc != NULL or _Assoc == Self
@@ -4255,6 +4254,11 @@
}
int os::PlatformEvent::park(jlong millis) {
+ // Transitions for _Event:
+ // -1 => -1 : illegal
+ // 1 => 0 : pass - return immediately
+ // 0 => -1 : block; then set _Event to 0 before returning
+
guarantee(_nParked == 0, "invariant");
int v;
@@ -4318,11 +4322,11 @@
void os::PlatformEvent::unpark() {
// Transitions for _Event:
- // 0 :=> 1
- // 1 :=> 1
- // -1 :=> either 0 or 1; must signal target thread
- // That is, we can safely transition _Event from -1 to either
- // 0 or 1.
+ // 0 => 1 : just return
+ // 1 => 1 : just return
+ // -1 => either 0 or 1; must signal target thread
+ // That is, we can safely transition _Event from -1 to either
+ // 0 or 1.
// See also: "Semaphores in Plan 9" by Mullender & Cox
//
// Note: Forcing a transition from "-1" to "1" on an unpark() means
@@ -4345,15 +4349,16 @@
status = pthread_mutex_unlock(_mutex);
assert_status(status == 0, status, "mutex_unlock");
if (AnyWaiters != 0) {
+ // Note that we signal() *after* dropping the lock for "immortal" Events.
+ // This is safe and avoids a common class of futile wakeups. In rare
+ // circumstances this can cause a thread to return prematurely from
+ // cond_{timed}wait() but the spurious wakeup is benign and the victim
+ // will simply re-test the condition and re-park itself.
+ // This provides particular benefit if the underlying platform does not
+ // provide wait morphing.
status = pthread_cond_signal(_cond);
assert_status(status == 0, status, "cond_signal");
}
-
- // Note that we signal() _after dropping the lock for "immortal" Events.
- // This is safe and avoids a common class of futile wakeups. In rare
- // circumstances this can cause a thread to return prematurely from
- // cond_{timed}wait() but the spurious wakeup is benign and the victim will
- // simply re-test the condition and re-park itself.
}
--- a/hotspot/src/os/bsd/vm/os_bsd.inline.hpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/os/bsd/vm/os_bsd.inline.hpp Wed Jul 05 20:07:30 2017 +0200
@@ -181,91 +181,14 @@
return os::send(fd, buf, nBytes, flags);
}
-inline int os::timeout(int fd, long timeout) {
- julong prevtime,newtime;
- struct timeval t;
-
- gettimeofday(&t, NULL);
- prevtime = ((julong)t.tv_sec * 1000) + t.tv_usec / 1000;
-
- for(;;) {
- struct pollfd pfd;
-
- pfd.fd = fd;
- pfd.events = POLLIN | POLLERR;
-
- int res = ::poll(&pfd, 1, timeout);
-
- if (res == OS_ERR && errno == EINTR) {
-
- // On Bsd any value < 0 means "forever"
-
- if(timeout >= 0) {
- gettimeofday(&t, NULL);
- newtime = ((julong)t.tv_sec * 1000) + t.tv_usec / 1000;
- timeout -= newtime - prevtime;
- if(timeout <= 0)
- return OS_OK;
- prevtime = newtime;
- }
- } else
- return res;
- }
-}
-
-inline int os::listen(int fd, int count) {
- return ::listen(fd, count);
-}
-
inline int os::connect(int fd, struct sockaddr* him, socklen_t len) {
RESTARTABLE_RETURN_INT(::connect(fd, him, len));
}
-inline int os::accept(int fd, struct sockaddr* him, socklen_t* len) {
- // At least OpenBSD and FreeBSD can return EINTR from accept.
- RESTARTABLE_RETURN_INT(::accept(fd, him, len));
-}
-
-inline int os::recvfrom(int fd, char* buf, size_t nBytes, uint flags,
- sockaddr* from, socklen_t* fromlen) {
- RESTARTABLE_RETURN_INT((int)::recvfrom(fd, buf, nBytes, flags, from, fromlen));
-}
-
-inline int os::sendto(int fd, char* buf, size_t len, uint flags,
- struct sockaddr *to, socklen_t tolen) {
- RESTARTABLE_RETURN_INT((int)::sendto(fd, buf, len, flags, to, tolen));
-}
-
-inline int os::socket_shutdown(int fd, int howto) {
- return ::shutdown(fd, howto);
-}
-
-inline int os::bind(int fd, struct sockaddr* him, socklen_t len) {
- return ::bind(fd, him, len);
-}
-
-inline int os::get_sock_name(int fd, struct sockaddr* him, socklen_t* len) {
- return ::getsockname(fd, him, len);
-}
-
-inline int os::get_host_name(char* name, int namelen) {
- return ::gethostname(name, namelen);
-}
-
inline struct hostent* os::get_host_by_name(char* name) {
return ::gethostbyname(name);
}
-inline int os::get_sock_opt(int fd, int level, int optname,
- char *optval, socklen_t* optlen) {
- return ::getsockopt(fd, level, optname, optval, optlen);
-}
-
-inline int os::set_sock_opt(int fd, int level, int optname,
- const char* optval, socklen_t optlen) {
- return ::setsockopt(fd, level, optname, optval, optlen);
-}
-
inline bool os::supports_monotonic_clock() {
#ifdef __APPLE__
return true;
--- a/hotspot/src/os/bsd/vm/perfMemory_bsd.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/os/bsd/vm/perfMemory_bsd.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -506,6 +506,7 @@
if (!is_directory_secure(dirname)) {
// the directory is not a secure directory
+ os::closedir(dirp);
return;
}
@@ -872,6 +873,9 @@
//
if (!is_directory_secure(dirname)) {
FREE_C_HEAP_ARRAY(char, dirname, mtInternal);
+ if (luser != user) {
+ FREE_C_HEAP_ARRAY(char, luser, mtInternal);
+ }
THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
"Process not found");
}
--- a/hotspot/src/os/linux/vm/os_linux.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/os/linux/vm/os_linux.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -163,35 +163,6 @@
// Declarations
static void unpackTime(timespec* absTime, bool isAbsolute, jlong time);
-#ifdef JAVASE_EMBEDDED
-class MemNotifyThread: public Thread {
- friend class VMStructs;
- public:
- virtual void run();
-
- private:
- static MemNotifyThread* _memnotify_thread;
- int _fd;
-
- public:
-
- // Constructor
- MemNotifyThread(int fd);
-
- // Tester
- bool is_memnotify_thread() const { return true; }
-
- // Printing
- char* name() const { return (char*)"Linux MemNotify Thread"; }
-
- // Returns the single instance of the MemNotifyThread
- static MemNotifyThread* memnotify_thread() { return _memnotify_thread; }
-
- // Create and start the single instance of MemNotifyThread
- static void start();
-};
-#endif // JAVASE_EMBEDDED
-
// utility functions
static int SR_initialize();
@@ -384,7 +355,10 @@
// Found the full path to libjvm.so.
// Now cut the path to <java_home>/jre if we can.
- *(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.
+ pslash = strrchr(buf, '/');
+ if (pslash != NULL) {
+ *pslash = '\0'; // Get rid of /libjvm.so.
+ }
pslash = strrchr(buf, '/');
if (pslash != NULL) {
*pslash = '\0'; // Get rid of /{client|server|hotspot}.
@@ -1223,7 +1197,7 @@
i = 0;
if (s) {
// Skip blank chars
- do s++; while (isspace(*s));
+ do { s++; } while (s && isspace(*s));
#define _UFM UINTX_FORMAT
#define _DFM INTX_FORMAT
@@ -2372,6 +2346,9 @@
// Check the current module name "libjvm.so".
p = strrchr(buf, '/');
+ if (p == NULL) {
+ return;
+ }
assert(strstr(p, "/libjvm") == p, "invalid library name");
rp = realpath(java_home_var, buf);
@@ -2405,6 +2382,7 @@
}
strncpy(saved_jvm_path, buf, MAXPATHLEN);
+ saved_jvm_path[MAXPATHLEN - 1] = '\0';
}
void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
@@ -4866,17 +4844,6 @@
return JNI_OK;
}
-// this is called at the end of vm_initialization
-void os::init_3(void) {
-#ifdef JAVASE_EMBEDDED
- // Start the MemNotifyThread
- if (LowMemoryProtection) {
- MemNotifyThread::start();
- }
- return;
-#endif
-}
-
// Mark the polling page as unreadable
void os::make_polling_page_unreadable(void) {
if (!guard_memory((char*)_polling_page, Linux::page_size())) {
@@ -5103,9 +5070,38 @@
errno = ENAMETOOLONG;
return -1;
}
- int fd;
-
- fd = ::open64(path, oflag, mode);
+
+ // All file descriptors that are opened in the Java process and not
+ // specifically destined for a subprocess should have the close-on-exec
+ // flag set. If we don't set it, then careless 3rd party native code
+ // might fork and exec without closing all appropriate file descriptors
+ // (e.g. as we do in closeDescriptors in UNIXProcess.c), and this in
+ // turn might:
+ //
+ // - cause end-of-file to fail to be detected on some file
+ // descriptors, resulting in mysterious hangs, or
+ //
+ // - might cause an fopen in the subprocess to fail on a system
+ // suffering from bug 1085341.
+ //
+ // (Yes, the default setting of the close-on-exec flag is a Unix
+ // design flaw)
+ //
+ // See:
+ // 1085341: 32-bit stdio routines should support file descriptors >255
+ // 4843136: (process) pipe file descriptor from Runtime.exec not being closed
+ // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
+ //
+ // Modern Linux kernels (after 2.6.23 2007) support O_CLOEXEC with open().
+ // O_CLOEXEC is preferable to using FD_CLOEXEC on an open file descriptor
+ // because it saves a system call and removes a small window where the flag
+ // is unset. On ancient Linux kernels the O_CLOEXEC flag will be ignored
+ // and we fall back to using FD_CLOEXEC (see below).
+#ifdef O_CLOEXEC
+ oflag |= O_CLOEXEC;
+#endif
+
+ int fd = ::open64(path, oflag, mode);
if (fd == -1) return -1;
//If the open succeeded, the file might still be a directory
@@ -5126,32 +5122,17 @@
}
}
- // All file descriptors that are opened in the JVM and not
- // specifically destined for a subprocess should have the
- // close-on-exec flag set. If we don't set it, then careless 3rd
- // party native code might fork and exec without closing all
- // appropriate file descriptors (e.g. as we do in closeDescriptors in
- // UNIXProcess.c), and this in turn might:
- //
- // - cause end-of-file to fail to be detected on some file
- // descriptors, resulting in mysterious hangs, or
- //
- // - might cause an fopen in the subprocess to fail on a system
- // suffering from bug 1085341.
- //
- // (Yes, the default setting of the close-on-exec flag is a Unix
- // design flaw)
- //
- // See:
- // 1085341: 32-bit stdio routines should support file descriptors >255
- // 4843136: (process) pipe file descriptor from Runtime.exec not being closed
- // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
- //
#ifdef FD_CLOEXEC
- {
+ // Validate that the use of the O_CLOEXEC flag on open above worked.
+ // With recent kernels, we will perform this check exactly once.
+ static sig_atomic_t O_CLOEXEC_is_known_to_work = 0;
+ if (!O_CLOEXEC_is_known_to_work) {
int flags = ::fcntl(fd, F_GETFD);
if (flags != -1) {
- ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
+ if ((flags & FD_CLOEXEC) != 0)
+ O_CLOEXEC_is_known_to_work = 1;
+ else
+ ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
}
}
#endif
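A minimal standalone sketch of the open-with-close-on-exec pattern described in the comment above: request O_CLOEXEC where the headers define it, then verify (or retrofit) FD_CLOEXEC with fcntl() on kernels that silently ignore the flag. The function name is hypothetical and not taken from the JDK sources:

    #include <fcntl.h>
    #include <sys/types.h>

    static int open_cloexec(const char* path, int oflag, mode_t mode) {
    #ifdef O_CLOEXEC
      oflag |= O_CLOEXEC;               // honored by Linux >= 2.6.23, ignored by older kernels
    #endif
      int fd = ::open(path, oflag, mode);
      if (fd == -1) return -1;
    #ifdef FD_CLOEXEC
      int flags = ::fcntl(fd, F_GETFD);
      if (flags != -1 && (flags & FD_CLOEXEC) == 0) {
        ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);  // fallback when O_CLOEXEC was ignored
      }
    #endif
      return fd;
    }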
@@ -5211,15 +5192,6 @@
return 1;
}
-int os::socket_available(int fd, jint *pbytes) {
- // Linux doc says EINTR not returned, unlike Solaris
- int ret = ::ioctl(fd, FIONREAD, pbytes);
-
- //%% note ioctl can return 0 when successful, JVM_SocketAvailable
- // is expected to return 0 on failure and 1 on success to the jdk.
- return (ret < 0) ? 0 : 1;
-}
-
// Map a block of memory.
char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
char *addr, size_t bytes, bool read_only,
@@ -5349,7 +5321,7 @@
if (s == NULL) return -1;
// Skip blank chars
- do s++; while (isspace(*s));
+ do { s++; } while (s && isspace(*s));
count = sscanf(s,"%c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu",
&cdummy, &idummy, &idummy, &idummy, &idummy, &idummy,
@@ -5410,7 +5382,18 @@
}
-// Refer to the comments in os_solaris.cpp park-unpark.
+// Refer to the comments in os_solaris.cpp park-unpark. The next two
+// comment paragraphs are worth repeating here:
+//
+// Assumption:
+// Only one parker can exist on an event, which is why we allocate
+// them per-thread. Multiple unparkers can coexist.
+//
+// _Event serves as a restricted-range semaphore.
+// -1 : thread is blocked, i.e. there is a waiter
+// 0 : neutral: thread is running or ready,
+// could have been signaled after a wait started
+// 1 : signaled - thread is running or ready
//
// Beware -- Some versions of NPTL embody a flaw where pthread_cond_timedwait() can
// hang indefinitely. For instance NPTL 0.60 on 2.4.21-4ELsmp is vulnerable.
@@ -5509,6 +5492,11 @@
}
void os::PlatformEvent::park() { // AKA "down()"
+ // Transitions for _Event:
+ // -1 => -1 : illegal
+ // 1 => 0 : pass - return immediately
+ // 0 => -1 : block; then set _Event to 0 before returning
+
// Invariant: Only the thread associated with the Event/PlatformEvent
// may call park().
// TODO: assert that _Assoc != NULL or _Assoc == Self
@@ -5546,6 +5534,11 @@
}
int os::PlatformEvent::park(jlong millis) {
+ // Transitions for _Event:
+ // -1 => -1 : illegal
+ // 1 => 0 : pass - return immediately
+ // 0 => -1 : block; then set _Event to 0 before returning
+
guarantee(_nParked == 0, "invariant");
int v;
@@ -5609,11 +5602,11 @@
void os::PlatformEvent::unpark() {
// Transitions for _Event:
- // 0 :=> 1
- // 1 :=> 1
- // -1 :=> either 0 or 1; must signal target thread
- // That is, we can safely transition _Event from -1 to either
- // 0 or 1.
+ // 0 => 1 : just return
+ // 1 => 1 : just return
+ // -1 => either 0 or 1; must signal target thread
+ // That is, we can safely transition _Event from -1 to either
+ // 0 or 1.
// See also: "Semaphores in Plan 9" by Mullender & Cox
//
// Note: Forcing a transition from "-1" to "1" on an unpark() means
@@ -5636,15 +5629,16 @@
status = pthread_mutex_unlock(_mutex);
assert_status(status == 0, status, "mutex_unlock");
if (AnyWaiters != 0) {
+ // Note that we signal() *after* dropping the lock for "immortal" Events.
+ // This is safe and avoids a common class of futile wakeups. In rare
+ // circumstances this can cause a thread to return prematurely from
+ // cond_{timed}wait() but the spurious wakeup is benign and the victim
+ // will simply re-test the condition and re-park itself.
+ // This provides particular benefit if the underlying platform does not
+ // provide wait morphing.
status = pthread_cond_signal(_cond);
assert_status(status == 0, status, "cond_signal");
}
-
- // Note that we signal() _after dropping the lock for "immortal" Events.
- // This is safe and avoids a common class of futile wakeups. In rare
- // circumstances this can cause a thread to return prematurely from
- // cond_{timed}wait() but the spurious wakeup is benign and the victim will
- // simply re-test the condition and re-park itself.
}
@@ -6006,82 +6000,6 @@
return strlen(buffer);
}
-#ifdef JAVASE_EMBEDDED
-//
-// A thread to watch the '/dev/mem_notify' device, which will tell us when the OS is running low on memory.
-//
-MemNotifyThread* MemNotifyThread::_memnotify_thread = NULL;
-
-// ctor
-//
-MemNotifyThread::MemNotifyThread(int fd): Thread() {
- assert(memnotify_thread() == NULL, "we can only allocate one MemNotifyThread");
- _fd = fd;
-
- if (os::create_thread(this, os::os_thread)) {
- _memnotify_thread = this;
- os::set_priority(this, NearMaxPriority);
- os::start_thread(this);
- }
-}
-
-// Where all the work gets done
-//
-void MemNotifyThread::run() {
- assert(this == memnotify_thread(), "expected the singleton MemNotifyThread");
-
- // Set up the select arguments
- fd_set rfds;
- if (_fd != -1) {
- FD_ZERO(&rfds);
- FD_SET(_fd, &rfds);
- }
-
- // Now wait for the mem_notify device to wake up
- while (1) {
- // Wait for the mem_notify device to signal us..
- int rc = select(_fd+1, _fd != -1 ? &rfds : NULL, NULL, NULL, NULL);
- if (rc == -1) {
- perror("select!\n");
- break;
- } else if (rc) {
- //ssize_t free_before = os::available_memory();
- //tty->print ("Notified: Free: %dK \n",os::available_memory()/1024);
-
- // The kernel is telling us there is not much memory left...
- // try to do something about that
-
- // If we are not already in a GC, try one.
- if (!Universe::heap()->is_gc_active()) {
- Universe::heap()->collect(GCCause::_allocation_failure);
-
- //ssize_t free_after = os::available_memory();
- //tty->print ("Post-Notify: Free: %dK\n",free_after/1024);
- //tty->print ("GC freed: %dK\n", (free_after - free_before)/1024);
- }
- // We might want to do something like the following if we find the GC's are not helping...
- // Universe::heap()->size_policy()->set_gc_time_limit_exceeded(true);
- }
- }
-}
-
-// See if the /dev/mem_notify device exists, and if so, start a thread to monitor it.
-//
-void MemNotifyThread::start() {
- int fd;
- fd = open("/dev/mem_notify", O_RDONLY, 0);
- if (fd < 0) {
- return;
- }
-
- if (memnotify_thread() == NULL) {
- new MemNotifyThread(fd);
- }
-}
-
-#endif // JAVASE_EMBEDDED
-
-
/////////////// Unit tests ///////////////
#ifndef PRODUCT
--- a/hotspot/src/os/linux/vm/os_linux.inline.hpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/os/linux/vm/os_linux.inline.hpp Wed Jul 05 20:07:30 2017 +0200
@@ -173,92 +173,14 @@
return os::send(fd, buf, nBytes, flags);
}
-inline int os::timeout(int fd, long timeout) {
- julong prevtime,newtime;
- struct timeval t;
-
- gettimeofday(&t, NULL);
- prevtime = ((julong)t.tv_sec * 1000) + t.tv_usec / 1000;
-
- for(;;) {
- struct pollfd pfd;
-
- pfd.fd = fd;
- pfd.events = POLLIN | POLLERR;
-
- int res = ::poll(&pfd, 1, timeout);
-
- if (res == OS_ERR && errno == EINTR) {
-
- // On Linux any value < 0 means "forever"
-
- if(timeout >= 0) {
- gettimeofday(&t, NULL);
- newtime = ((julong)t.tv_sec * 1000) + t.tv_usec / 1000;
- timeout -= newtime - prevtime;
- if(timeout <= 0)
- return OS_OK;
- prevtime = newtime;
- }
- } else
- return res;
- }
-}
-
-inline int os::listen(int fd, int count) {
- return ::listen(fd, count);
-}
-
inline int os::connect(int fd, struct sockaddr* him, socklen_t len) {
RESTARTABLE_RETURN_INT(::connect(fd, him, len));
}
-inline int os::accept(int fd, struct sockaddr* him, socklen_t* len) {
- // Linux doc says this can't return EINTR, unlike accept() on Solaris.
- // But see attachListener_linux.cpp, LinuxAttachListener::dequeue().
- return (int)::accept(fd, him, len);
-}
-
-inline int os::recvfrom(int fd, char* buf, size_t nBytes, uint flags,
- sockaddr* from, socklen_t* fromlen) {
- RESTARTABLE_RETURN_INT((int)::recvfrom(fd, buf, nBytes, flags, from, fromlen));
-}
-
-inline int os::sendto(int fd, char* buf, size_t len, uint flags,
- struct sockaddr* to, socklen_t tolen) {
- RESTARTABLE_RETURN_INT((int)::sendto(fd, buf, len, flags, to, tolen));
-}
-
-inline int os::socket_shutdown(int fd, int howto) {
- return ::shutdown(fd, howto);
-}
-
-inline int os::bind(int fd, struct sockaddr* him, socklen_t len) {
- return ::bind(fd, him, len);
-}
-
-inline int os::get_sock_name(int fd, struct sockaddr* him, socklen_t* len) {
- return ::getsockname(fd, him, len);
-}
-
-inline int os::get_host_name(char* name, int namelen) {
- return ::gethostname(name, namelen);
-}
-
inline struct hostent* os::get_host_by_name(char* name) {
return ::gethostbyname(name);
}
-inline int os::get_sock_opt(int fd, int level, int optname,
- char* optval, socklen_t* optlen) {
- return ::getsockopt(fd, level, optname, optval, optlen);
-}
-
-inline int os::set_sock_opt(int fd, int level, int optname,
- const char* optval, socklen_t optlen) {
- return ::setsockopt(fd, level, optname, optval, optlen);
-}
-
inline bool os::supports_monotonic_clock() {
return Linux::_clock_gettime != NULL;
}
--- a/hotspot/src/os/linux/vm/perfMemory_linux.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/os/linux/vm/perfMemory_linux.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -506,6 +506,7 @@
if (!is_directory_secure(dirname)) {
// the directory is not a secure directory
+ os::closedir(dirp);
return;
}
@@ -872,6 +873,9 @@
//
if (!is_directory_secure(dirname)) {
FREE_C_HEAP_ARRAY(char, dirname, mtInternal);
+ if (luser != user) {
+ FREE_C_HEAP_ARRAY(char, luser, mtInternal);
+ }
THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
"Process not found");
}
--- a/hotspot/src/os/posix/vm/os_posix.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/os/posix/vm/os_posix.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -663,7 +663,10 @@
}
}
- jio_snprintf(out, outlen, ret);
+ if (out && outlen > 0) {
+ strncpy(out, ret, outlen);
+ out[outlen - 1] = '\0';
+ }
return out;
}
--- a/hotspot/src/os/solaris/vm/os_solaris.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/os/solaris/vm/os_solaris.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -2221,6 +2221,7 @@
}
strncpy(saved_jvm_path, buf, MAXPATHLEN);
+ saved_jvm_path[MAXPATHLEN - 1] = '\0';
}
@@ -4761,10 +4762,6 @@
return JNI_OK;
}
-void os::init_3(void) {
- return;
-}
-
// Mark the polling page as unreadable
void os::make_polling_page_unreadable(void) {
if (mprotect((char *)_polling_page, page_size, PROT_NONE) != 0) {
@@ -5372,31 +5369,32 @@
// to immediately return 0 your code should still work,
// albeit degenerating to a spin loop.
//
-// An interesting optimization for park() is to use a trylock()
-// to attempt to acquire the mutex. If the trylock() fails
-// then we know that a concurrent unpark() operation is in-progress.
-// in that case the park() code could simply set _count to 0
-// and return immediately. The subsequent park() operation *might*
-// return immediately. That's harmless as the caller of park() is
-// expected to loop. By using trylock() we will have avoided a
-// avoided a context switch caused by contention on the per-thread mutex.
+// In a sense, park()-unpark() just provides more polite spinning
+// and polling with the key difference over naive spinning being
+// that a parked thread needs to be explicitly unparked() in order
+// to wake up and to poll the underlying condition.
//
-// TODO-FIXME:
-// 1. Reconcile Doug's JSR166 j.u.c park-unpark with the
-// objectmonitor implementation.
-// 2. Collapse the JSR166 parker event, and the
-// objectmonitor ParkEvent into a single "Event" construct.
-// 3. In park() and unpark() add:
-// assert (Thread::current() == AssociatedWith).
-// 4. add spurious wakeup injection on a -XX:EarlyParkReturn=N switch.
-// 1-out-of-N park() operations will return immediately.
+// Assumption:
+// Only one parker can exist on an event, which is why we allocate
+// them per-thread. Multiple unparkers can coexist.
//
// _Event transitions in park()
// -1 => -1 : illegal
// 1 => 0 : pass - return immediately
-// 0 => -1 : block
+// 0 => -1 : block; then set _Event to 0 before returning
+//
+// _Event transitions in unpark()
+// 0 => 1 : just return
+// 1 => 1 : just return
+// -1 => either 0 or 1; must signal target thread
+// That is, we can safely transition _Event from -1 to either
+// 0 or 1.
//
// _Event serves as a restricted-range semaphore.
+// -1 : thread is blocked, i.e. there is a waiter
+// 0 : neutral: thread is running or ready,
+// could have been signaled after a wait started
+// 1 : signaled - thread is running or ready
//
// Another possible encoding of _Event would be with
// explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
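A minimal pthread-based sketch of the -1/0/1 _Event protocol spelled out above (simplified: no spin phase, no timed park, hypothetical names); it is meant only to make the state transitions concrete, not to mirror the HotSpot implementation:

    #include <pthread.h>

    struct ParkEvent {
      int event;                      // -1 waiter blocked, 0 neutral, 1 signaled
      pthread_mutex_t mu;
      pthread_cond_t  cv;
    };

    void park(ParkEvent* e) {         // transitions: 1 => 0 (pass), 0 => -1 (block)
      pthread_mutex_lock(&e->mu);
      if (e->event < 1) {
        e->event = -1;                // announce a waiter
        while (e->event == -1) {      // tolerate spurious wakeups
          pthread_cond_wait(&e->cv, &e->mu);
        }
      }
      e->event = 0;                   // always return with the event back at neutral
      pthread_mutex_unlock(&e->mu);
    }

    void unpark(ParkEvent* e) {       // transitions: 0/1 => 1, -1 => 1 and signal
      pthread_mutex_lock(&e->mu);
      bool had_waiter = (e->event == -1);
      e->event = 1;
      pthread_mutex_unlock(&e->mu);
      if (had_waiter) {
        pthread_cond_signal(&e->cv);  // signal after dropping the lock
      }
    }

The signal-after-unlock ordering matches the comment the patch adds to unpark() below: the worst case is a benign spurious wakeup, in exchange for avoiding futile wakeups on platforms without wait morphing.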
@@ -5456,6 +5454,11 @@
}
void os::PlatformEvent::park() { // AKA: down()
+ // Transitions for _Event:
+ // -1 => -1 : illegal
+ // 1 => 0 : pass - return immediately
+ // 0 => -1 : block; then set _Event to 0 before returning
+
// Invariant: Only the thread associated with the Event/PlatformEvent
// may call park().
assert(_nParked == 0, "invariant");
@@ -5497,6 +5500,11 @@
}
int os::PlatformEvent::park(jlong millis) {
+ // Transitions for _Event:
+ // -1 => -1 : illegal
+ // 1 => 0 : pass - return immediately
+ // 0 => -1 : block; then set _Event to 0 before returning
+
guarantee(_nParked == 0, "invariant");
int v;
for (;;) {
@@ -5542,11 +5550,11 @@
void os::PlatformEvent::unpark() {
// Transitions for _Event:
- // 0 :=> 1
- // 1 :=> 1
- // -1 :=> either 0 or 1; must signal target thread
- // That is, we can safely transition _Event from -1 to either
- // 0 or 1.
+ // 0 => 1 : just return
+ // 1 => 1 : just return
+ // -1 => either 0 or 1; must signal target thread
+ // That is, we can safely transition _Event from -1 to either
+ // 0 or 1.
// See also: "Semaphores in Plan 9" by Mullender & Cox
//
// Note: Forcing a transition from "-1" to "1" on an unpark() means
@@ -5566,8 +5574,13 @@
assert_status(status == 0, status, "mutex_unlock");
guarantee(AnyWaiters == 0 || AnyWaiters == 1, "invariant");
if (AnyWaiters != 0) {
- // We intentional signal *after* dropping the lock
- // to avoid a common class of futile wakeups.
+ // Note that we signal() *after* dropping the lock for "immortal" Events.
+ // This is safe and avoids a common class of futile wakeups. In rare
+ // circumstances this can cause a thread to return prematurely from
+ // cond_{timed}wait() but the spurious wakeup is benign and the victim
+ // will simply re-test the condition and re-park itself.
+ // This provides particular benefit if the underlying platform does not
+ // provide wait morphing.
status = os::Solaris::cond_signal(_cond);
assert_status(status == 0, status, "cond_signal");
}
@@ -5912,37 +5925,6 @@
// a poll() is done with timeout == -1, in which case we repeat with this
// "wait forever" value.
-int os::timeout(int fd, long timeout) {
- int res;
- struct timeval t;
- julong prevtime, newtime;
- static const char* aNull = 0;
- struct pollfd pfd;
- pfd.fd = fd;
- pfd.events = POLLIN;
-
- assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
- "Assumed _thread_in_native");
-
- gettimeofday(&t, &aNull);
- prevtime = ((julong)t.tv_sec * 1000) + t.tv_usec / 1000;
-
- for (;;) {
- res = ::poll(&pfd, 1, timeout);
- if (res == OS_ERR && errno == EINTR) {
- if (timeout != -1) {
- gettimeofday(&t, &aNull);
- newtime = ((julong)t.tv_sec * 1000) + t.tv_usec /1000;
- timeout -= newtime - prevtime;
- if (timeout <= 0) {
- return OS_OK;
- }
- prevtime = newtime;
- }
- } else return res;
- }
-}
-
int os::connect(int fd, struct sockaddr *him, socklen_t len) {
int _result;
_result = ::connect(fd, him, len);
@@ -5982,46 +5964,6 @@
return _result;
}
-int os::accept(int fd, struct sockaddr* him, socklen_t* len) {
- if (fd < 0) {
- return OS_ERR;
- }
- assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
- "Assumed _thread_in_native");
- RESTARTABLE_RETURN_INT((int)::accept(fd, him, len));
-}
-
-int os::recvfrom(int fd, char* buf, size_t nBytes, uint flags,
- sockaddr* from, socklen_t* fromlen) {
- assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
- "Assumed _thread_in_native");
- RESTARTABLE_RETURN_INT((int)::recvfrom(fd, buf, nBytes, flags, from, fromlen));
-}
-
-int os::sendto(int fd, char* buf, size_t len, uint flags,
- struct sockaddr* to, socklen_t tolen) {
- assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
- "Assumed _thread_in_native");
- RESTARTABLE_RETURN_INT((int)::sendto(fd, buf, len, flags, to, tolen));
-}
-
-int os::socket_available(int fd, jint *pbytes) {
- if (fd < 0) {
- return OS_OK;
- }
- int ret;
- RESTARTABLE(::ioctl(fd, FIONREAD, pbytes), ret);
- // note: ioctl can return 0 when successful, JVM_SocketAvailable
- // is expected to return 0 on failure and 1 on success to the jdk.
- return (ret == OS_ERR) ? 0 : 1;
-}
-
-int os::bind(int fd, struct sockaddr* him, socklen_t len) {
- assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
- "Assumed _thread_in_native");
- return ::bind(fd, him, len);
-}
-
// Get the default path to the core file
// Returns the length of the string
int os::get_core_path(char* buffer, size_t bufferSize) {
--- a/hotspot/src/os/solaris/vm/os_solaris.inline.hpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/os/solaris/vm/os_solaris.inline.hpp Wed Jul 05 20:07:30 2017 +0200
@@ -120,38 +120,10 @@
return ::socket(domain, type, protocol);
}
-inline int os::listen(int fd, int count) {
- if (fd < 0) return OS_ERR;
-
- return ::listen(fd, count);
-}
-
-inline int os::socket_shutdown(int fd, int howto){
- return ::shutdown(fd, howto);
-}
-
-inline int os::get_sock_name(int fd, struct sockaddr* him, socklen_t* len){
- return ::getsockname(fd, him, len);
-}
-
-inline int os::get_host_name(char* name, int namelen){
- return ::gethostname(name, namelen);
-}
-
inline struct hostent* os::get_host_by_name(char* name) {
return ::gethostbyname(name);
}
-inline int os::get_sock_opt(int fd, int level, int optname,
- char* optval, socklen_t* optlen) {
- return ::getsockopt(fd, level, optname, optval, optlen);
-}
-
-inline int os::set_sock_opt(int fd, int level, int optname,
- const char *optval, socklen_t optlen) {
- return ::setsockopt(fd, level, optname, optval, optlen);
-}
-
inline bool os::supports_monotonic_clock() {
// javaTimeNanos() is monotonic on Solaris, see getTimeNanos() comments
return true;
--- a/hotspot/src/os/solaris/vm/perfMemory_solaris.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/os/solaris/vm/perfMemory_solaris.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -545,6 +545,7 @@
if (!is_directory_secure(dirname)) {
// the directory is not a secure directory
+ os::closedir(dirp);
return;
}
@@ -890,6 +891,9 @@
//
if (!is_directory_secure(dirname)) {
FREE_C_HEAP_ARRAY(char, dirname, mtInternal);
+ if (luser != user) {
+ FREE_C_HEAP_ARRAY(char, luser, mtInternal);
+ }
THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
"Process not found");
}
--- a/hotspot/src/os/windows/vm/attachListener_windows.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/os/windows/vm/attachListener_windows.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -30,6 +30,7 @@
#include <windows.h>
#include <signal.h> // SIGBREAK
+#include <stdio.h>
// The AttachListener thread services a queue of operations. It blocks in the dequeue
// function until an operation is enqueued. A client enqueues an operation by creating
@@ -269,6 +270,7 @@
if (hPipe != INVALID_HANDLE_VALUE) {
// shouldn't happen as there is a pipe created per operation
if (::GetLastError() == ERROR_PIPE_BUSY) {
+ ::CloseHandle(hPipe);
return INVALID_HANDLE_VALUE;
}
}
@@ -313,7 +315,8 @@
BOOL fSuccess;
char msg[32];
- sprintf(msg, "%d\n", result);
+ _snprintf(msg, sizeof(msg), "%d\n", result);
+ msg[sizeof(msg) - 1] = '\0';
fSuccess = write_pipe(hPipe, msg, (int)strlen(msg));
if (fSuccess) {
--- a/hotspot/src/os/windows/vm/os_windows.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/os/windows/vm/os_windows.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -96,7 +96,7 @@
#include <vdmdbg.h>
// for timer info max values which include all bits
-#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
+#define ALL_64_BITS CONST64(-1)
// For DLL loading/load error detection
// Values of PE COFF
@@ -211,6 +211,7 @@
}
strcpy(home_path, home_dir);
Arguments::set_java_home(home_path);
+ FREE_C_HEAP_ARRAY(char, home_path, mtInternal);
dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1,
mtInternal);
@@ -220,6 +221,7 @@
strcpy(dll_path, home_dir);
strcat(dll_path, bin);
Arguments::set_dll_dir(dll_path);
+ FREE_C_HEAP_ARRAY(char, dll_path, mtInternal);
if (!set_boot_path('\\', ';')) {
return;
@@ -297,6 +299,9 @@
char * buf = NEW_C_HEAP_ARRAY(char, len, mtInternal);
sprintf(buf, "%s%s", Arguments::get_java_home(), ENDORSED_DIR);
Arguments::set_endorsed_dirs(buf);
+ // (Arguments::set_endorsed_dirs() calls SystemProperty::set_value(), which
+ // duplicates the input.)
+ FREE_C_HEAP_ARRAY(char, buf, mtInternal);
#undef ENDORSED_DIR
}
@@ -436,9 +441,9 @@
}
// Diagnostic code to investigate JDK-6573254
- int res = 90115; // non-java thread
+ int res = 50115; // non-java thread
if (thread->is_Java_thread()) {
- res = 60115; // java thread
+ res = 40115; // java thread
}
// Install a win32 structured exception handler around every thread created
@@ -1610,96 +1615,123 @@
void os::win32::print_windows_version(outputStream* st) {
OSVERSIONINFOEX osvi;
- SYSTEM_INFO si;
-
+ VS_FIXEDFILEINFO *file_info;
+ TCHAR kernel32_path[MAX_PATH];
+ UINT len, ret;
+
+ // Use the GetVersionEx information to see if we're on a server or
+ // workstation edition of Windows. Starting with Windows 8.1 we can't
+ // trust the OS version information returned by this API.
ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
-
if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
- st->print_cr("N/A");
+ st->print_cr("Call to GetVersionEx failed");
+ return;
+ }
+ bool is_workstation = (osvi.wProductType == VER_NT_WORKSTATION);
+
+ // Get the full path to \Windows\System32\kernel32.dll and use that for
+ // determining what version of Windows we're running on.
+ len = MAX_PATH - (UINT)strlen("\\kernel32.dll") - 1;
+ ret = GetSystemDirectory(kernel32_path, len);
+ if (ret == 0 || ret > len) {
+ st->print_cr("Call to GetSystemDirectory failed");
+ return;
+ }
+ strncat(kernel32_path, "\\kernel32.dll", MAX_PATH - ret);
+
+ DWORD version_size = GetFileVersionInfoSize(kernel32_path, NULL);
+ if (version_size == 0) {
+ st->print_cr("Call to GetFileVersionInfoSize failed");
+ return;
+ }
+
+ LPTSTR version_info = (LPTSTR)os::malloc(version_size, mtInternal);
+ if (version_info == NULL) {
+ st->print_cr("Failed to allocate version_info");
return;
}
- int os_vers = osvi.dwMajorVersion * 1000 + osvi.dwMinorVersion;
-
- ZeroMemory(&si, sizeof(SYSTEM_INFO));
- if (os_vers >= 5002) {
- // Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we could
- // find out whether we are running on 64 bit processor or not.
- if (os::Kernel32Dll::GetNativeSystemInfoAvailable()) {
- os::Kernel32Dll::GetNativeSystemInfo(&si);
+ if (!GetFileVersionInfo(kernel32_path, NULL, version_size, version_info)) {
+ os::free(version_info);
+ st->print_cr("Call to GetFileVersionInfo failed");
+ return;
+ }
+
+ if (!VerQueryValue(version_info, TEXT("\\"), (LPVOID*)&file_info, &len)) {
+ os::free(version_info);
+ st->print_cr("Call to VerQueryValue failed");
+ return;
+ }
+
+ int major_version = HIWORD(file_info->dwProductVersionMS);
+ int minor_version = LOWORD(file_info->dwProductVersionMS);
+ int build_number = HIWORD(file_info->dwProductVersionLS);
+ int build_minor = LOWORD(file_info->dwProductVersionLS);
+ int os_vers = major_version * 1000 + minor_version;
+ os::free(version_info);
+
+ st->print(" Windows ");
+ switch (os_vers) {
+
+ case 6000:
+ if (is_workstation) {
+ st->print("Vista");
} else {
- GetSystemInfo(&si);
+ st->print("Server 2008");
}
- }
-
- if (osvi.dwPlatformId == VER_PLATFORM_WIN32_NT) {
- switch (os_vers) {
- case 3051: st->print(" Windows NT 3.51"); break;
- case 4000: st->print(" Windows NT 4.0"); break;
- case 5000: st->print(" Windows 2000"); break;
- case 5001: st->print(" Windows XP"); break;
- case 5002:
- if (osvi.wProductType == VER_NT_WORKSTATION &&
- si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
- st->print(" Windows XP x64 Edition");
- } else {
- st->print(" Windows Server 2003 family");
- }
- break;
-
- case 6000:
- if (osvi.wProductType == VER_NT_WORKSTATION) {
- st->print(" Windows Vista");
- } else {
- st->print(" Windows Server 2008");
- }
- break;
-
- case 6001:
- if (osvi.wProductType == VER_NT_WORKSTATION) {
- st->print(" Windows 7");
- } else {
- st->print(" Windows Server 2008 R2");
- }
- break;
-
- case 6002:
- if (osvi.wProductType == VER_NT_WORKSTATION) {
- st->print(" Windows 8");
- } else {
- st->print(" Windows Server 2012");
- }
- break;
-
- case 6003:
- if (osvi.wProductType == VER_NT_WORKSTATION) {
- st->print(" Windows 8.1");
- } else {
- st->print(" Windows Server 2012 R2");
- }
- break;
-
- default: // future os
- // Unrecognized windows, print out its major and minor versions
- st->print(" Windows NT %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion);
+ break;
+
+ case 6001:
+ if (is_workstation) {
+ st->print("7");
+ } else {
+ st->print("Server 2008 R2");
+ }
+ break;
+
+ case 6002:
+ if (is_workstation) {
+ st->print("8");
+ } else {
+ st->print("Server 2012");
+ }
+ break;
+
+ case 6003:
+ if (is_workstation) {
+ st->print("8.1");
+ } else {
+ st->print("Server 2012 R2");
}
- } else {
- switch (os_vers) {
- case 4000: st->print(" Windows 95"); break;
- case 4010: st->print(" Windows 98"); break;
- case 4090: st->print(" Windows Me"); break;
- default: // future windows, print out its major and minor versions
- st->print(" Windows %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion);
+ break;
+
+ case 6004:
+ if (is_workstation) {
+ st->print("10");
+ } else {
+ // The server version name of Windows 10 is not known at this time
+ st->print("%d.%d", major_version, minor_version);
}
- }
-
- if (os_vers >= 6000 && si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
+ break;
+
+ default:
+ // Unrecognized windows, print out its major and minor versions
+ st->print("%d.%d", major_version, minor_version);
+ break;
+ }
+
+ // Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we could
+ // find out whether we are running on 64 bit processor or not
+ SYSTEM_INFO si;
+ ZeroMemory(&si, sizeof(SYSTEM_INFO));
+ os::Kernel32Dll::GetNativeSystemInfo(&si);
+ if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
st->print(" , 64 bit");
}
- st->print(" Build %d", osvi.dwBuildNumber);
- st->print(" %s", osvi.szCSDVersion); // service pack
+ st->print(" Build %d", build_number);
+ st->print(" (%d.%d.%d.%d)", major_version, minor_version, build_number, build_minor);
st->cr();
}
@@ -1807,6 +1839,7 @@
GetModuleFileName(vm_lib_handle, buf, buflen);
}
strncpy(saved_jvm_path, buf, MAX_PATH);
+ saved_jvm_path[MAX_PATH - 1] = '\0';
}
@@ -3719,8 +3752,12 @@
// search system directory
if ((size = GetSystemDirectory(path, pathLen)) > 0) {
- strcat(path, "\\");
- strcat(path, name);
+ if (size >= pathLen) {
+ return NULL; // truncated
+ }
+ if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
+ return NULL; // truncated
+ }
if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
return result;
}
@@ -3728,8 +3765,12 @@
// try Windows directory
if ((size = GetWindowsDirectory(path, pathLen)) > 0) {
- strcat(path, "\\");
- strcat(path, name);
+ if (size >= pathLen) {
+ return NULL; // truncated
+ }
+ if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
+ return NULL; // truncated
+ }
if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
return result;
}
@@ -3740,68 +3781,134 @@
return NULL;
}
-#define MIN_EXIT_MUTEXES 1
-#define MAX_EXIT_MUTEXES 16
-
-struct ExitMutexes {
- DWORD count;
- HANDLE handles[MAX_EXIT_MUTEXES];
-};
-
-static BOOL CALLBACK init_muts_call(PINIT_ONCE, PVOID ppmuts, PVOID*) {
- static ExitMutexes muts;
-
- muts.count = os::processor_count();
- if (muts.count < MIN_EXIT_MUTEXES) {
- muts.count = MIN_EXIT_MUTEXES;
- } else if (muts.count > MAX_EXIT_MUTEXES) {
- muts.count = MAX_EXIT_MUTEXES;
- }
-
- for (DWORD i = 0; i < muts.count; ++i) {
- muts.handles[i] = CreateMutex(NULL, FALSE, NULL);
- if (muts.handles[i] == NULL) {
- return FALSE;
- }
- }
- *((ExitMutexes**)ppmuts) = &muts;
+#define MAX_EXIT_HANDLES 16
+#define EXIT_TIMEOUT 1000 /* 1 sec */
+
+static BOOL CALLBACK init_crit_sect_call(PINIT_ONCE, PVOID pcrit_sect, PVOID*) {
+ InitializeCriticalSection((CRITICAL_SECTION*)pcrit_sect);
return TRUE;
}
int os::win32::exit_process_or_thread(Ept what, int exit_code) {
+ // Basic approach:
+ // - Each exiting thread registers its intent to exit and then does so.
+ // - A thread trying to terminate the process must wait for all
+ // threads currently exiting to complete their exit.
+
if (os::win32::has_exit_bug()) {
- static INIT_ONCE init_once_muts = INIT_ONCE_STATIC_INIT;
- static ExitMutexes* pmuts;
-
- if (!InitOnceExecuteOnce(&init_once_muts, init_muts_call, &pmuts, NULL)) {
- warning("ExitMutex initialization failed in %s: %d\n", __FILE__, __LINE__);
- } else if (WaitForMultipleObjects(pmuts->count, pmuts->handles,
- (what != EPT_THREAD), // exiting process waits for all mutexes
- INFINITE) == WAIT_FAILED) {
- warning("ExitMutex acquisition failed in %s: %d\n", __FILE__, __LINE__);
+  // The array holds the handles of threads that have started exiting by calling
+  // _endthreadex().
+  // It should be large enough to avoid blocking an exiting thread for lack of
+  // a free slot.
+ static HANDLE handles[MAX_EXIT_HANDLES];
+ static int handle_count = 0;
+
+ static INIT_ONCE init_once_crit_sect = INIT_ONCE_STATIC_INIT;
+ static CRITICAL_SECTION crit_sect;
+ int i, j;
+ DWORD res;
+ HANDLE hproc, hthr;
+
+  // The first thread that reaches this point initializes the critical section.
+ if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) {
+ warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__);
+ } else {
+ EnterCriticalSection(&crit_sect);
+
+ if (what == EPT_THREAD) {
+ // Remove from the array those handles of the threads that have completed exiting.
+ for (i = 0, j = 0; i < handle_count; ++i) {
+ res = WaitForSingleObject(handles[i], 0 /* don't wait */);
+ if (res == WAIT_TIMEOUT) {
+ handles[j++] = handles[i];
+ } else {
+ if (res != WAIT_OBJECT_0) {
+ warning("WaitForSingleObject failed in %s: %d\n", __FILE__, __LINE__);
+            // Don't keep the handle if we failed waiting for it.
+ }
+ CloseHandle(handles[i]);
+ }
+ }
+
+ // If there's no free slot in the array of the kept handles, we'll have to
+ // wait until at least one thread completes exiting.
+ if ((handle_count = j) == MAX_EXIT_HANDLES) {
+ res = WaitForMultipleObjects(MAX_EXIT_HANDLES, handles, FALSE, EXIT_TIMEOUT);
+ if (res >= WAIT_OBJECT_0 && res < (WAIT_OBJECT_0 + MAX_EXIT_HANDLES)) {
+ i = (res - WAIT_OBJECT_0);
+ handle_count = MAX_EXIT_HANDLES - 1;
+ for (; i < handle_count; ++i) {
+ handles[i] = handles[i + 1];
+ }
+ } else {
+ warning("WaitForMultipleObjects failed in %s: %d\n", __FILE__, __LINE__);
+          // Don't keep the handles if we failed waiting for them.
+ for (i = 0; i < MAX_EXIT_HANDLES; ++i) {
+ CloseHandle(handles[i]);
+ }
+ handle_count = 0;
+ }
+ }
+
+ // Store a duplicate of the current thread handle in the array of handles.
+ hproc = GetCurrentProcess();
+ hthr = GetCurrentThread();
+ if (!DuplicateHandle(hproc, hthr, hproc, &handles[handle_count],
+ 0, FALSE, DUPLICATE_SAME_ACCESS)) {
+ warning("DuplicateHandle failed in %s: %d\n", __FILE__, __LINE__);
+ } else {
+ ++handle_count;
+ }
+
+ // The current exiting thread has stored its handle in the array, and now
+ // should leave the critical section before calling _endthreadex().
+
+ } else { // what != EPT_THREAD
+ if (handle_count > 0) {
+ // Before ending the process, make sure all the threads that had called
+ // _endthreadex() completed.
+ res = WaitForMultipleObjects(handle_count, handles, TRUE, EXIT_TIMEOUT);
+ if (res == WAIT_FAILED) {
+ warning("WaitForMultipleObjects failed in %s: %d\n", __FILE__, __LINE__);
+ }
+ for (i = 0; i < handle_count; ++i) {
+ CloseHandle(handles[i]);
+ }
+ handle_count = 0;
+ }
+
+      // End the process without leaving the critical section.
+      // This makes sure no other thread executes exit-related code at the same
+      // time, so the race at exit is avoided.
+ if (what == EPT_PROCESS) {
+ ::exit(exit_code);
+ } else {
+ _exit(exit_code);
+ }
+ }
+
+ LeaveCriticalSection(&crit_sect);
}
}
- switch (what) {
- case EPT_THREAD:
+ // We are here if either
+ // - there's no 'race at exit' bug on this OS release;
+ // - initialization of the critical section failed (unlikely);
+ // - the current thread has stored its handle and left the critical section.
+ if (what == EPT_THREAD) {
_endthreadex((unsigned)exit_code);
- break;
-
- case EPT_PROCESS:
+ } else if (what == EPT_PROCESS) {
::exit(exit_code);
- break;
-
- case EPT_PROCESS_DIE:
+ } else {
_exit(exit_code);
- break;
- }
-
- // should not reach here
+ }
+
+ // Should not reach here
return exit_code;
}
-#undef MIN_EXIT_MUTEXES
-#undef MAX_EXIT_MUTEXES
+#undef MAX_EXIT_HANDLES
+#undef EXIT_TIMEOUT
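
A distilled sketch of the registration/wait protocol implemented above, reduced to its two halves; the global names, the fixed 16-slot array, and the omitted error handling and slot reclamation are simplifications for illustration, not the patch itself:

    #include <windows.h>

    static CRITICAL_SECTION g_exit_lock;   // assumed initialized once elsewhere
    static HANDLE g_exiting[16];
    static int    g_exiting_count = 0;

    // A thread about to call _endthreadex() parks a duplicate of its handle so a
    // later process exit can wait for this thread to finish exiting.
    static void register_exiting_thread() {
      EnterCriticalSection(&g_exit_lock);
      if (g_exiting_count < 16 &&
          DuplicateHandle(GetCurrentProcess(), GetCurrentThread(),
                          GetCurrentProcess(), &g_exiting[g_exiting_count],
                          0, FALSE, DUPLICATE_SAME_ACCESS)) {
        ++g_exiting_count;
      }
      LeaveCriticalSection(&g_exit_lock);
    }

    // The thread terminating the process waits (bounded) for all registered
    // exiting threads, then calls exit()/_exit() without leaving the lock,
    // mirroring the code above.
    static void wait_for_exiting_threads() {
      EnterCriticalSection(&g_exit_lock);
      if (g_exiting_count > 0) {
        WaitForMultipleObjects(g_exiting_count, g_exiting, TRUE, 1000 /* ms */);
      }
    }
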
void os::win32::setmode_streams() {
_setmode(_fileno(stdin), _O_BINARY);
@@ -4047,10 +4154,6 @@
return JNI_OK;
}
-void os::init_3(void) {
- return;
-}
-
// Mark the polling page as unreadable
void os::make_polling_page_unreadable(void) {
DWORD old_status;
@@ -4792,27 +4895,46 @@
// 3. Collapse the interrupt_event, the JSR166 parker event, and the objectmonitor ParkEvent
// into a single win32 CreateEvent() handle.
//
+// Assumption:
+// Only one parker can exist on an event, which is why we allocate
+// them per-thread. Multiple unparkers can coexist.
+//
// _Event transitions in park()
// -1 => -1 : illegal
// 1 => 0 : pass - return immediately
-// 0 => -1 : block
+// 0 => -1 : block; then set _Event to 0 before returning
+//
+// _Event transitions in unpark()
+// 0 => 1 : just return
+// 1 => 1 : just return
+// -1 => either 0 or 1; must signal target thread
+// That is, we can safely transition _Event from -1 to either
+// 0 or 1.
//
-// _Event serves as a restricted-range semaphore :
-// -1 : thread is blocked
-// 0 : neutral - thread is running or ready
-// 1 : signaled - thread is running or ready
+// _Event serves as a restricted-range semaphore.
+// -1 : thread is blocked, i.e. there is a waiter
+// 0 : neutral: thread is running or ready,
+// could have been signaled after a wait started
+// 1 : signaled - thread is running or ready
//
-// Another possible encoding of _Event would be
-// with explicit "PARKED" and "SIGNALED" bits.
+// Another possible encoding of _Event would be with
+// explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
+//
int os::PlatformEvent::park(jlong Millis) {
+ // Transitions for _Event:
+ // -1 => -1 : illegal
+ // 1 => 0 : pass - return immediately
+ // 0 => -1 : block; then set _Event to 0 before returning
+
guarantee(_ParkHandle != NULL , "Invariant");
guarantee(Millis > 0 , "Invariant");
- int v;
// CONSIDER: defer assigning a CreateEvent() handle to the Event until
// the initial park() operation.
-
+ // Consider: use atomic decrement instead of CAS-loop
+
+ int v;
for (;;) {
v = _Event;
if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
@@ -4860,9 +4982,15 @@
}
void os::PlatformEvent::park() {
+ // Transitions for _Event:
+ // -1 => -1 : illegal
+ // 1 => 0 : pass - return immediately
+ // 0 => -1 : block; then set _Event to 0 before returning
+
guarantee(_ParkHandle != NULL, "Invariant");
// Invariant: Only the thread associated with the Event/PlatformEvent
// may call park().
+ // Consider: use atomic decrement instead of CAS-loop
int v;
for (;;) {
v = _Event;
@@ -4891,11 +5019,11 @@
guarantee(_ParkHandle != NULL, "Invariant");
// Transitions for _Event:
- // 0 :=> 1
- // 1 :=> 1
- // -1 :=> either 0 or 1; must signal target thread
- // That is, we can safely transition _Event from -1 to either
- // 0 or 1.
+ // 0 => 1 : just return
+ // 1 => 1 : just return
+ // -1 => either 0 or 1; must signal target thread
+ // That is, we can safely transition _Event from -1 to either
+ // 0 or 1.
// See also: "Semaphores in Plan 9" by Mullender & Cox
//
// Note: Forcing a transition from "-1" to "1" on an unpark() means
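
The transition tables above describe a restricted-range semaphore. Below is a portable toy model of the same state machine; the real implementation blocks on a Win32 event handle rather than a condition variable, and only the thread owning the event may park:

    #include <atomic>
    #include <condition_variable>
    #include <mutex>

    class ToyParkEvent {
      std::atomic<int> _event{0};   // -1: a waiter is blocked, 0: neutral, 1: signaled
      std::mutex _mu;
      std::condition_variable _cv;
     public:
      void park() {                 // caller invariant: _event is 0 or 1 on entry
        if (_event.fetch_sub(1) == 1) {
          return;                   // 1 => 0 : permit consumed, return immediately
        }
        std::unique_lock<std::mutex> lk(_mu);
        _cv.wait(lk, [&] { return _event.load() >= 0; });  // 0 => -1 : block
        _event.store(0);            // set the event back to 0 before returning
      }
      void unpark() {
        if (_event.exchange(1) == -1) {   // -1 => 1 here (0 or 1 allowed): signal waiter
          std::lock_guard<std::mutex> lk(_mu);
          _cv.notify_one();
        }                                 // 0/1 => 1 : just return
      }
    };
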
@@ -5091,39 +5219,14 @@
return ::closesocket(fd);
}
-int os::socket_available(int fd, jint *pbytes) {
- int ret = ::ioctlsocket(fd, FIONREAD, (u_long*)pbytes);
- return (ret < 0) ? 0 : 1;
-}
-
int os::socket(int domain, int type, int protocol) {
return ::socket(domain, type, protocol);
}
-int os::listen(int fd, int count) {
- return ::listen(fd, count);
-}
-
int os::connect(int fd, struct sockaddr* him, socklen_t len) {
return ::connect(fd, him, len);
}
-int os::accept(int fd, struct sockaddr* him, socklen_t* len) {
- return ::accept(fd, him, len);
-}
-
-int os::sendto(int fd, char* buf, size_t len, uint flags,
- struct sockaddr* to, socklen_t tolen) {
-
- return ::sendto(fd, buf, (int)len, flags, to, tolen);
-}
-
-int os::recvfrom(int fd, char *buf, size_t nBytes, uint flags,
- sockaddr* from, socklen_t* fromlen) {
-
- return ::recvfrom(fd, buf, (int)nBytes, flags, from, fromlen);
-}
-
int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
return ::recv(fd, buf, (int)nBytes, flags);
}
@@ -5136,45 +5239,6 @@
return ::send(fd, buf, (int)nBytes, flags);
}
-int os::timeout(int fd, long timeout) {
- fd_set tbl;
- struct timeval t;
-
- t.tv_sec = timeout / 1000;
- t.tv_usec = (timeout % 1000) * 1000;
-
- tbl.fd_count = 1;
- tbl.fd_array[0] = fd;
-
- return ::select(1, &tbl, 0, 0, &t);
-}
-
-int os::get_host_name(char* name, int namelen) {
- return ::gethostname(name, namelen);
-}
-
-int os::socket_shutdown(int fd, int howto) {
- return ::shutdown(fd, howto);
-}
-
-int os::bind(int fd, struct sockaddr* him, socklen_t len) {
- return ::bind(fd, him, len);
-}
-
-int os::get_sock_name(int fd, struct sockaddr* him, socklen_t* len) {
- return ::getsockname(fd, him, len);
-}
-
-int os::get_sock_opt(int fd, int level, int optname,
- char* optval, socklen_t* optlen) {
- return ::getsockopt(fd, level, optname, optval, optlen);
-}
-
-int os::set_sock_opt(int fd, int level, int optname,
- const char* optval, socklen_t optlen) {
- return ::setsockopt(fd, level, optname, optval, optlen);
-}
-
// WINDOWS CONTEXT Flags for THREAD_SAMPLING
#if defined(IA32)
#define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
@@ -5367,11 +5431,6 @@
return ::Module32Next(hSnapshot, lpme);
}
-
-inline BOOL os::Kernel32Dll::GetNativeSystemInfoAvailable() {
- return true;
-}
-
inline void os::Kernel32Dll::GetNativeSystemInfo(LPSYSTEM_INFO lpSystemInfo) {
::GetNativeSystemInfo(lpSystemInfo);
}
--- a/hotspot/src/os/windows/vm/os_windows.hpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/os/windows/vm/os_windows.hpp Wed Jul 05 20:07:30 2017 +0200
@@ -210,7 +210,6 @@
static BOOL Module32First(HANDLE,LPMODULEENTRY32);
static BOOL Module32Next(HANDLE,LPMODULEENTRY32);
- static BOOL GetNativeSystemInfoAvailable();
static void GetNativeSystemInfo(LPSYSTEM_INFO);
// NUMA calls
--- a/hotspot/src/os_cpu/linux_x86/vm/os_linux_x86.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/os_cpu/linux_x86/vm/os_linux_x86.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -910,7 +910,7 @@
*/
char* hint = (char*) (Linux::initial_thread_stack_bottom() -
((StackYellowPages + StackRedPages + 1) * page_size));
- char* codebuf = os::reserve_memory(page_size, hint);
+ char* codebuf = os::attempt_reserve_memory_at(page_size, hint);
if ( (codebuf == NULL) || (!os::commit_memory(codebuf, page_size, true)) ) {
return; // No matter, we tried, best effort.
}
--- a/hotspot/src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -33,18 +33,50 @@
#include <sys/systeminfo.h>
#include <kstat.h>
#include <picl.h>
+#include <dlfcn.h>
+#include <link.h>
-extern "C" static int PICL_get_l1_data_cache_line_size_helper(picl_nodehdl_t nodeh, void *result);
-extern "C" static int PICL_get_l2_cache_line_size_helper(picl_nodehdl_t nodeh, void *result);
+extern "C" static int PICL_visit_cpu_helper(picl_nodehdl_t nodeh, void *result);
+
+// Functions from the library we need (signatures should match those in picl.h)
+extern "C" {
+ typedef int (*picl_initialize_func_t)(void);
+ typedef int (*picl_shutdown_func_t)(void);
+ typedef int (*picl_get_root_func_t)(picl_nodehdl_t *nodehandle);
+ typedef int (*picl_walk_tree_by_class_func_t)(picl_nodehdl_t rooth,
+ const char *classname, void *c_args,
+ int (*callback_fn)(picl_nodehdl_t hdl, void *args));
+ typedef int (*picl_get_prop_by_name_func_t)(picl_nodehdl_t nodeh, const char *nm,
+ picl_prophdl_t *ph);
+ typedef int (*picl_get_propval_func_t)(picl_prophdl_t proph, void *valbuf, size_t sz);
+ typedef int (*picl_get_propinfo_func_t)(picl_prophdl_t proph, picl_propinfo_t *pi);
+}
class PICL {
+ // Pointers to functions in the library
+ picl_initialize_func_t _picl_initialize;
+ picl_shutdown_func_t _picl_shutdown;
+ picl_get_root_func_t _picl_get_root;
+ picl_walk_tree_by_class_func_t _picl_walk_tree_by_class;
+ picl_get_prop_by_name_func_t _picl_get_prop_by_name;
+ picl_get_propval_func_t _picl_get_propval;
+ picl_get_propinfo_func_t _picl_get_propinfo;
+ // Handle to the library that is returned by dlopen
+ void *_dl_handle;
+
+ bool open_library();
+ void close_library();
+
+ template<typename FuncType> bool bind(FuncType& func, const char* name);
+ bool bind_library_functions();
+
// Get a value of the integer property. The value in the tree can be either 32 or 64 bit
// depending on the platform. The result is converted to int.
- static int get_int_property(picl_nodehdl_t nodeh, const char* name, int* result) {
+ int get_int_property(picl_nodehdl_t nodeh, const char* name, int* result) {
picl_propinfo_t pinfo;
picl_prophdl_t proph;
- if (picl_get_prop_by_name(nodeh, name, &proph) != PICL_SUCCESS ||
- picl_get_propinfo(proph, &pinfo) != PICL_SUCCESS) {
+ if (_picl_get_prop_by_name(nodeh, name, &proph) != PICL_SUCCESS ||
+ _picl_get_propinfo(proph, &pinfo) != PICL_SUCCESS) {
return PICL_FAILURE;
}
@@ -54,13 +86,13 @@
}
if (pinfo.size == sizeof(int64_t)) {
int64_t val;
- if (picl_get_propval(proph, &val, sizeof(int64_t)) != PICL_SUCCESS) {
+ if (_picl_get_propval(proph, &val, sizeof(int64_t)) != PICL_SUCCESS) {
return PICL_FAILURE;
}
*result = static_cast<int>(val);
} else if (pinfo.size == sizeof(int32_t)) {
int32_t val;
- if (picl_get_propval(proph, &val, sizeof(int32_t)) != PICL_SUCCESS) {
+ if (_picl_get_propval(proph, &val, sizeof(int32_t)) != PICL_SUCCESS) {
return PICL_FAILURE;
}
*result = static_cast<int>(val);
@@ -74,6 +106,7 @@
// Visitor and a state machine that visits integer properties and verifies that the
// values are the same. Stores the unique value observed.
class UniqueValueVisitor {
+ PICL *_picl;
enum {
INITIAL, // Start state, no assignments happened
ASSIGNED, // Assigned a value
@@ -81,7 +114,7 @@
} _state;
int _value;
public:
- UniqueValueVisitor() : _state(INITIAL) { }
+ UniqueValueVisitor(PICL* picl) : _picl(picl), _state(INITIAL) { }
int value() {
assert(_state == ASSIGNED, "Precondition");
return _value;
@@ -96,71 +129,125 @@
bool is_inconsistent() { return _state == INCONSISTENT; }
void set_inconsistent() { _state = INCONSISTENT; }
- static int visit(picl_nodehdl_t nodeh, const char* name, void *arg) {
- UniqueValueVisitor *state = static_cast<UniqueValueVisitor*>(arg);
- assert(!state->is_inconsistent(), "Precondition");
+ void visit(picl_nodehdl_t nodeh, const char* name) {
+ assert(!is_inconsistent(), "Precondition");
int curr;
- if (PICL::get_int_property(nodeh, name, &curr) == PICL_SUCCESS) {
- if (!state->is_assigned()) { // first iteration
- state->set_value(curr);
- } else if (curr != state->value()) { // following iterations
- state->set_inconsistent();
+ if (_picl->get_int_property(nodeh, name, &curr) == PICL_SUCCESS) {
+ if (!is_assigned()) { // first iteration
+ set_value(curr);
+ } else if (curr != value()) { // following iterations
+ set_inconsistent();
}
}
- if (state->is_inconsistent()) {
+ }
+ };
+
+ class CPUVisitor {
+ UniqueValueVisitor _l1_visitor;
+ UniqueValueVisitor _l2_visitor;
+ int _limit; // number of times visit() can be run
+ public:
+ CPUVisitor(PICL *picl, int limit) : _l1_visitor(picl), _l2_visitor(picl), _limit(limit) {}
+ static int visit(picl_nodehdl_t nodeh, void *arg) {
+ CPUVisitor *cpu_visitor = static_cast<CPUVisitor*>(arg);
+ UniqueValueVisitor* l1_visitor = cpu_visitor->l1_visitor();
+ UniqueValueVisitor* l2_visitor = cpu_visitor->l2_visitor();
+ if (!l1_visitor->is_inconsistent()) {
+ l1_visitor->visit(nodeh, "l1-dcache-line-size");
+ }
+ if (!l2_visitor->is_inconsistent()) {
+ l2_visitor->visit(nodeh, "l2-cache-line-size");
+ }
+
+ if (l1_visitor->is_inconsistent() && l2_visitor->is_inconsistent()) {
+ return PICL_WALK_TERMINATE;
+ }
+ cpu_visitor->_limit--;
+ if (cpu_visitor->_limit <= 0) {
return PICL_WALK_TERMINATE;
}
return PICL_WALK_CONTINUE;
}
+ UniqueValueVisitor* l1_visitor() { return &_l1_visitor; }
+ UniqueValueVisitor* l2_visitor() { return &_l2_visitor; }
};
-
int _L1_data_cache_line_size;
int _L2_cache_line_size;
public:
- static int get_l1_data_cache_line_size(picl_nodehdl_t nodeh, void *state) {
- return UniqueValueVisitor::visit(nodeh, "l1-dcache-line-size", state);
- }
- static int get_l2_cache_line_size(picl_nodehdl_t nodeh, void *state) {
- return UniqueValueVisitor::visit(nodeh, "l2-cache-line-size", state);
+ static int visit_cpu(picl_nodehdl_t nodeh, void *state) {
+ return CPUVisitor::visit(nodeh, state);
}
- PICL() : _L1_data_cache_line_size(0), _L2_cache_line_size(0) {
- if (picl_initialize() == PICL_SUCCESS) {
+ PICL(bool is_fujitsu) : _L1_data_cache_line_size(0), _L2_cache_line_size(0), _dl_handle(NULL) {
+ if (!open_library()) {
+ return;
+ }
+ if (_picl_initialize() == PICL_SUCCESS) {
picl_nodehdl_t rooth;
- if (picl_get_root(&rooth) == PICL_SUCCESS) {
- UniqueValueVisitor L1_state;
- // Visit all "cpu" class instances
- picl_walk_tree_by_class(rooth, "cpu", &L1_state, PICL_get_l1_data_cache_line_size_helper);
- if (L1_state.is_initial()) { // Still initial, iteration found no values
- // Try walk all "core" class instances, it might be a Fujitsu machine
- picl_walk_tree_by_class(rooth, "core", &L1_state, PICL_get_l1_data_cache_line_size_helper);
+ if (_picl_get_root(&rooth) == PICL_SUCCESS) {
+ const char* cpu_class = "cpu";
+ // If it's a Fujitsu machine, it's a "core"
+ if (is_fujitsu) {
+ cpu_class = "core";
}
- if (L1_state.is_assigned()) { // Is there a value?
- _L1_data_cache_line_size = L1_state.value();
+ CPUVisitor cpu_visitor(this, os::processor_count());
+ _picl_walk_tree_by_class(rooth, cpu_class, &cpu_visitor, PICL_visit_cpu_helper);
+ if (cpu_visitor.l1_visitor()->is_assigned()) { // Is there a value?
+ _L1_data_cache_line_size = cpu_visitor.l1_visitor()->value();
}
-
- UniqueValueVisitor L2_state;
- picl_walk_tree_by_class(rooth, "cpu", &L2_state, PICL_get_l2_cache_line_size_helper);
- if (L2_state.is_initial()) {
- picl_walk_tree_by_class(rooth, "core", &L2_state, PICL_get_l2_cache_line_size_helper);
- }
- if (L2_state.is_assigned()) {
- _L2_cache_line_size = L2_state.value();
+ if (cpu_visitor.l2_visitor()->is_assigned()) {
+ _L2_cache_line_size = cpu_visitor.l2_visitor()->value();
}
}
- picl_shutdown();
+ _picl_shutdown();
}
+ close_library();
}
unsigned int L1_data_cache_line_size() const { return _L1_data_cache_line_size; }
unsigned int L2_cache_line_size() const { return _L2_cache_line_size; }
};
-extern "C" static int PICL_get_l1_data_cache_line_size_helper(picl_nodehdl_t nodeh, void *result) {
- return PICL::get_l1_data_cache_line_size(nodeh, result);
+
+extern "C" static int PICL_visit_cpu_helper(picl_nodehdl_t nodeh, void *result) {
+ return PICL::visit_cpu(nodeh, result);
+}
+
+template<typename FuncType>
+bool PICL::bind(FuncType& func, const char* name) {
+ func = reinterpret_cast<FuncType>(dlsym(_dl_handle, name));
+ return func != NULL;
}
-extern "C" static int PICL_get_l2_cache_line_size_helper(picl_nodehdl_t nodeh, void *result) {
- return PICL::get_l2_cache_line_size(nodeh, result);
+
+bool PICL::bind_library_functions() {
+ assert(_dl_handle != NULL, "library should be open");
+ return bind(_picl_initialize, "picl_initialize" ) &&
+ bind(_picl_shutdown, "picl_shutdown" ) &&
+ bind(_picl_get_root, "picl_get_root" ) &&
+ bind(_picl_walk_tree_by_class, "picl_walk_tree_by_class") &&
+ bind(_picl_get_prop_by_name, "picl_get_prop_by_name" ) &&
+ bind(_picl_get_propval, "picl_get_propval" ) &&
+ bind(_picl_get_propinfo, "picl_get_propinfo" );
+}
+
+bool PICL::open_library() {
+ _dl_handle = dlopen("libpicl.so.1", RTLD_LAZY);
+ if (_dl_handle == NULL) {
+ warning("PICL (libpicl.so.1) is missing. Performance will not be optimal.");
+ return false;
+ }
+ if (!bind_library_functions()) {
+ assert(false, "unexpected PICL API change");
+ close_library();
+ return false;
+ }
+ return true;
+}
+
+void PICL::close_library() {
+ assert(_dl_handle != NULL, "library should be open");
+ dlclose(_dl_handle);
+ _dl_handle = NULL;
}
// We need to keep these here as long as we have to build on Solaris
@@ -342,7 +429,7 @@
}
// Figure out cache line sizes using PICL
- PICL picl;
+ PICL picl((features & sparc64_family_m) != 0);
_L1_data_cache_line_size = picl.L1_data_cache_line_size();
_L2_cache_line_size = picl.L2_cache_line_size();
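
The PICL rework above replaces a hard link-time dependency on libpicl with runtime binding. A reduced sketch of that dlopen/dlsym pattern for a single entry point; the helper below is illustrative and not the class introduced by the patch:

    #include <dlfcn.h>
    #include <stddef.h>

    typedef int (*picl_initialize_func_t)(void);

    // Bind one libpicl entry point at runtime. Returns NULL (and closes the
    // library) if libpicl.so.1 is absent or the symbol is missing; on success the
    // caller keeps *handle_out open while the function pointer is in use.
    static picl_initialize_func_t bind_picl_initialize(void** handle_out) {
      void* handle = dlopen("libpicl.so.1", RTLD_LAZY);
      if (handle == NULL) {
        return NULL;                       // library missing: degrade gracefully
      }
      picl_initialize_func_t fn =
          reinterpret_cast<picl_initialize_func_t>(dlsym(handle, "picl_initialize"));
      if (fn == NULL) {
        dlclose(handle);                   // unexpected API change
        return NULL;
      }
      *handle_out = handle;
      return fn;
    }
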
--- a/hotspot/src/share/tools/ProjectCreator/WinGammaPlatformVC10.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/tools/ProjectCreator/WinGammaPlatformVC10.java Wed Jul 05 20:07:30 2017 +0200
@@ -411,7 +411,7 @@
"/export:jio_vsnprintf "+
"/export:JVM_GetVersionInfo "+
"/export:JVM_InitAgentProperties");
- addAttr(rv, "AdditionalDependencies", "kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;Wsock32.lib;winmm.lib;psapi.lib");
+ addAttr(rv, "AdditionalDependencies", "kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;Wsock32.lib;winmm.lib;psapi.lib;version.lib");
addAttr(rv, "OutputFile", outDll);
addAttr(rv, "SuppressStartupBanner", "true");
addAttr(rv, "ModuleDefinitionFile", outDir+Util.sep+"vm.def");
--- a/hotspot/src/share/vm/ci/ciEnv.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/ci/ciEnv.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -1093,9 +1093,8 @@
// JVMTI -- compiled method notification (must be done outside lock)
nm->post_compiled_method_load_event();
} else {
- // The CodeCache is full. Print out warning and disable compilation.
+ // The CodeCache is full.
record_failure("code cache is full");
- CompileBroker::handle_full_code_cache(CodeCache::get_code_blob_type(comp_level));
}
}
--- a/hotspot/src/share/vm/classfile/classFileParser.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/classfile/classFileParser.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -2557,7 +2557,7 @@
Array<Method*>* ClassFileParser::parse_methods(bool is_interface,
AccessFlags* promoted_flags,
bool* has_final_method,
- bool* has_default_methods,
+ bool* declares_default_methods,
TRAPS) {
ClassFileStream* cfs = stream();
cfs->guarantee_more(2, CHECK_NULL); // length
@@ -2576,11 +2576,11 @@
if (method->is_final()) {
*has_final_method = true;
}
- if (is_interface && !(*has_default_methods)
- && !method->is_abstract() && !method->is_static()
- && !method->is_private()) {
- // default method
- *has_default_methods = true;
+    // declares_default_methods is set when an interface declares a concrete
+    // instance method (any access flags); it is used for interface initialization
+    // and default method inheritance analysis
+ if (is_interface && !(*declares_default_methods)
+ && !method->is_abstract() && !method->is_static()) {
+ *declares_default_methods = true;
}
_methods->at_put(index, method());
}
@@ -3739,6 +3739,7 @@
JvmtiCachedClassFileData *cached_class_file = NULL;
Handle class_loader(THREAD, loader_data->class_loader());
bool has_default_methods = false;
+ bool declares_default_methods = false;
ResourceMark rm(THREAD);
ClassFileStream* cfs = stream();
@@ -3976,9 +3977,13 @@
Array<Method*>* methods = parse_methods(access_flags.is_interface(),
&promoted_flags,
&has_final_method,
- &has_default_methods,
+ &declares_default_methods,
CHECK_(nullHandle));
+ if (declares_default_methods) {
+ has_default_methods = true;
+ }
+
// Additional attributes
ClassAnnotationCollector parsed_annotations;
parse_classfile_attributes(&parsed_annotations, CHECK_(nullHandle));
@@ -4120,6 +4125,7 @@
this_klass->set_minor_version(minor_version);
this_klass->set_major_version(major_version);
this_klass->set_has_default_methods(has_default_methods);
+ this_klass->set_declares_default_methods(declares_default_methods);
if (!host_klass.is_null()) {
assert (this_klass->is_anonymous(), "should be the same");
--- a/hotspot/src/share/vm/classfile/classFileParser.hpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/classfile/classFileParser.hpp Wed Jul 05 20:07:30 2017 +0200
@@ -247,7 +247,7 @@
Array<Method*>* parse_methods(bool is_interface,
AccessFlags* promoted_flags,
bool* has_final_method,
- bool* has_default_method,
+ bool* declares_default_methods,
TRAPS);
intArray* sort_methods(Array<Method*>* methods);
--- a/hotspot/src/share/vm/classfile/classLoaderData.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/classfile/classLoaderData.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -553,6 +553,7 @@
ClassLoaderData* ClassLoaderDataGraph::_saved_head = NULL;
bool ClassLoaderDataGraph::_should_purge = false;
+bool ClassLoaderDataGraph::_metaspace_oom = false;
// Add a new class loader data node to the list. Assign the newly created
// ClassLoaderData into the java/lang/ClassLoader object as a hidden field
@@ -804,12 +805,17 @@
ClassLoaderData* list = _unloading;
_unloading = NULL;
ClassLoaderData* next = list;
+ bool classes_unloaded = false;
while (next != NULL) {
ClassLoaderData* purge_me = next;
next = purge_me->next();
delete purge_me;
+ classes_unloaded = true;
}
- Metaspace::purge();
+ if (classes_unloaded) {
+ Metaspace::purge();
+ set_metaspace_oom(false);
+ }
}
void ClassLoaderDataGraph::post_class_unload_events(void) {
--- a/hotspot/src/share/vm/classfile/classLoaderData.hpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/classfile/classLoaderData.hpp Wed Jul 05 20:07:30 2017 +0200
@@ -68,6 +68,9 @@
static ClassLoaderData* _saved_head;
static ClassLoaderData* _saved_unloading;
static bool _should_purge;
+ // OOM has been seen in metaspace allocation. Used to prevent some
+ // allocations until class unloading
+ static bool _metaspace_oom;
static ClassLoaderData* add(Handle class_loader, bool anonymous, TRAPS);
static void post_class_unload_events(void);
@@ -107,6 +110,9 @@
}
}
+ static bool has_metaspace_oom() { return _metaspace_oom; }
+ static void set_metaspace_oom(bool value) { _metaspace_oom = value; }
+
static void free_deallocate_lists();
static void dump_on(outputStream * const out) PRODUCT_RETURN;
--- a/hotspot/src/share/vm/classfile/vmSymbols.hpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/classfile/vmSymbols.hpp Wed Jul 05 20:07:30 2017 +0200
@@ -455,6 +455,7 @@
template(object_void_signature, "(Ljava/lang/Object;)V") \
template(object_int_signature, "(Ljava/lang/Object;)I") \
template(object_boolean_signature, "(Ljava/lang/Object;)Z") \
+ template(object_object_signature, "(Ljava/lang/Object;)Ljava/lang/Object;") \
template(string_void_signature, "(Ljava/lang/String;)V") \
template(string_int_signature, "(Ljava/lang/String;)I") \
template(throwable_void_signature, "(Ljava/lang/Throwable;)V") \
@@ -746,6 +747,8 @@
do_name( isPrimitive_name, "isPrimitive") \
do_intrinsic(_getSuperclass, java_lang_Class, getSuperclass_name, void_class_signature, F_RN) \
do_name( getSuperclass_name, "getSuperclass") \
+ do_intrinsic(_Class_cast, java_lang_Class, Class_cast_name, object_object_signature, F_R) \
+ do_name( Class_cast_name, "cast") \
\
do_intrinsic(_getClassAccessFlags, sun_reflect_Reflection, getClassAccessFlags_name, class_int_signature, F_SN) \
do_name( getClassAccessFlags_name, "getClassAccessFlags") \
--- a/hotspot/src/share/vm/code/codeBlob.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/code/codeBlob.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -229,8 +229,8 @@
return blob;
}
-void* BufferBlob::operator new(size_t s, unsigned size, bool is_critical) throw() {
- return CodeCache::allocate(size, CodeBlobType::NonNMethod, is_critical);
+void* BufferBlob::operator new(size_t s, unsigned size) throw() {
+ return CodeCache::allocate(size, CodeBlobType::NonNMethod);
}
void BufferBlob::free(BufferBlob *blob) {
@@ -260,10 +260,7 @@
unsigned int size = allocation_size(cb, sizeof(AdapterBlob));
{
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
- // The parameter 'true' indicates a critical memory allocation.
- // This means that CodeCacheMinimumFreeSpace is used, if necessary
- const bool is_critical = true;
- blob = new (size, is_critical) AdapterBlob(size, cb);
+ blob = new (size) AdapterBlob(size, cb);
}
// Track memory usage statistic after releasing CodeCache_lock
MemoryService::track_code_cache_memory_usage();
@@ -285,10 +282,7 @@
size += round_to(buffer_size, oopSize);
{
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
- // The parameter 'true' indicates a critical memory allocation.
- // This means that CodeCacheMinimumFreeSpace is used, if necessary
- const bool is_critical = true;
- blob = new (size, is_critical) MethodHandlesAdapterBlob(size);
+ blob = new (size) MethodHandlesAdapterBlob(size);
}
// Track memory usage statistic after releasing CodeCache_lock
MemoryService::track_code_cache_memory_usage();
@@ -336,14 +330,14 @@
void* RuntimeStub::operator new(size_t s, unsigned size) throw() {
- void* p = CodeCache::allocate(size, CodeBlobType::NonNMethod, true);
+ void* p = CodeCache::allocate(size, CodeBlobType::NonNMethod);
if (!p) fatal("Initial size of CodeCache is too small");
return p;
}
// operator new shared by all singletons:
void* SingletonBlob::operator new(size_t s, unsigned size) throw() {
- void* p = CodeCache::allocate(size, CodeBlobType::NonNMethod, true);
+ void* p = CodeCache::allocate(size, CodeBlobType::NonNMethod);
if (!p) fatal("Initial size of CodeCache is too small");
return p;
}
--- a/hotspot/src/share/vm/code/codeBlob.hpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/code/codeBlob.hpp Wed Jul 05 20:07:30 2017 +0200
@@ -221,7 +221,7 @@
BufferBlob(const char* name, int size);
BufferBlob(const char* name, int size, CodeBuffer* cb);
- void* operator new(size_t s, unsigned size, bool is_critical = false) throw();
+ void* operator new(size_t s, unsigned size) throw();
public:
// Creation
--- a/hotspot/src/share/vm/code/codeCache.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/code/codeCache.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -44,6 +44,7 @@
#include "runtime/icache.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
+#include "runtime/sweeper.hpp"
#include "runtime/compilationPolicy.hpp"
#include "services/memoryService.hpp"
#include "trace/tracing.hpp"
@@ -192,16 +193,16 @@
}
// Make sure we have enough space for VM internal code
- uint min_code_cache_size = (CodeCacheMinimumUseSpace DEBUG_ONLY(* 3)) + CodeCacheMinimumFreeSpace;
+ uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
if (NonNMethodCodeHeapSize < (min_code_cache_size + code_buffers_size)) {
vm_exit_during_initialization("Not enough space in non-nmethod code heap to run VM.");
}
guarantee(NonProfiledCodeHeapSize + ProfiledCodeHeapSize + NonNMethodCodeHeapSize <= ReservedCodeCacheSize, "Size check");
// Align reserved sizes of CodeHeaps
- size_t non_method_size = ReservedCodeSpace::allocation_align_size_up(NonNMethodCodeHeapSize);
- size_t profiled_size = ReservedCodeSpace::allocation_align_size_up(ProfiledCodeHeapSize);
- size_t non_profiled_size = ReservedCodeSpace::allocation_align_size_up(NonProfiledCodeHeapSize);
+ size_t non_method_size = ReservedCodeSpace::allocation_align_size_up(NonNMethodCodeHeapSize);
+ size_t profiled_size = ReservedCodeSpace::allocation_align_size_up(ProfiledCodeHeapSize);
+ size_t non_profiled_size = ReservedCodeSpace::allocation_align_size_up(NonProfiledCodeHeapSize);
// Compute initial sizes of CodeHeaps
size_t init_non_method_size = MIN2(InitialCodeCacheSize, non_method_size);
@@ -267,6 +268,22 @@
}
}
+const char* CodeCache::get_code_heap_flag_name(int code_blob_type) {
+ switch(code_blob_type) {
+ case CodeBlobType::NonNMethod:
+ return "NonNMethodCodeHeapSize";
+ break;
+ case CodeBlobType::MethodNonProfiled:
+ return "NonProfiledCodeHeapSize";
+ break;
+ case CodeBlobType::MethodProfiled:
+ return "ProfiledCodeHeapSize";
+ break;
+ }
+ ShouldNotReachHere();
+ return NULL;
+}
+
void CodeCache::add_heap(ReservedSpace rs, const char* name, size_t size_initial, int code_blob_type) {
// Check if heap is needed
if (!heap_available(code_blob_type)) {
@@ -332,14 +349,18 @@
return next_blob(get_code_heap(cb), cb);
}
-CodeBlob* CodeCache::allocate(int size, int code_blob_type, bool is_critical) {
- // Do not seize the CodeCache lock here--if the caller has not
- // already done so, we are going to lose bigtime, since the code
- // cache will contain a garbage CodeBlob until the caller can
- // run the constructor for the CodeBlob subclass he is busy
- // instantiating.
+/**
+ * Do not seize the CodeCache lock here--if the caller has not
+ * already done so, we are going to lose bigtime, since the code
+ * cache will contain a garbage CodeBlob until the caller can
+ * run the constructor for the CodeBlob subclass he is busy
+ * instantiating.
+ */
+CodeBlob* CodeCache::allocate(int size, int code_blob_type) {
+ // Possibly wakes up the sweeper thread.
+ NMethodSweeper::notify(code_blob_type);
assert_locked_or_safepoint(CodeCache_lock);
- assert(size > 0, "allocation request must be reasonable");
+ assert(size > 0, err_msg_res("Code cache allocation request must be > 0 but is %d", size));
if (size <= 0) {
return NULL;
}
@@ -350,14 +371,18 @@
assert(heap != NULL, "heap is null");
while (true) {
- cb = (CodeBlob*)heap->allocate(size, is_critical);
+ cb = (CodeBlob*)heap->allocate(size);
if (cb != NULL) break;
if (!heap->expand_by(CodeCacheExpansionSize)) {
// Expansion failed
if (SegmentedCodeCache && (code_blob_type == CodeBlobType::NonNMethod)) {
- // Fallback solution: Store non-nmethod code in the non-profiled code heap
- return allocate(size, CodeBlobType::MethodNonProfiled, is_critical);
+ // Fallback solution: Store non-nmethod code in the non-profiled code heap.
+        // Note that in the sweeper, we check the reverse_free_ratio of the non-profiled
+        // code heap and force stack scanning if less than 10% of the code heap is free.
+ return allocate(size, CodeBlobType::MethodNonProfiled);
}
+ MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+ CompileBroker::handle_full_code_cache(code_blob_type);
return NULL;
}
if (PrintCodeCacheExtension) {
@@ -755,19 +780,6 @@
}
/**
- * Returns true if a CodeHeap is full and sets code_blob_type accordingly.
- */
-bool CodeCache::is_full(int* code_blob_type) {
- FOR_ALL_HEAPS(heap) {
- if ((*heap)->unallocated_capacity() < CodeCacheMinimumFreeSpace) {
- *code_blob_type = (*heap)->code_blob_type();
- return true;
- }
- }
- return false;
-}
-
-/**
* Returns the reverse free ratio. E.g., if 25% (1/4) of the code heap
* is free, reverse_free_ratio() returns 4.
*/
@@ -776,9 +788,13 @@
if (heap == NULL) {
return 0;
}
- double unallocated_capacity = (double)(heap->unallocated_capacity() - CodeCacheMinimumFreeSpace);
+
+ double unallocated_capacity = MAX2((double)heap->unallocated_capacity(), 1.0); // Avoid division by 0;
double max_capacity = (double)heap->max_capacity();
- return max_capacity / unallocated_capacity;
+ double result = max_capacity / unallocated_capacity;
+ assert (max_capacity >= unallocated_capacity, "Must be");
+ assert (result >= 1.0, err_msg_res("reverse_free_ratio must be at least 1. It is %f", result));
+ return result;
}
size_t CodeCache::bytes_allocated_in_freelists() {
@@ -1011,9 +1027,8 @@
// Not yet reported for this heap, report
heap->report_full();
if (SegmentedCodeCache) {
- warning("%s is full. Compiler has been disabled.", CodeCache::get_code_heap_name(code_blob_type));
- warning("Try increasing the code heap size using -XX:%s=",
- (code_blob_type == CodeBlobType::MethodNonProfiled) ? "NonProfiledCodeHeapSize" : "ProfiledCodeHeapSize");
+ warning("%s is full. Compiler has been disabled.", get_code_heap_name(code_blob_type));
+ warning("Try increasing the code heap size using -XX:%s=", get_code_heap_flag_name(code_blob_type));
} else {
warning("CodeCache is full. Compiler has been disabled.");
warning("Try increasing the code cache size using -XX:ReservedCodeCacheSize=");
--- a/hotspot/src/share/vm/code/codeCache.hpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/code/codeCache.hpp Wed Jul 05 20:07:30 2017 +0200
@@ -100,6 +100,8 @@
static void add_heap(ReservedSpace rs, const char* name, size_t size_initial, int code_blob_type);
static CodeHeap* get_code_heap(CodeBlob* cb); // Returns the CodeHeap for the given CodeBlob
static CodeHeap* get_code_heap(int code_blob_type); // Returns the CodeHeap for the given CodeBlobType
+ // Returns the name of the VM option to set the size of the corresponding CodeHeap
+ static const char* get_code_heap_flag_name(int code_blob_type);
static bool heap_available(int code_blob_type); // Returns true if an own CodeHeap for the given CodeBlobType is available
static ReservedCodeSpace reserve_heap_memory(size_t size); // Reserves one continuous chunk of memory for the CodeHeaps
@@ -118,16 +120,16 @@
static void initialize();
// Allocation/administration
- static CodeBlob* allocate(int size, int code_blob_type, bool is_critical = false); // allocates a new CodeBlob
- static void commit(CodeBlob* cb); // called when the allocated CodeBlob has been filled
- static int alignment_unit(); // guaranteed alignment of all CodeBlobs
- static int alignment_offset(); // guaranteed offset of first CodeBlob byte within alignment unit (i.e., allocation header)
- static void free(CodeBlob* cb); // frees a CodeBlob
- static bool contains(void *p); // returns whether p is included
- static void blobs_do(void f(CodeBlob* cb)); // iterates over all CodeBlobs
- static void blobs_do(CodeBlobClosure* f); // iterates over all CodeBlobs
- static void nmethods_do(void f(nmethod* nm)); // iterates over all nmethods
- static void alive_nmethods_do(void f(nmethod* nm)); // iterates over all alive nmethods
+ static CodeBlob* allocate(int size, int code_blob_type); // allocates a new CodeBlob
+ static void commit(CodeBlob* cb); // called when the allocated CodeBlob has been filled
+ static int alignment_unit(); // guaranteed alignment of all CodeBlobs
+ static int alignment_offset(); // guaranteed offset of first CodeBlob byte within alignment unit (i.e., allocation header)
+ static void free(CodeBlob* cb); // frees a CodeBlob
+ static bool contains(void *p); // returns whether p is included
+ static void blobs_do(void f(CodeBlob* cb)); // iterates over all CodeBlobs
+ static void blobs_do(CodeBlobClosure* f); // iterates over all CodeBlobs
+ static void nmethods_do(void f(nmethod* nm)); // iterates over all nmethods
+ static void alive_nmethods_do(void f(nmethod* nm)); // iterates over all alive nmethods
// Lookup
static CodeBlob* find_blob(void* start); // Returns the CodeBlob containing the given address
@@ -180,7 +182,6 @@
static size_t unallocated_capacity();
static size_t max_capacity();
- static bool is_full(int* code_blob_type);
static double reverse_free_ratio(int code_blob_type);
static bool needs_cache_clean() { return _needs_cache_clean; }
--- a/hotspot/src/share/vm/code/nmethod.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/code/nmethod.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -804,10 +804,7 @@
#endif // def HAVE_DTRACE_H
void* nmethod::operator new(size_t size, int nmethod_size, int comp_level) throw () {
- // With a SegmentedCodeCache, nmethods are allocated on separate heaps and therefore do not share memory
- // with critical CodeBlobs. We define the allocation as critical to make sure all code heap memory is used.
- bool is_critical = SegmentedCodeCache;
- return CodeCache::allocate(nmethod_size, CodeCache::get_code_blob_type(comp_level), is_critical);
+ return CodeCache::allocate(nmethod_size, CodeCache::get_code_blob_type(comp_level));
}
nmethod::nmethod(
--- a/hotspot/src/share/vm/code/vtableStubs.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/code/vtableStubs.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -63,7 +63,6 @@
// If changing the name, update the other file accordingly.
BufferBlob* blob = BufferBlob::create("vtable chunks", bytes);
if (blob == NULL) {
- CompileBroker::handle_full_code_cache(CodeBlobType::NonNMethod);
return NULL;
}
_chunk = blob->content_begin();
--- a/hotspot/src/share/vm/compiler/compileBroker.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/compiler/compileBroker.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -156,8 +156,6 @@
CompileQueue* CompileBroker::_c2_compile_queue = NULL;
CompileQueue* CompileBroker::_c1_compile_queue = NULL;
-GrowableArray<CompilerThread*>* CompileBroker::_compiler_threads = NULL;
-
class CompilationLog : public StringEventLog {
public:
@@ -187,6 +185,14 @@
lm.print("\n");
log(thread, "%s", (const char*)lm);
}
+
+ void log_metaspace_failure(const char* reason) {
+ ResourceMark rm;
+ StringLogMessage lm;
+ lm.print("%4d COMPILE PROFILING SKIPPED: %s", -1, reason);
+ lm.print("\n");
+ log(JavaThread::current(), "%s", (const char*)lm);
+ }
};
static CompilationLog* _compilation_log = NULL;
@@ -649,13 +655,10 @@
lock()->notify_all();
}
-// ------------------------------------------------------------------
-// CompileQueue::get
-//
-// Get the next CompileTask from a CompileQueue
+/**
+ * Get the next CompileTask from a CompileQueue
+ */
CompileTask* CompileQueue::get() {
- NMethodSweeper::possibly_sweep();
-
MutexLocker locker(lock());
// If _first is NULL we have no more compile jobs. There are two reasons for
// having no compile jobs: First, we compiled everything we wanted. Second,
@@ -668,35 +671,16 @@
return NULL;
}
- if (UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs()) {
- // Wait a certain amount of time to possibly do another sweep.
- // We must wait until stack scanning has happened so that we can
- // transition a method's state from 'not_entrant' to 'zombie'.
- long wait_time = NmethodSweepCheckInterval * 1000;
- if (FLAG_IS_DEFAULT(NmethodSweepCheckInterval)) {
- // Only one thread at a time can do sweeping. Scale the
- // wait time according to the number of compiler threads.
- // As a result, the next sweep is likely to happen every 100ms
- // with an arbitrary number of threads that do sweeping.
- wait_time = 100 * CICompilerCount;
- }
- bool timeout = lock()->wait(!Mutex::_no_safepoint_check_flag, wait_time);
- if (timeout) {
- MutexUnlocker ul(lock());
- NMethodSweeper::possibly_sweep();
- }
- } else {
- // If there are no compilation tasks and we can compile new jobs
- // (i.e., there is enough free space in the code cache) there is
- // no need to invoke the sweeper. As a result, the hotness of methods
- // remains unchanged. This behavior is desired, since we want to keep
- // the stable state, i.e., we do not want to evict methods from the
- // code cache if it is unnecessary.
- // We need a timed wait here, since compiler threads can exit if compilation
- // is disabled forever. We use 5 seconds wait time; the exiting of compiler threads
- // is not critical and we do not want idle compiler threads to wake up too often.
- lock()->wait(!Mutex::_no_safepoint_check_flag, 5*1000);
- }
+ // If there are no compilation tasks and we can compile new jobs
+ // (i.e., there is enough free space in the code cache) there is
+ // no need to invoke the sweeper. As a result, the hotness of methods
+ // remains unchanged. This behavior is desired, since we want to keep
+ // the stable state, i.e., we do not want to evict methods from the
+ // code cache if it is unnecessary.
+ // We need a timed wait here, since compiler threads can exit if compilation
+ // is disabled forever. We use 5 seconds wait time; the exiting of compiler threads
+ // is not critical and we do not want idle compiler threads to wake up too often.
+ lock()->wait(!Mutex::_no_safepoint_check_flag, 5*1000);
}
if (CompileBroker::is_compilation_disabled_forever()) {
@@ -886,8 +870,8 @@
_compilers[1] = new SharkCompiler();
#endif // SHARK
- // Start the CompilerThreads
- init_compiler_threads(c1_count, c2_count);
+ // Start the compiler thread(s) and the sweeper thread
+ init_compiler_sweeper_threads(c1_count, c2_count);
// totalTime performance counter is always created as it is required
// by the implementation of java.lang.management.CompilationMBean.
{
@@ -991,13 +975,10 @@
}
-CompilerThread* CompileBroker::make_compiler_thread(const char* name, CompileQueue* queue, CompilerCounters* counters,
- AbstractCompiler* comp, TRAPS) {
- CompilerThread* compiler_thread = NULL;
-
- Klass* k =
- SystemDictionary::resolve_or_fail(vmSymbols::java_lang_Thread(),
- true, CHECK_0);
+JavaThread* CompileBroker::make_thread(const char* name, CompileQueue* queue, CompilerCounters* counters,
+ AbstractCompiler* comp, bool compiler_thread, TRAPS) {
+ JavaThread* thread = NULL;
+ Klass* k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_Thread(), true, CHECK_0);
instanceKlassHandle klass (THREAD, k);
instanceHandle thread_oop = klass->allocate_instance_handle(CHECK_0);
Handle string = java_lang_String::create_from_str(name, CHECK_0);
@@ -1015,7 +996,11 @@
{
MutexLocker mu(Threads_lock, THREAD);
- compiler_thread = new CompilerThread(queue, counters);
+ if (compiler_thread) {
+ thread = new CompilerThread(queue, counters);
+ } else {
+ thread = new CodeCacheSweeperThread();
+ }
// At this point the new CompilerThread data-races with this startup
    // thread (which I believe is the primordial thread and NOT the VM
// thread). This means Java bytecodes being executed at startup can
@@ -1028,12 +1013,12 @@
// in that case. However, since this must work and we do not allow
// exceptions anyway, check and abort if this fails.
- if (compiler_thread == NULL || compiler_thread->osthread() == NULL){
+ if (thread == NULL || thread->osthread() == NULL) {
vm_exit_during_initialization("java.lang.OutOfMemoryError",
os::native_thread_creation_failed_msg());
}
- java_lang_Thread::set_thread(thread_oop(), compiler_thread);
+ java_lang_Thread::set_thread(thread_oop(), thread);
// Note that this only sets the JavaThread _priority field, which by
// definition is limited to Java priorities and not OS priorities.
@@ -1054,24 +1039,26 @@
native_prio = os::java_to_os_priority[NearMaxPriority];
}
}
- os::set_native_priority(compiler_thread, native_prio);
+ os::set_native_priority(thread, native_prio);
java_lang_Thread::set_daemon(thread_oop());
- compiler_thread->set_threadObj(thread_oop());
- compiler_thread->set_compiler(comp);
- Threads::add(compiler_thread);
- Thread::start(compiler_thread);
+ thread->set_threadObj(thread_oop());
+ if (compiler_thread) {
+ thread->as_CompilerThread()->set_compiler(comp);
+ }
+ Threads::add(thread);
+ Thread::start(thread);
}
// Let go of Threads_lock before yielding
os::naked_yield(); // make sure that the compiler thread is started early (especially helpful on SOLARIS)
- return compiler_thread;
+ return thread;
}
-void CompileBroker::init_compiler_threads(int c1_compiler_count, int c2_compiler_count) {
+void CompileBroker::init_compiler_sweeper_threads(int c1_compiler_count, int c2_compiler_count) {
EXCEPTION_MARK;
#if !defined(ZERO) && !defined(SHARK)
assert(c2_compiler_count > 0 || c1_compiler_count > 0, "No compilers?");
@@ -1088,17 +1075,14 @@
int compiler_count = c1_compiler_count + c2_compiler_count;
- _compiler_threads =
- new (ResourceObj::C_HEAP, mtCompiler) GrowableArray<CompilerThread*>(compiler_count, true);
-
char name_buffer[256];
+ const bool compiler_thread = true;
for (int i = 0; i < c2_compiler_count; i++) {
// Create a name for our thread.
sprintf(name_buffer, "C2 CompilerThread%d", i);
CompilerCounters* counters = new CompilerCounters("compilerThread", i, CHECK);
// Shark and C2
- CompilerThread* new_thread = make_compiler_thread(name_buffer, _c2_compile_queue, counters, _compilers[1], CHECK);
- _compiler_threads->append(new_thread);
+ make_thread(name_buffer, _c2_compile_queue, counters, _compilers[1], compiler_thread, CHECK);
}
for (int i = c2_compiler_count; i < compiler_count; i++) {
@@ -1106,13 +1090,17 @@
sprintf(name_buffer, "C1 CompilerThread%d", i);
CompilerCounters* counters = new CompilerCounters("compilerThread", i, CHECK);
// C1
- CompilerThread* new_thread = make_compiler_thread(name_buffer, _c1_compile_queue, counters, _compilers[0], CHECK);
- _compiler_threads->append(new_thread);
+ make_thread(name_buffer, _c1_compile_queue, counters, _compilers[0], compiler_thread, CHECK);
}
if (UsePerfData) {
PerfDataManager::create_constant(SUN_CI, "threads", PerfData::U_Bytes, compiler_count, CHECK);
}
+
+ if (MethodFlushing) {
+ // Initialize the sweeper thread
+ make_thread("Sweeper thread", NULL, NULL, NULL, false, CHECK);
+ }
}
@@ -1759,13 +1747,6 @@
// We need this HandleMark to avoid leaking VM handles.
HandleMark hm(thread);
- // Check if the CodeCache is full
- int code_blob_type = 0;
- if (CodeCache::is_full(&code_blob_type)) {
- // The CodeHeap for code_blob_type is really full
- handle_full_code_cache(code_blob_type);
- }
-
CompileTask* task = queue->get();
if (task == NULL) {
continue;
@@ -1773,8 +1754,9 @@
// Give compiler threads an extra quanta. They tend to be bursty and
// this helps the compiler to finish up the job.
- if( CompilerThreadHintNoPreempt )
+ if (CompilerThreadHintNoPreempt) {
os::hint_no_preempt();
+ }
// trace per thread time and compile statistics
CompilerCounters* counters = ((CompilerThread*)thread)->counters();
@@ -1843,6 +1825,18 @@
warning("Cannot open log file: %s", file_name);
}
+void CompileBroker::log_metaspace_failure() {
+ const char* message = "some methods may not be compiled because metaspace "
+ "is out of memory";
+ if (_compilation_log != NULL) {
+ _compilation_log->log_metaspace_failure(message);
+ }
+ if (PrintCompilation) {
+ tty->print_cr("COMPILE PROFILING SKIPPED: %s", message);
+ }
+}
+
+
// ------------------------------------------------------------------
// CompileBroker::set_should_block
//
@@ -2074,8 +2068,10 @@
}
/**
- * The CodeCache is full. Print out warning and disable compilation
- * or try code cache cleaning so compilation can continue later.
+ * The CodeCache is full. Print warning and disable compilation.
+ * Schedule code cache cleaning so compilation can continue later.
+ * This function needs to be called only from CodeCache::allocate(),
+ * since we currently handle a full code cache uniformly.
*/
void CompileBroker::handle_full_code_cache(int code_blob_type) {
UseInterpreter = true;
@@ -2107,10 +2103,6 @@
if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) {
NMethodSweeper::log_sweep("disable_compiler");
}
- // Switch to 'vm_state'. This ensures that possibly_sweep() can be called
- // without having to consider the state in which the current thread is.
- ThreadInVMfromUnknown in_vm;
- NMethodSweeper::possibly_sweep();
} else {
disable_compilation_forever();
}
--- a/hotspot/src/share/vm/compiler/compileBroker.hpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/compiler/compileBroker.hpp Wed Jul 05 20:07:30 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -290,8 +290,6 @@
static CompileQueue* _c2_compile_queue;
static CompileQueue* _c1_compile_queue;
- static GrowableArray<CompilerThread*>* _compiler_threads;
-
// performance counters
static PerfCounter* _perf_total_compilation;
static PerfCounter* _perf_native_compilation;
@@ -339,8 +337,8 @@
static volatile jint _print_compilation_warning;
- static CompilerThread* make_compiler_thread(const char* name, CompileQueue* queue, CompilerCounters* counters, AbstractCompiler* comp, TRAPS);
- static void init_compiler_threads(int c1_compiler_count, int c2_compiler_count);
+ static JavaThread* make_thread(const char* name, CompileQueue* queue, CompilerCounters* counters, AbstractCompiler* comp, bool compiler_thread, TRAPS);
+ static void init_compiler_sweeper_threads(int c1_compiler_count, int c2_compiler_count);
static bool compilation_is_complete (methodHandle method, int osr_bci, int comp_level);
static bool compilation_is_prohibited(methodHandle method, int osr_bci, int comp_level);
static bool is_compile_blocking();
@@ -473,6 +471,9 @@
static int get_sum_nmethod_code_size() { return _sum_nmethod_code_size; }
static long get_peak_compilation_time() { return _peak_compilation_time; }
static long get_total_compilation_time() { return _t_total_compilation.milliseconds(); }
+
+ // Log that compilation profiling is skipped because metaspace is full.
+ static void log_metaspace_failure();
};
#endif // SHARE_VM_COMPILER_COMPILEBROKER_HPP
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -127,41 +127,6 @@
};
-class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure {
- size_t _num_processed;
- CardTableModRefBS* _ctbs;
- int _histo[256];
-
- public:
- ClearLoggedCardTableEntryClosure() :
- _num_processed(0), _ctbs(G1CollectedHeap::heap()->g1_barrier_set())
- {
- for (int i = 0; i < 256; i++) _histo[i] = 0;
- }
-
- bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
- unsigned char* ujb = (unsigned char*)card_ptr;
- int ind = (int)(*ujb);
- _histo[ind]++;
-
- *card_ptr = (jbyte)CardTableModRefBS::clean_card_val();
- _num_processed++;
-
- return true;
- }
-
- size_t num_processed() { return _num_processed; }
-
- void print_histo() {
- gclog_or_tty->print_cr("Card table value histogram:");
- for (int i = 0; i < 256; i++) {
- if (_histo[i] != 0) {
- gclog_or_tty->print_cr(" %d: %d", i, _histo[i]);
- }
- }
- }
-};
-
class RedirtyLoggedCardTableEntryClosure : public CardTableEntryClosure {
private:
size_t _num_processed;
@@ -475,48 +440,6 @@
return !hr->is_humongous();
}
-void G1CollectedHeap::check_ct_logs_at_safepoint() {
- DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
- CardTableModRefBS* ct_bs = g1_barrier_set();
-
- // Count the dirty cards at the start.
- CountNonCleanMemRegionClosure count1(this);
- ct_bs->mod_card_iterate(&count1);
- int orig_count = count1.n();
-
- // First clear the logged cards.
- ClearLoggedCardTableEntryClosure clear;
- dcqs.apply_closure_to_all_completed_buffers(&clear);
- dcqs.iterate_closure_all_threads(&clear, false);
- clear.print_histo();
-
- // Now ensure that there's no dirty cards.
- CountNonCleanMemRegionClosure count2(this);
- ct_bs->mod_card_iterate(&count2);
- if (count2.n() != 0) {
- gclog_or_tty->print_cr("Card table has %d entries; %d originally",
- count2.n(), orig_count);
- }
- guarantee(count2.n() == 0, "Card table should be clean.");
-
- RedirtyLoggedCardTableEntryClosure redirty;
- dcqs.apply_closure_to_all_completed_buffers(&redirty);
- dcqs.iterate_closure_all_threads(&redirty, false);
- gclog_or_tty->print_cr("Log entries = %d, dirty cards = %d.",
- clear.num_processed(), orig_count);
- guarantee(redirty.num_processed() == clear.num_processed(),
- err_msg("Redirtied "SIZE_FORMAT" cards, bug cleared "SIZE_FORMAT,
- redirty.num_processed(), clear.num_processed()));
-
- CountNonCleanMemRegionClosure count3(this);
- ct_bs->mod_card_iterate(&count3);
- if (count3.n() != orig_count) {
- gclog_or_tty->print_cr("Should have restored them all: orig = %d, final = %d.",
- orig_count, count3.n());
- guarantee(count3.n() >= orig_count, "Should have restored them all.");
- }
-}
-
// Private class members.
G1CollectedHeap* G1CollectedHeap::_g1h;
@@ -5760,14 +5683,10 @@
// not copied during the pause.
process_discovered_references(n_workers);
- // Weak root processing.
- {
+ if (G1StringDedup::is_enabled()) {
G1STWIsAliveClosure is_alive(this);
G1KeepAliveClosure keep_alive(this);
- JNIHandles::weak_oops_do(&is_alive, &keep_alive);
- if (G1StringDedup::is_enabled()) {
- G1StringDedup::unlink_or_oops_do(&is_alive, &keep_alive);
- }
+ G1StringDedup::unlink_or_oops_do(&is_alive, &keep_alive);
}
_allocator->release_gc_alloc_regions(n_workers, evacuation_info);
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Wed Jul 05 20:07:30 2017 +0200
@@ -797,9 +797,6 @@
// The closure used to refine a single card.
RefineCardTableEntryClosure* _refine_cte_cl;
- // A function to check the consistency of dirty card logs.
- void check_ct_logs_at_safepoint();
-
// A DirtyCardQueueSet that is used to hold cards that contain
// references into the current collection set. This is used to
// update the remembered sets of the regions in the collection
--- a/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -1077,7 +1077,6 @@
address SignatureHandlerLibrary::set_handler_blob() {
BufferBlob* handler_blob = BufferBlob::create("native signature handlers", blob_size);
if (handler_blob == NULL) {
- CompileBroker::handle_full_code_cache(CodeBlobType::NonNMethod);
return NULL;
}
address handler = handler_blob->code_begin();
--- a/hotspot/src/share/vm/memory/heap.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/memory/heap.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -171,13 +171,13 @@
}
-void* CodeHeap::allocate(size_t instance_size, bool is_critical) {
+void* CodeHeap::allocate(size_t instance_size) {
size_t number_of_segments = size_to_segments(instance_size + header_size());
assert(segments_to_size(number_of_segments) >= sizeof(FreeBlock), "not enough room for FreeList");
// First check if we can satisfy request from freelist
NOT_PRODUCT(verify());
- HeapBlock* block = search_freelist(number_of_segments, is_critical);
+ HeapBlock* block = search_freelist(number_of_segments);
NOT_PRODUCT(verify());
if (block != NULL) {
@@ -191,15 +191,6 @@
// Ensure minimum size for allocation to the heap.
number_of_segments = MAX2((int)CodeCacheMinBlockLength, (int)number_of_segments);
- if (!is_critical) {
- // Make sure the allocation fits in the unallocated heap without using
- // the CodeCacheMimimumFreeSpace that is reserved for critical allocations.
- if (segments_to_size(number_of_segments) > (heap_unallocated_capacity() - CodeCacheMinimumFreeSpace)) {
- // Fail allocation
- return NULL;
- }
- }
-
if (_next_segment + number_of_segments <= _number_of_committed_segments) {
mark_segmap_as_used(_next_segment, _next_segment + number_of_segments);
HeapBlock* b = block_at(_next_segment);
@@ -427,24 +418,17 @@
* Search freelist for an entry on the list with the best fit.
* @return NULL, if no one was found
*/
-FreeBlock* CodeHeap::search_freelist(size_t length, bool is_critical) {
+FreeBlock* CodeHeap::search_freelist(size_t length) {
FreeBlock* found_block = NULL;
FreeBlock* found_prev = NULL;
size_t found_length = 0;
FreeBlock* prev = NULL;
FreeBlock* cur = _freelist;
- const size_t critical_boundary = (size_t)high_boundary() - CodeCacheMinimumFreeSpace;
// Search for first block that fits
while(cur != NULL) {
if (cur->length() >= length) {
- // Non critical allocations are not allowed to use the last part of the code heap.
- // Make sure the end of the allocation doesn't cross into the last part of the code heap.
- if (!is_critical && (((size_t)cur + length) > critical_boundary)) {
- // The freelist is sorted by address - if one fails, all consecutive will also fail.
- break;
- }
// Remember block, its previous element, and its length
found_block = cur;
found_prev = prev;
--- a/hotspot/src/share/vm/memory/heap.hpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/memory/heap.hpp Wed Jul 05 20:07:30 2017 +0200
@@ -120,7 +120,7 @@
// Toplevel freelist management
void add_to_freelist(HeapBlock* b);
- FreeBlock* search_freelist(size_t length, bool is_critical);
+ FreeBlock* search_freelist(size_t length);
// Iteration helpers
void* next_free(HeapBlock* b) const;
@@ -140,8 +140,8 @@
bool expand_by(size_t size); // expands committed memory by size
// Memory allocation
- void* allocate (size_t size, bool is_critical); // allocates a block of size or returns NULL
- void deallocate(void* p); // deallocates a block
+ void* allocate (size_t size); // Allocate 'size' bytes in the code cache or return NULL
+ void deallocate(void* p); // Deallocate memory
// Attributes
char* low_boundary() const { return _memory.low_boundary (); }
--- a/hotspot/src/share/vm/memory/metaspace.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/memory/metaspace.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -3157,6 +3157,16 @@
SharedMiscDataSize = align_size_up(SharedMiscDataSize, max_alignment);
SharedMiscCodeSize = align_size_up(SharedMiscCodeSize, max_alignment);
+ // the min_misc_code_size estimate is based on MetaspaceShared::generate_vtable_methods()
+ uintx min_misc_code_size = align_size_up(
+ (MetaspaceShared::num_virtuals * MetaspaceShared::vtbl_list_size) *
+ (sizeof(void*) + MetaspaceShared::vtbl_method_size) + MetaspaceShared::vtbl_common_code_size,
+ max_alignment);
+
+ if (SharedMiscCodeSize < min_misc_code_size) {
+ report_out_of_shared_space(SharedMiscCode);
+ }
+
// Initialize with the sum of the shared space sizes. The read-only
// and read write metaspace chunks will be allocated out of this and the
// remainder is the misc code and data chunks.
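Editor's note: the new check ensures SharedMiscCodeSize can hold the vtable stubs generated by MetaspaceShared::generate_vtable_methods(). A rough worked example of the minimum, using the x64 constants added to metaspaceShared.hpp further down in this change (illustrative arithmetic only, before alignment):

  // Illustrative arithmetic only (x64 values from metaspaceShared.hpp):
  const size_t stubs    = 200 * 17;                 // num_virtuals * vtbl_list_size
  const size_t per_stub = 8 + 16;                   // sizeof(void*) + vtbl_method_size
  const size_t min_code = stubs * per_stub + 1024;  // + vtbl_common_code_size (1*K)
  // min_code == 82624 bytes, so a SharedMiscCodeSize below ~81K (rounded up to
  // max_alignment) now fails early via report_out_of_shared_space(SharedMiscCode).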
--- a/hotspot/src/share/vm/memory/metaspaceShared.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/memory/metaspaceShared.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -714,12 +714,17 @@
int class_list_path_len = (int)strlen(class_list_path_str);
if (class_list_path_len >= 3) {
if (strcmp(class_list_path_str + class_list_path_len - 3, "lib") != 0) {
- strcat(class_list_path_str, os::file_separator());
- strcat(class_list_path_str, "lib");
+ if (class_list_path_len < JVM_MAXPATHLEN - 4) {
+ strncat(class_list_path_str, os::file_separator(), 1);
+ strncat(class_list_path_str, "lib", 3);
+ }
}
}
- strcat(class_list_path_str, os::file_separator());
- strcat(class_list_path_str, "classlist");
+ class_list_path_len = (int)strlen(class_list_path_str);
+ if (class_list_path_len < JVM_MAXPATHLEN - 10) {
+ strncat(class_list_path_str, os::file_separator(), 1);
+ strncat(class_list_path_str, "classlist", 9);
+ }
class_list_path = class_list_path_str;
} else {
class_list_path = SharedClassListFile;
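Editor's note: the unbounded strcat calls are replaced by length checks plus strncat so the constructed classlist path can never overrun the JVM_MAXPATHLEN buffer. A minimal sketch of the same bounded-append idiom, with hypothetical names and not taken from the patch:

  #include <string.h>

  // Hypothetical helper: append 'comp' to 'buf' only when it fits, so the
  // buffer always stays NUL-terminated within 'buf_size'.
  static bool append_if_fits(char* buf, size_t buf_size, const char* comp) {
    size_t used = strlen(buf);
    size_t need = strlen(comp);
    if (used + need + 1 > buf_size) {
      return false;              // would overflow; let the caller decide how to fail
    }
    strncat(buf, comp, need);    // bounded append
    return true;
  }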
--- a/hotspot/src/share/vm/memory/metaspaceShared.hpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/memory/metaspaceShared.hpp Wed Jul 05 20:07:30 2017 +0200
@@ -57,11 +57,16 @@
static bool _archive_loading_failed;
public:
enum {
- vtbl_list_size = 17, // number of entries in the shared space vtable list.
- num_virtuals = 200 // maximum number of virtual functions
- // If virtual functions are added to Metadata,
- // this number needs to be increased. Also,
- // SharedMiscCodeSize will need to be increased.
+ vtbl_list_size = 17, // number of entries in the shared space vtable list.
+ num_virtuals = 200, // maximum number of virtual functions
+ // If virtual functions are added to Metadata,
+ // this number needs to be increased. Also,
+ // SharedMiscCodeSize will need to be increased.
+ // The following 2 sizes were based on
+ // MetaspaceShared::generate_vtable_methods()
+    vtbl_method_size = 16, // conservative size of the movl and jmp instructions
+ // for the x64 platform
+ vtbl_common_code_size = (1*K) // conservative size of the "common_code" for the x64 platform
};
enum {
--- a/hotspot/src/share/vm/oops/constMethod.hpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/oops/constMethod.hpp Wed Jul 05 20:07:30 2017 +0200
@@ -277,7 +277,7 @@
bool has_stackmap_table() const { return _stackmap_data != NULL; }
void init_fingerprint() {
- const uint64_t initval = CONST64(0x8000000000000000);
+ const uint64_t initval = UCONST64(0x8000000000000000);
_fingerprint = initval;
}
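Editor's note: CONST64 is swapped for UCONST64 above because 0x8000000000000000 is one past INT64_MAX and cannot be expressed as a signed 64-bit literal. Assuming the usual LL/ULL suffix expansion of these HotSpot macros, the difference looks like this standalone illustration:

  #include <stdint.h>

  const uint64_t ok = 0x8000000000000000ULL;   // what UCONST64(...) expands to: 2^63
  // const int64_t bad = 0x8000000000000000LL; // exceeds INT64_MAX; compilers warn,
  //                                           // or silently pick an unsigned type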
--- a/hotspot/src/share/vm/oops/constantPool.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/oops/constantPool.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -206,7 +206,8 @@
}
}
-Klass* ConstantPool::klass_at_impl(constantPoolHandle this_cp, int which, TRAPS) {
+Klass* ConstantPool::klass_at_impl(constantPoolHandle this_cp, int which,
+ bool save_resolution_error, TRAPS) {
assert(THREAD->is_Java_thread(), "must be a Java thread");
// A resolved constantPool entry will contain a Klass*, otherwise a Symbol*.
@@ -249,7 +250,18 @@
// Failed to resolve class. We must record the errors so that subsequent attempts
// to resolve this constant pool entry fail with the same error (JVMS 5.4.3).
if (HAS_PENDING_EXCEPTION) {
- save_and_throw_exception(this_cp, which, constantTag(JVM_CONSTANT_UnresolvedClass), CHECK_0);
+ if (save_resolution_error) {
+ save_and_throw_exception(this_cp, which, constantTag(JVM_CONSTANT_UnresolvedClass), CHECK_NULL);
+ // If CHECK_NULL above doesn't return the exception, that means that
+ // some other thread has beaten us and has resolved the class.
+ // To preserve old behavior, we return the resolved class.
+ entry = this_cp->resolved_klass_at(which);
+ assert(entry.is_resolved(), "must be resolved if exception was cleared");
+ assert(entry.get_klass()->is_klass(), "must be resolved to a klass");
+ return entry.get_klass();
+ } else {
+ return NULL; // return the pending exception
+ }
}
// Make this class loader depend upon the class loader owning the class reference
@@ -260,10 +272,10 @@
// skip resolving the constant pool so that this code gets
// called the next time some bytecodes refer to this class.
trace_class_resolution(this_cp, k);
- return k();
- } else {
- this_cp->klass_at_put(which, k());
- }
+ return k();
+ } else {
+ this_cp->klass_at_put(which, k());
+ }
entry = this_cp->resolved_klass_at(which);
assert(entry.is_resolved() && entry.get_klass()->is_klass(), "must be resolved at this point");
@@ -573,24 +585,25 @@
Symbol* message = exception_message(this_cp, which, tag, PENDING_EXCEPTION);
SystemDictionary::add_resolution_error(this_cp, which, error, message);
// CAS in the tag. If a thread beat us to registering this error that's fine.
- // If another thread resolved the reference, this is an error. The resolution
- // must deterministically get an error. So why do we save this?
- // We save this because jvmti can add classes to the bootclass path after this
- // error, so it needs to get the same error if the error is first.
+ // If another thread resolved the reference, this is a race condition. This
+ // thread may have had a security manager or something temporary.
+ // This doesn't deterministically get an error. So why do we save this?
+ // We save this because jvmti can add classes to the bootclass path after
+ // this error, so it needs to get the same error if the error is first.
jbyte old_tag = Atomic::cmpxchg((jbyte)error_tag,
(jbyte*)this_cp->tag_addr_at(which), (jbyte)tag.value());
- assert(old_tag == error_tag || old_tag == tag.value(), "should not be resolved otherwise");
+ if (old_tag != error_tag && old_tag != tag.value()) {
+      // MethodHandles and MethodType entries don't change to the resolved version.
+ assert(this_cp->tag_at(which).is_klass(), "Wrong tag value");
+ // Forget the exception and use the resolved class.
+ CLEAR_PENDING_EXCEPTION;
+ }
} else {
// some other thread put this in error state
throw_resolution_error(this_cp, which, CHECK);
}
-
- // This exits with some pending exception
- assert(HAS_PENDING_EXCEPTION, "should not be cleared");
}
-
-
// Called to resolve constants in the constant pool and return an oop.
// Some constant pool entries cache their resolved oop. This is also
// called to create oops from constants to use in arguments for invokedynamic
@@ -627,7 +640,7 @@
case JVM_CONSTANT_Class:
{
assert(cache_index == _no_index_sentinel, "should not have been set");
- Klass* resolved = klass_at_impl(this_cp, index, CHECK_NULL);
+ Klass* resolved = klass_at_impl(this_cp, index, true, CHECK_NULL);
// ldc wants the java mirror.
result_oop = resolved->java_mirror();
break;
@@ -660,7 +673,7 @@
ref_kind, index, this_cp->method_handle_index_at(index),
callee_index, name->as_C_string(), signature->as_C_string());
KlassHandle callee;
- { Klass* k = klass_at_impl(this_cp, callee_index, CHECK_NULL);
+ { Klass* k = klass_at_impl(this_cp, callee_index, true, CHECK_NULL);
callee = KlassHandle(THREAD, k);
}
KlassHandle klass(THREAD, this_cp->pool_holder());
--- a/hotspot/src/share/vm/oops/constantPool.hpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/oops/constantPool.hpp Wed Jul 05 20:07:30 2017 +0200
@@ -336,7 +336,13 @@
Klass* klass_at(int which, TRAPS) {
constantPoolHandle h_this(THREAD, this);
- return klass_at_impl(h_this, which, CHECK_NULL);
+ return klass_at_impl(h_this, which, true, CHECK_NULL);
+ }
+
+ // Version of klass_at that doesn't save the resolution error, called during deopt
+ Klass* klass_at_ignore_error(int which, TRAPS) {
+ constantPoolHandle h_this(THREAD, this);
+ return klass_at_impl(h_this, which, false, CHECK_NULL);
}
Symbol* klass_name_at(int which); // Returns the name, w/o resolving.
@@ -793,7 +799,8 @@
// Implementation of methods that needs an exposed 'this' pointer, in order to
// handle GC while executing the method
- static Klass* klass_at_impl(constantPoolHandle this_cp, int which, TRAPS);
+ static Klass* klass_at_impl(constantPoolHandle this_cp, int which,
+ bool save_resolution_error, TRAPS);
static oop string_at_impl(constantPoolHandle this_cp, int which, int obj_index, TRAPS);
static void trace_class_resolution(constantPoolHandle this_cp, KlassHandle k);
--- a/hotspot/src/share/vm/oops/instanceKlass.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/oops/instanceKlass.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -736,6 +736,41 @@
}
}
+// Eagerly initialize superinterfaces that declare default methods (concrete instance: any access)
+void InstanceKlass::initialize_super_interfaces(instanceKlassHandle this_k, TRAPS) {
+ if (this_k->has_default_methods()) {
+ for (int i = 0; i < this_k->local_interfaces()->length(); ++i) {
+ Klass* iface = this_k->local_interfaces()->at(i);
+ InstanceKlass* ik = InstanceKlass::cast(iface);
+ if (ik->should_be_initialized()) {
+ if (ik->has_default_methods()) {
+ ik->initialize_super_interfaces(ik, THREAD);
+ }
+ // Only initialize() interfaces that "declare" concrete methods.
+ // has_default_methods drives searching superinterfaces since it
+ // means has_default_methods in its superinterface hierarchy
+ if (!HAS_PENDING_EXCEPTION && ik->declares_default_methods()) {
+ ik->initialize(THREAD);
+ }
+ if (HAS_PENDING_EXCEPTION) {
+ Handle e(THREAD, PENDING_EXCEPTION);
+ CLEAR_PENDING_EXCEPTION;
+ {
+ EXCEPTION_MARK;
+ // Locks object, set state, and notify all waiting threads
+ this_k->set_initialization_state_and_notify(
+ initialization_error, THREAD);
+
+ // ignore any exception thrown, superclass initialization error is
+ // thrown below
+ CLEAR_PENDING_EXCEPTION;
+ }
+ THROW_OOP(e());
+ }
+ }
+ }
+ }
+}
void InstanceKlass::initialize_impl(instanceKlassHandle this_k, TRAPS) {
// Make sure klass is linked (verified) before initialization
@@ -815,33 +850,11 @@
}
}
+ // Recursively initialize any superinterfaces that declare default methods
+ // Only need to recurse if has_default_methods which includes declaring and
+ // inheriting default methods
if (this_k->has_default_methods()) {
- // Step 7.5: initialize any interfaces which have default methods
- for (int i = 0; i < this_k->local_interfaces()->length(); ++i) {
- Klass* iface = this_k->local_interfaces()->at(i);
- InstanceKlass* ik = InstanceKlass::cast(iface);
- if (ik->has_default_methods() && ik->should_be_initialized()) {
- ik->initialize(THREAD);
-
- if (HAS_PENDING_EXCEPTION) {
- Handle e(THREAD, PENDING_EXCEPTION);
- CLEAR_PENDING_EXCEPTION;
- {
- EXCEPTION_MARK;
- // Locks object, set state, and notify all waiting threads
- this_k->set_initialization_state_and_notify(
- initialization_error, THREAD);
-
- // ignore any exception thrown, superclass initialization error is
- // thrown below
- CLEAR_PENDING_EXCEPTION;
- }
- DTRACE_CLASSINIT_PROBE_WAIT(
- super__failed, InstanceKlass::cast(this_k()), -1, wait);
- THROW_OOP(e());
- }
- }
- }
+ this_k->initialize_super_interfaces(this_k, CHECK);
}
// Step 8
@@ -1717,6 +1730,25 @@
return id;
}
+// Figure out how many jmethodIDs haven't been allocated, and make
+// sure space for them is pre-allocated. This makes getting all
+// method ids much, much faster with classes with more than 8
+// methods, and has a *substantial* effect on performance with jvmti
+// code that loads all jmethodIDs for all classes.
+void InstanceKlass::ensure_space_for_methodids(int start_offset) {
+ int new_jmeths = 0;
+ int length = methods()->length();
+ for (int index = start_offset; index < length; index++) {
+ Method* m = methods()->at(index);
+ jmethodID id = m->find_jmethod_id_or_null();
+ if (id == NULL) {
+ new_jmeths++;
+ }
+ }
+ if (new_jmeths != 0) {
+ Method::ensure_jmethod_ids(class_loader_data(), new_jmeths);
+ }
+}
// Common code to fetch the jmethodID from the cache or update the
// cache with the new jmethodID. This function should never do anything
@@ -2486,7 +2518,7 @@
// If this is an anonymous class, append a hash to make the name unique
if (is_anonymous()) {
intptr_t hash = (java_mirror() != NULL) ? java_mirror()->identity_hash() : 0;
- sprintf(hash_buf, "/" UINTX_FORMAT, (uintx)hash);
+ jio_snprintf(hash_buf, sizeof(hash_buf), "/" UINTX_FORMAT, (uintx)hash);
hash_len = (int)strlen(hash_buf);
}
@@ -2779,19 +2811,18 @@
// On-stack replacement stuff
void InstanceKlass::add_osr_nmethod(nmethod* n) {
// only one compilation can be active
- NEEDS_CLEANUP
- // This is a short non-blocking critical region, so the no safepoint check is ok.
- OsrList_lock->lock_without_safepoint_check();
- assert(n->is_osr_method(), "wrong kind of nmethod");
- n->set_osr_link(osr_nmethods_head());
- set_osr_nmethods_head(n);
- // Raise the highest osr level if necessary
- if (TieredCompilation) {
- Method* m = n->method();
- m->set_highest_osr_comp_level(MAX2(m->highest_osr_comp_level(), n->comp_level()));
+ {
+ // This is a short non-blocking critical region, so the no safepoint check is ok.
+ MutexLockerEx ml(OsrList_lock, Mutex::_no_safepoint_check_flag);
+ assert(n->is_osr_method(), "wrong kind of nmethod");
+ n->set_osr_link(osr_nmethods_head());
+ set_osr_nmethods_head(n);
+ // Raise the highest osr level if necessary
+ if (TieredCompilation) {
+ Method* m = n->method();
+ m->set_highest_osr_comp_level(MAX2(m->highest_osr_comp_level(), n->comp_level()));
+ }
}
- // Remember to unlock again
- OsrList_lock->unlock();
// Get rid of the osr methods for the same bci that have lower levels.
if (TieredCompilation) {
@@ -2807,7 +2838,7 @@
void InstanceKlass::remove_osr_nmethod(nmethod* n) {
// This is a short non-blocking critical region, so the no safepoint check is ok.
- OsrList_lock->lock_without_safepoint_check();
+ MutexLockerEx ml(OsrList_lock, Mutex::_no_safepoint_check_flag);
assert(n->is_osr_method(), "wrong kind of nmethod");
nmethod* last = NULL;
nmethod* cur = osr_nmethods_head();
@@ -2844,13 +2875,27 @@
}
m->set_highest_osr_comp_level(max_level);
}
- // Remember to unlock again
- OsrList_lock->unlock();
+}
+
+int InstanceKlass::mark_osr_nmethods(const Method* m) {
+ // This is a short non-blocking critical region, so the no safepoint check is ok.
+ MutexLockerEx ml(OsrList_lock, Mutex::_no_safepoint_check_flag);
+ nmethod* osr = osr_nmethods_head();
+ int found = 0;
+ while (osr != NULL) {
+ assert(osr->is_osr_method(), "wrong kind of nmethod found in chain");
+ if (osr->method() == m) {
+ osr->mark_for_deoptimization();
+ found++;
+ }
+ osr = osr->osr_link();
+ }
+ return found;
}
nmethod* InstanceKlass::lookup_osr_nmethod(const Method* m, int bci, int comp_level, bool match_level) const {
// This is a short non-blocking critical region, so the no safepoint check is ok.
- OsrList_lock->lock_without_safepoint_check();
+ MutexLockerEx ml(OsrList_lock, Mutex::_no_safepoint_check_flag);
nmethod* osr = osr_nmethods_head();
nmethod* best = NULL;
while (osr != NULL) {
@@ -2866,14 +2911,12 @@
if (match_level) {
if (osr->comp_level() == comp_level) {
// Found a match - return it.
- OsrList_lock->unlock();
return osr;
}
} else {
if (best == NULL || (osr->comp_level() > best->comp_level())) {
if (osr->comp_level() == CompLevel_highest_tier) {
// Found the best possible - return it.
- OsrList_lock->unlock();
return osr;
}
best = osr;
@@ -2882,7 +2925,6 @@
}
osr = osr->osr_link();
}
- OsrList_lock->unlock();
if (best != NULL && best->comp_level() >= comp_level && match_level == false) {
return best;
}
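Editor's note: the ensure_space_for_methodids comment above describes agents that eagerly materialize every jmethodID. A hedged sketch of that JVMTI usage pattern, the workload the pre-allocation speeds up (agent-side code, not part of the patch):

  #include <jvmti.h>

  // Illustrative agent fragment: force a jmethodID into existence for every
  // method of every loaded class. With thousands of classes this is exactly
  // the pattern that benefits from pre-sizing the per-loader jmethodID blocks.
  static void materialize_all_method_ids(jvmtiEnv* jvmti) {
    jint class_count = 0;
    jclass* classes = NULL;
    if (jvmti->GetLoadedClasses(&class_count, &classes) != JVMTI_ERROR_NONE) return;
    for (jint i = 0; i < class_count; i++) {
      jint method_count = 0;
      jmethodID* methods = NULL;
      // GetClassMethods creates one jmethodID per declared method.
      if (jvmti->GetClassMethods(classes[i], &method_count, &methods) == JVMTI_ERROR_NONE) {
        jvmti->Deallocate((unsigned char*)methods);
      }
    }
    jvmti->Deallocate((unsigned char*)classes);
  }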
--- a/hotspot/src/share/vm/oops/instanceKlass.hpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/oops/instanceKlass.hpp Wed Jul 05 20:07:30 2017 +0200
@@ -199,13 +199,14 @@
bool _has_unloaded_dependent;
enum {
- _misc_rewritten = 1 << 0, // methods rewritten.
- _misc_has_nonstatic_fields = 1 << 1, // for sizing with UseCompressedOops
- _misc_should_verify_class = 1 << 2, // allow caching of preverification
- _misc_is_anonymous = 1 << 3, // has embedded _host_klass field
- _misc_is_contended = 1 << 4, // marked with contended annotation
- _misc_has_default_methods = 1 << 5, // class/superclass/implemented interfaces has default methods
- _misc_has_been_redefined = 1 << 6 // class has been redefined
+ _misc_rewritten = 1 << 0, // methods rewritten.
+ _misc_has_nonstatic_fields = 1 << 1, // for sizing with UseCompressedOops
+ _misc_should_verify_class = 1 << 2, // allow caching of preverification
+ _misc_is_anonymous = 1 << 3, // has embedded _host_klass field
+ _misc_is_contended = 1 << 4, // marked with contended annotation
+ _misc_has_default_methods = 1 << 5, // class/superclass/implemented interfaces has default methods
+ _misc_declares_default_methods = 1 << 6, // directly declares default methods (any access)
+ _misc_has_been_redefined = 1 << 7 // class has been redefined
};
u2 _misc_flags;
u2 _minor_version; // minor version number of class file
@@ -651,6 +652,17 @@
}
}
+ bool declares_default_methods() const {
+ return (_misc_flags & _misc_declares_default_methods) != 0;
+ }
+ void set_declares_default_methods(bool b) {
+ if (b) {
+ _misc_flags |= _misc_declares_default_methods;
+ } else {
+ _misc_flags &= ~_misc_declares_default_methods;
+ }
+ }
+
// for adding methods, ConstMethod::UNSET_IDNUM means no more ids available
inline u2 next_method_idnum();
void set_initial_method_idnum(u2 value) { _idnum_allocated_count = value; }
@@ -686,6 +698,7 @@
jmethodID** to_dealloc_jmeths_p);
static void get_jmethod_id_length_value(jmethodID* cache, size_t idnum,
size_t *length_p, jmethodID* id_p);
+ void ensure_space_for_methodids(int start_offset = 0);
jmethodID jmethod_id_or_null(Method* method);
// annotations support
@@ -742,6 +755,7 @@
void set_osr_nmethods_head(nmethod* h) { _osr_nmethods_head = h; };
void add_osr_nmethod(nmethod* n);
void remove_osr_nmethod(nmethod* n);
+ int mark_osr_nmethods(const Method* m);
nmethod* lookup_osr_nmethod(const Method* m, int bci, int level, bool match_level) const;
// Breakpoint support (see methods on Method* for details)
@@ -1022,6 +1036,7 @@
static bool link_class_impl (instanceKlassHandle this_k, bool throw_verifyerror, TRAPS);
static bool verify_code (instanceKlassHandle this_k, bool throw_verifyerror, TRAPS);
static void initialize_impl (instanceKlassHandle this_k, TRAPS);
+ static void initialize_super_interfaces (instanceKlassHandle this_k, TRAPS);
static void eager_initialize_impl (instanceKlassHandle this_k);
static void set_initialization_state_and_notify_impl (instanceKlassHandle this_k, ClassState state, TRAPS);
static void call_class_initializer_impl (instanceKlassHandle this_k, TRAPS);
--- a/hotspot/src/share/vm/oops/method.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/oops/method.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -368,6 +368,13 @@
// Build a MethodData* object to hold information about this method
// collected in the interpreter.
void Method::build_interpreter_method_data(methodHandle method, TRAPS) {
+ // Do not profile the method if metaspace has hit an OOM previously
+ // allocating profiling data. Callers clear pending exception so don't
+ // add one here.
+ if (ClassLoaderDataGraph::has_metaspace_oom()) {
+ return;
+ }
+
// Do not profile method if current thread holds the pending list lock,
// which avoids deadlock for acquiring the MethodData_lock.
if (InstanceRefKlass::owns_pending_list_lock((JavaThread*)THREAD)) {
@@ -379,7 +386,13 @@
MutexLocker ml(MethodData_lock, THREAD);
if (method->method_data() == NULL) {
ClassLoaderData* loader_data = method->method_holder()->class_loader_data();
- MethodData* method_data = MethodData::allocate(loader_data, method, CHECK);
+ MethodData* method_data = MethodData::allocate(loader_data, method, THREAD);
+ if (HAS_PENDING_EXCEPTION) {
+ CompileBroker::log_metaspace_failure();
+ ClassLoaderDataGraph::set_metaspace_oom(true);
+ return; // return the exception (which is cleared)
+ }
+
method->set_method_data(method_data);
if (PrintMethodData && (Verbose || WizardMode)) {
ResourceMark rm(THREAD);
@@ -392,9 +405,19 @@
}
MethodCounters* Method::build_method_counters(Method* m, TRAPS) {
+ // Do not profile the method if metaspace has hit an OOM previously
+ if (ClassLoaderDataGraph::has_metaspace_oom()) {
+ return NULL;
+ }
+
methodHandle mh(m);
ClassLoaderData* loader_data = mh->method_holder()->class_loader_data();
- MethodCounters* counters = MethodCounters::allocate(loader_data, CHECK_NULL);
+ MethodCounters* counters = MethodCounters::allocate(loader_data, THREAD);
+ if (HAS_PENDING_EXCEPTION) {
+ CompileBroker::log_metaspace_failure();
+ ClassLoaderDataGraph::set_metaspace_oom(true);
+ return NULL; // return the exception (which is cleared)
+ }
if (!mh->init_method_counters(counters)) {
MetadataFactory::free_metadata(loader_data, counters);
}
@@ -1295,6 +1318,10 @@
vmIntrinsics::ID id = vmIntrinsics::find_id(klass_id, name_id, sig_id, flags);
if (id != vmIntrinsics::_none) {
set_intrinsic_id(id);
+ if (id == vmIntrinsics::_Class_cast) {
+ // Even if the intrinsic is rejected, we want to inline this simple method.
+ set_force_inline(true);
+ }
return;
}
@@ -1704,59 +1731,98 @@
// jmethodID handling
// This is a block allocating object, sort of like JNIHandleBlock, only a
-// lot simpler. There aren't many of these, they aren't long, they are rarely
-// deleted and so we can do some suboptimal things.
+// lot simpler.
// It's allocated on the CHeap because once we allocate a jmethodID, we can
// never get rid of it.
-// It would be nice to be able to parameterize the number of methods for
-// the null_class_loader but then we'd have to turn this and ClassLoaderData
-// into templates.
+
+static const int min_block_size = 8;
+
+class JNIMethodBlockNode : public CHeapObj<mtClass> {
+ friend class JNIMethodBlock;
+ Method** _methods;
+ int _number_of_methods;
+ int _top;
+ JNIMethodBlockNode* _next;
+
+ public:
+
+ JNIMethodBlockNode(int num_methods = min_block_size);
-// I feel like this brain dead class should exist somewhere in the STL
+ ~JNIMethodBlockNode() { FREE_C_HEAP_ARRAY(Method*, _methods, mtInternal); }
+
+ void ensure_methods(int num_addl_methods) {
+ if (_top < _number_of_methods) {
+ num_addl_methods -= _number_of_methods - _top;
+ if (num_addl_methods <= 0) {
+ return;
+ }
+ }
+ if (_next == NULL) {
+ _next = new JNIMethodBlockNode(MAX2(num_addl_methods, min_block_size));
+ } else {
+ _next->ensure_methods(num_addl_methods);
+ }
+ }
+};
class JNIMethodBlock : public CHeapObj<mtClass> {
- enum { number_of_methods = 8 };
-
- Method* _methods[number_of_methods];
- int _top;
- JNIMethodBlock* _next;
+ JNIMethodBlockNode _head;
+ JNIMethodBlockNode *_last_free;
public:
static Method* const _free_method;
- JNIMethodBlock() : _next(NULL), _top(0) {
- for (int i = 0; i< number_of_methods; i++) _methods[i] = _free_method;
+ JNIMethodBlock(int initial_capacity = min_block_size)
+ : _head(initial_capacity), _last_free(&_head) {}
+
+ void ensure_methods(int num_addl_methods) {
+ _last_free->ensure_methods(num_addl_methods);
}
Method** add_method(Method* m) {
- if (_top < number_of_methods) {
- // top points to the next free entry.
- int i = _top;
- _methods[i] = m;
- _top++;
- return &_methods[i];
- } else if (_top == number_of_methods) {
- // if the next free entry ran off the block see if there's a free entry
- for (int i = 0; i< number_of_methods; i++) {
- if (_methods[i] == _free_method) {
- _methods[i] = m;
- return &_methods[i];
+ for (JNIMethodBlockNode* b = _last_free; b != NULL; b = b->_next) {
+ if (b->_top < b->_number_of_methods) {
+ // top points to the next free entry.
+ int i = b->_top;
+ b->_methods[i] = m;
+ b->_top++;
+ _last_free = b;
+ return &(b->_methods[i]);
+ } else if (b->_top == b->_number_of_methods) {
+ // if the next free entry ran off the block see if there's a free entry
+ for (int i = 0; i < b->_number_of_methods; i++) {
+ if (b->_methods[i] == _free_method) {
+ b->_methods[i] = m;
+ _last_free = b;
+ return &(b->_methods[i]);
+ }
}
+ // Only check each block once for frees. They're very unlikely.
+ // Increment top past the end of the block.
+ b->_top++;
}
- // Only check each block once for frees. They're very unlikely.
- // Increment top past the end of the block.
- _top++;
+ // need to allocate a next block.
+ if (b->_next == NULL) {
+ b->_next = _last_free = new JNIMethodBlockNode();
+ }
}
- // need to allocate a next block.
- if (_next == NULL) {
- _next = new JNIMethodBlock();
- }
- return _next->add_method(m);
+ guarantee(false, "Should always allocate a free block");
+ return NULL;
}
bool contains(Method** m) {
- for (JNIMethodBlock* b = this; b != NULL; b = b->_next) {
- for (int i = 0; i< number_of_methods; i++) {
- if (&(b->_methods[i]) == m) {
+ if (m == NULL) return false;
+ for (JNIMethodBlockNode* b = &_head; b != NULL; b = b->_next) {
+ if (b->_methods <= m && m < b->_methods + b->_number_of_methods) {
+ // This is a bit of extra checking, for two reasons. One is
+ // that contains() deals with pointers that are passed in by
+ // JNI code, so making sure that the pointer is aligned
+ // correctly is valuable. The other is that <= and > are
+ // technically not defined on pointers, so the if guard can
+ // pass spuriously; no modern compiler is likely to make that
+ // a problem, though (and if one did, the guard could also
+ // fail spuriously, which would be bad).
+ ptrdiff_t idx = m - b->_methods;
+ if (b->_methods + idx == m) {
return true;
}
}
@@ -1775,9 +1841,9 @@
// During class unloading the methods are cleared, which is different
// than freed.
void clear_all_methods() {
- for (JNIMethodBlock* b = this; b != NULL; b = b->_next) {
- for (int i = 0; i< number_of_methods; i++) {
- _methods[i] = NULL;
+ for (JNIMethodBlockNode* b = &_head; b != NULL; b = b->_next) {
+ for (int i = 0; i< b->_number_of_methods; i++) {
+ b->_methods[i] = NULL;
}
}
}
@@ -1785,9 +1851,9 @@
int count_methods() {
// count all allocated methods
int count = 0;
- for (JNIMethodBlock* b = this; b != NULL; b = b->_next) {
- for (int i = 0; i< number_of_methods; i++) {
- if (_methods[i] != _free_method) count++;
+ for (JNIMethodBlockNode* b = &_head; b != NULL; b = b->_next) {
+ for (int i = 0; i< b->_number_of_methods; i++) {
+ if (b->_methods[i] != _free_method) count++;
}
}
return count;
@@ -1798,6 +1864,36 @@
// Something that can't be mistaken for an address or a markOop
Method* const JNIMethodBlock::_free_method = (Method*)55;
+JNIMethodBlockNode::JNIMethodBlockNode(int num_methods) : _next(NULL), _top(0) {
+ _number_of_methods = MAX2(num_methods, min_block_size);
+ _methods = NEW_C_HEAP_ARRAY(Method*, _number_of_methods, mtInternal);
+ for (int i = 0; i < _number_of_methods; i++) {
+ _methods[i] = JNIMethodBlock::_free_method;
+ }
+}
+
+void Method::ensure_jmethod_ids(ClassLoaderData* loader_data, int capacity) {
+ ClassLoaderData* cld = loader_data;
+ if (!SafepointSynchronize::is_at_safepoint()) {
+ // Have to add jmethod_ids() to class loader data thread-safely.
+ // Also have to add the method to the list safely, which the cld lock
+ // protects as well.
+ MutexLockerEx ml(cld->metaspace_lock(), Mutex::_no_safepoint_check_flag);
+ if (cld->jmethod_ids() == NULL) {
+ cld->set_jmethod_ids(new JNIMethodBlock(capacity));
+ } else {
+ cld->jmethod_ids()->ensure_methods(capacity);
+ }
+ } else {
+ // At safepoint, we are single threaded and can set this.
+ if (cld->jmethod_ids() == NULL) {
+ cld->set_jmethod_ids(new JNIMethodBlock(capacity));
+ } else {
+ cld->jmethod_ids()->ensure_methods(capacity);
+ }
+ }
+}
+
// Add a method id to the jmethod_ids
jmethodID Method::make_jmethod_id(ClassLoaderData* loader_data, Method* m) {
ClassLoaderData* cld = loader_data;
--- a/hotspot/src/share/vm/oops/method.hpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/oops/method.hpp Wed Jul 05 20:07:30 2017 +0200
@@ -729,6 +729,11 @@
static jmethodID make_jmethod_id(ClassLoaderData* loader_data, Method* mh);
static void destroy_jmethod_id(ClassLoaderData* loader_data, jmethodID mid);
+ // Ensure there is enough capacity in the internal tracking data
+ // structures to hold the number of jmethodIDs you plan to generate.
+ // This saves substantial time doing allocations.
+ static void ensure_jmethod_ids(ClassLoaderData* loader_data, int capacity);
+
// Use resolve_jmethod_id() in situations where the caller is expected
// to provide a valid jmethodID; the only sanity checks are in asserts;
// result guaranteed not to be NULL.
@@ -813,6 +818,10 @@
return method_holder()->lookup_osr_nmethod(this, InvocationEntryBci, level, match_level) != NULL;
}
+ int mark_osr_nmethods() {
+ return method_holder()->mark_osr_nmethods(this);
+ }
+
nmethod* lookup_osr_nmethod_for(int bci, int level, bool match_level) {
return method_holder()->lookup_osr_nmethod(this, bci, level, match_level);
}
--- a/hotspot/src/share/vm/oops/objArrayOop.hpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/oops/objArrayOop.hpp Wed Jul 05 20:07:30 2017 +0200
@@ -45,9 +45,10 @@
private:
// Give size of objArrayOop in HeapWords minus the header
static int array_size(int length) {
- const int OopsPerHeapWord = HeapWordSize/heapOopSize;
+ const uint OopsPerHeapWord = HeapWordSize/heapOopSize;
assert(OopsPerHeapWord >= 1 && (HeapWordSize % heapOopSize == 0),
"Else the following (new) computation would be in error");
+ uint res = ((uint)length + OopsPerHeapWord - 1)/OopsPerHeapWord;
#ifdef ASSERT
// The old code is left in for sanity-checking; it'll
// go away pretty soon. XXX
@@ -55,16 +56,15 @@
// oop->length() * HeapWordsPerOop;
// With narrowOops, HeapWordsPerOop is 1/2 or equal 0 as an integer.
// The oop elements are aligned up to wordSize
- const int HeapWordsPerOop = heapOopSize/HeapWordSize;
- int old_res;
+ const uint HeapWordsPerOop = heapOopSize/HeapWordSize;
+ uint old_res;
if (HeapWordsPerOop > 0) {
old_res = length * HeapWordsPerOop;
} else {
- old_res = align_size_up(length, OopsPerHeapWord)/OopsPerHeapWord;
+ old_res = align_size_up((uint)length, OopsPerHeapWord)/OopsPerHeapWord;
}
+ assert(res == old_res, "Inconsistency between old and new.");
#endif // ASSERT
- int res = ((uint)length + OopsPerHeapWord - 1)/OopsPerHeapWord;
- assert(res == old_res, "Inconsistency between old and new.");
return res;
}
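Editor's note: array_size now does the rounding in unsigned arithmetic. A small worked example with compressed oops (illustrative values only):

  // With compressed oops: heapOopSize == 4, HeapWordSize == 8, so OopsPerHeapWord == 2.
  unsigned length          = 5;
  unsigned OopsPerHeapWord = 2;
  unsigned words           = (length + OopsPerHeapWord - 1) / OopsPerHeapWord;  // 3 heap words
  // Doing this in uint also keeps 'length + OopsPerHeapWord - 1' from overflowing
  // a signed int when length is close to max_jint.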
--- a/hotspot/src/share/vm/oops/typeArrayOop.hpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/oops/typeArrayOop.hpp Wed Jul 05 20:07:30 2017 +0200
@@ -150,7 +150,7 @@
DEBUG_ONLY(BasicType etype = Klass::layout_helper_element_type(lh));
assert(length <= arrayOopDesc::max_array_length(etype), "no overflow");
- julong size_in_bytes = length;
+ julong size_in_bytes = (juint)length;
size_in_bytes <<= element_shift;
size_in_bytes += instance_header_size;
julong size_in_words = ((size_in_bytes + (HeapWordSize-1)) >> LogHeapWordSize);
--- a/hotspot/src/share/vm/opto/c2_globals.hpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/opto/c2_globals.hpp Wed Jul 05 20:07:30 2017 +0200
@@ -476,6 +476,9 @@
product(bool, DoEscapeAnalysis, true, \
"Perform escape analysis") \
\
+ product(double, EscapeAnalysisTimeout, 20. DEBUG_ONLY(+40.), \
+ "Abort EA when it reaches time limit (in sec)") \
+ \
develop(bool, ExitEscapeAnalysisOnTimeout, true, \
"Exit or throw assert in EA when it reaches time limit") \
\
--- a/hotspot/src/share/vm/opto/callnode.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/opto/callnode.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -939,7 +939,8 @@
#ifndef PRODUCT
if (!(call->req() > TypeFunc::Parms &&
call->in(TypeFunc::Parms) != NULL &&
- call->in(TypeFunc::Parms)->is_Con())) {
+ call->in(TypeFunc::Parms)->is_Con() &&
+ call->in(TypeFunc::Parms)->bottom_type()->isa_int())) {
assert(in_dump() != 0, "OK if dumping");
tty->print("[bad uncommon trap]");
return 0;
--- a/hotspot/src/share/vm/opto/coalesce.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/opto/coalesce.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -281,9 +281,11 @@
Block *pred = _phc._cfg.get_block_for_node(b->pred(j));
Node *copy;
assert(!m->is_Con() || m->is_Mach(), "all Con must be Mach");
- // Rematerialize constants instead of copying them
- if( m->is_Mach() && m->as_Mach()->is_Con() &&
- m->as_Mach()->rematerialize() ) {
+ // Rematerialize constants instead of copying them.
+ // We do this only for immediate constants, we avoid constant table loads
+ // because that will unsafely extend the live range of the constant table base.
+ if (m->is_Mach() && m->as_Mach()->is_Con() && !m->as_Mach()->is_MachConstant() &&
+ m->as_Mach()->rematerialize()) {
copy = m->clone();
// Insert the copy in the predecessor basic block
pred->add_inst(copy);
@@ -317,8 +319,8 @@
assert(!m->is_Con() || m->is_Mach(), "all Con must be Mach");
// At this point it is unsafe to extend live ranges (6550579).
// Rematerialize only constants as we do for Phi above.
- if(m->is_Mach() && m->as_Mach()->is_Con() &&
- m->as_Mach()->rematerialize()) {
+ if (m->is_Mach() && m->as_Mach()->is_Con() && !m->as_Mach()->is_MachConstant() &&
+ m->as_Mach()->rematerialize()) {
copy = m->clone();
// Insert the copy in the basic block, just before us
b->insert_node(copy, l++);
--- a/hotspot/src/share/vm/opto/compile.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/opto/compile.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -535,7 +535,6 @@
if (scratch_buffer_blob() == NULL) {
// Let CompilerBroker disable further compilations.
record_failure("Not enough space for scratch buffer in CodeCache");
- CompileBroker::handle_full_code_cache(CodeBlobType::NonNMethod);
return;
}
}
--- a/hotspot/src/share/vm/opto/escape.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/opto/escape.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -38,6 +38,8 @@
ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) :
_nodes(C->comp_arena(), C->unique(), C->unique(), NULL),
+ _in_worklist(C->comp_arena()),
+ _next_pidx(0),
_collecting(true),
_verify(false),
_compile(C),
@@ -125,13 +127,19 @@
if (C->root() != NULL) {
ideal_nodes.push(C->root());
}
+ // Processed ideal nodes are unique on ideal_nodes list
+ // but several ideal nodes are mapped to the phantom_obj.
+ // To avoid duplicated entries on the following worklists
+ // add the phantom_obj only once to them.
+ ptnodes_worklist.append(phantom_obj);
+ java_objects_worklist.append(phantom_obj);
for( uint next = 0; next < ideal_nodes.size(); ++next ) {
Node* n = ideal_nodes.at(next);
// Create PointsTo nodes and add them to Connection Graph. Called
// only once per ideal node since ideal_nodes is Unique_Node list.
add_node_to_connection_graph(n, &delayed_worklist);
PointsToNode* ptn = ptnode_adr(n->_idx);
- if (ptn != NULL) {
+ if (ptn != NULL && ptn != phantom_obj) {
ptnodes_worklist.append(ptn);
if (ptn->is_JavaObject()) {
java_objects_worklist.append(ptn->as_JavaObject());
@@ -415,7 +423,7 @@
}
case Op_CreateEx: {
// assume that all exception objects globally escape
- add_java_object(n, PointsToNode::GlobalEscape);
+ map_ideal_node(n, phantom_obj);
break;
}
case Op_LoadKlass:
@@ -1074,13 +1082,8 @@
// on graph complexity. Observed 8 passes in jvm2008 compiler.compiler.
// Set limit to 20 to catch situation when something did go wrong and
// bailout Escape Analysis.
- // Also limit build time to 30 sec (60 in debug VM).
+ // Also limit build time to 20 sec (60 in debug VM), EscapeAnalysisTimeout flag.
#define CG_BUILD_ITER_LIMIT 20
-#ifdef ASSERT
-#define CG_BUILD_TIME_LIMIT 60.0
-#else
-#define CG_BUILD_TIME_LIMIT 30.0
-#endif
// Propagate GlobalEscape and ArgEscape escape states and check that
// we still have non-escaping objects. The method pushes on _worklist
@@ -1091,12 +1094,13 @@
// Now propagate references to all JavaObject nodes.
int java_objects_length = java_objects_worklist.length();
elapsedTimer time;
+ bool timeout = false;
int new_edges = 1;
int iterations = 0;
do {
while ((new_edges > 0) &&
- (iterations++ < CG_BUILD_ITER_LIMIT) &&
- (time.seconds() < CG_BUILD_TIME_LIMIT)) {
+ (iterations++ < CG_BUILD_ITER_LIMIT)) {
+ double start_time = time.seconds();
time.start();
new_edges = 0;
// Propagate references to phantom_object for nodes pushed on _worklist
@@ -1105,7 +1109,26 @@
for (int next = 0; next < java_objects_length; ++next) {
JavaObjectNode* ptn = java_objects_worklist.at(next);
new_edges += add_java_object_edges(ptn, true);
+
+#define SAMPLE_SIZE 4
+ if ((next % SAMPLE_SIZE) == 0) {
+          // Every SAMPLE_SIZE iterations, estimate how much time it will
+          // take to complete graph construction.
+ time.stop();
+ double stop_time = time.seconds();
+ double time_per_iter = (stop_time - start_time) / (double)SAMPLE_SIZE;
+ double time_until_end = time_per_iter * (double)(java_objects_length - next);
+ if ((start_time + time_until_end) >= EscapeAnalysisTimeout) {
+ timeout = true;
+ break; // Timeout
+ }
+ start_time = stop_time;
+ time.start();
+ }
+#undef SAMPLE_SIZE
+
}
+ if (timeout) break;
if (new_edges > 0) {
// Update escape states on each iteration if graph was updated.
if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) {
@@ -1113,9 +1136,12 @@
}
}
time.stop();
+ if (time.seconds() >= EscapeAnalysisTimeout) {
+ timeout = true;
+ break;
+ }
}
- if ((iterations < CG_BUILD_ITER_LIMIT) &&
- (time.seconds() < CG_BUILD_TIME_LIMIT)) {
+ if ((iterations < CG_BUILD_ITER_LIMIT) && !timeout) {
time.start();
// Find fields which have unknown value.
int fields_length = oop_fields_worklist.length();
@@ -1128,18 +1154,21 @@
}
}
time.stop();
+ if (time.seconds() >= EscapeAnalysisTimeout) {
+ timeout = true;
+ break;
+ }
} else {
new_edges = 0; // Bailout
}
} while (new_edges > 0);
// Bailout if passed limits.
- if ((iterations >= CG_BUILD_ITER_LIMIT) ||
- (time.seconds() >= CG_BUILD_TIME_LIMIT)) {
+ if ((iterations >= CG_BUILD_ITER_LIMIT) || timeout) {
Compile* C = _compile;
if (C->log() != NULL) {
C->log()->begin_elem("connectionGraph_bailout reason='reached ");
- C->log()->text("%s", (iterations >= CG_BUILD_ITER_LIMIT) ? "iterations" : "time");
+ C->log()->text("%s", timeout ? "time" : "iterations");
C->log()->end_elem(" limit'");
}
assert(ExitEscapeAnalysisOnTimeout, err_msg_res("infinite EA connection graph build (%f sec, %d iterations) with %d nodes and worklist size %d",
@@ -1156,7 +1185,6 @@
#endif
#undef CG_BUILD_ITER_LIMIT
-#undef CG_BUILD_TIME_LIMIT
// Find fields initialized by NULL for non-escaping Allocations.
int non_escaped_length = non_escaped_worklist.length();
@@ -1280,8 +1308,8 @@
}
}
}
- while(_worklist.length() > 0) {
- PointsToNode* use = _worklist.pop();
+ for (int l = 0; l < _worklist.length(); l++) {
+ PointsToNode* use = _worklist.at(l);
if (PointsToNode::is_base_use(use)) {
// Add reference from jobj to field and from field to jobj (field's base).
use = PointsToNode::get_use_node(use)->as_Field();
@@ -1328,6 +1356,8 @@
add_field_uses_to_worklist(use->as_Field());
}
}
+ _worklist.clear();
+ _in_worklist.Reset();
return new_edges;
}
@@ -1906,7 +1936,7 @@
return;
}
Compile* C = _compile;
- ptadr = new (C->comp_arena()) LocalVarNode(C, n, es);
+ ptadr = new (C->comp_arena()) LocalVarNode(this, n, es);
_nodes.at_put(n->_idx, ptadr);
}
@@ -1917,7 +1947,7 @@
return;
}
Compile* C = _compile;
- ptadr = new (C->comp_arena()) JavaObjectNode(C, n, es);
+ ptadr = new (C->comp_arena()) JavaObjectNode(this, n, es);
_nodes.at_put(n->_idx, ptadr);
}
@@ -1933,7 +1963,7 @@
es = PointsToNode::GlobalEscape;
}
Compile* C = _compile;
- FieldNode* field = new (C->comp_arena()) FieldNode(C, n, es, offset, is_oop);
+ FieldNode* field = new (C->comp_arena()) FieldNode(this, n, es, offset, is_oop);
_nodes.at_put(n->_idx, field);
}
@@ -1947,7 +1977,7 @@
return;
}
Compile* C = _compile;
- ptadr = new (C->comp_arena()) ArraycopyNode(C, n, es);
+ ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es);
_nodes.at_put(n->_idx, ptadr);
// Add edge from arraycopy node to source object.
(void)add_edge(ptadr, src);
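Editor's note: the sampling logic above replaces the fixed CG_BUILD_TIME_LIMIT with an extrapolated estimate checked every SAMPLE_SIZE objects. A worked example with made-up timings (illustrative numbers only):

  // Suppose the last SAMPLE_SIZE (4) java objects took 0.2 s:
  double start_time     = 10.0;                                  // seconds spent so far
  double stop_time      = 10.2;                                  // after 4 more objects
  double time_per_iter  = (stop_time - start_time) / 4.0;        // 0.05 s per object
  double time_until_end = time_per_iter * 1000;                  // 1000 objects left: 50 s
  bool   will_time_out  = (start_time + time_until_end) >= 20.0; // EscapeAnalysisTimeout
  // true here, so the connection graph build bails out instead of spending ~50 more seconds.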
--- a/hotspot/src/share/vm/opto/escape.hpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/opto/escape.hpp Wed Jul 05 20:07:30 2017 +0200
@@ -125,6 +125,8 @@
class FieldNode;
class ArraycopyNode;
+class ConnectionGraph;
+
// ConnectionGraph nodes
class PointsToNode : public ResourceObj {
GrowableArray<PointsToNode*> _edges; // List of nodes this node points to
@@ -137,6 +139,7 @@
Node* const _node; // Ideal node corresponding to this PointsTo node.
const int _idx; // Cached ideal node's _idx
+ const uint _pidx; // Index of this node
public:
typedef enum {
@@ -165,17 +168,9 @@
} NodeFlags;
- PointsToNode(Compile *C, Node* n, EscapeState es, NodeType type):
- _edges(C->comp_arena(), 2, 0, NULL),
- _uses (C->comp_arena(), 2, 0, NULL),
- _node(n),
- _idx(n->_idx),
- _type((u1)type),
- _escape((u1)es),
- _fields_escape((u1)es),
- _flags(ScalarReplaceable) {
- assert(n != NULL && es != UnknownEscape, "sanity");
- }
+ inline PointsToNode(ConnectionGraph* CG, Node* n, EscapeState es, NodeType type);
+
+ uint pidx() const { return _pidx; }
Node* ideal_node() const { return _node; }
int idx() const { return _idx; }
@@ -243,14 +238,14 @@
class LocalVarNode: public PointsToNode {
public:
- LocalVarNode(Compile *C, Node* n, EscapeState es):
- PointsToNode(C, n, es, LocalVar) {}
+ LocalVarNode(ConnectionGraph *CG, Node* n, EscapeState es):
+ PointsToNode(CG, n, es, LocalVar) {}
};
class JavaObjectNode: public PointsToNode {
public:
- JavaObjectNode(Compile *C, Node* n, EscapeState es):
- PointsToNode(C, n, es, JavaObject) {
+ JavaObjectNode(ConnectionGraph *CG, Node* n, EscapeState es):
+ PointsToNode(CG, n, es, JavaObject) {
if (es > NoEscape)
set_scalar_replaceable(false);
}
@@ -262,8 +257,8 @@
const bool _is_oop; // Field points to object
bool _has_unknown_base; // Has phantom_object base
public:
- FieldNode(Compile *C, Node* n, EscapeState es, int offs, bool is_oop):
- PointsToNode(C, n, es, Field),
+ FieldNode(ConnectionGraph *CG, Node* n, EscapeState es, int offs, bool is_oop):
+ PointsToNode(CG, n, es, Field),
_offset(offs), _is_oop(is_oop),
_has_unknown_base(false) {}
@@ -284,8 +279,8 @@
class ArraycopyNode: public PointsToNode {
public:
- ArraycopyNode(Compile *C, Node* n, EscapeState es):
- PointsToNode(C, n, es, Arraycopy) {}
+ ArraycopyNode(ConnectionGraph *CG, Node* n, EscapeState es):
+ PointsToNode(CG, n, es, Arraycopy) {}
};
// Iterators for PointsTo node's edges:
@@ -323,11 +318,14 @@
class ConnectionGraph: public ResourceObj {
+ friend class PointsToNode;
private:
GrowableArray<PointsToNode*> _nodes; // Map from ideal nodes to
// ConnectionGraph nodes.
GrowableArray<PointsToNode*> _worklist; // Nodes to be processed
+ VectorSet _in_worklist;
+ uint _next_pidx;
bool _collecting; // Indicates whether escape information
// is still being collected. If false,
@@ -353,6 +351,8 @@
}
uint nodes_size() const { return _nodes.length(); }
+ uint next_pidx() { return _next_pidx++; }
+
// Add nodes to ConnectionGraph.
void add_local_var(Node* n, PointsToNode::EscapeState es);
void add_java_object(Node* n, PointsToNode::EscapeState es);
@@ -396,15 +396,26 @@
int add_java_object_edges(JavaObjectNode* jobj, bool populate_worklist);
// Put node on worklist if it is (or was) not there.
- void add_to_worklist(PointsToNode* pt) {
- _worklist.push(pt);
- return;
+ inline void add_to_worklist(PointsToNode* pt) {
+ PointsToNode* ptf = pt;
+ uint pidx_bias = 0;
+ if (PointsToNode::is_base_use(pt)) {
+ // Create a separate entry in _in_worklist for a marked base edge
+ // because _worklist may have an entry for a normal edge pointing
+ // to the same node. To separate them use _next_pidx as bias.
+ ptf = PointsToNode::get_use_node(pt)->as_Field();
+ pidx_bias = _next_pidx;
+ }
+ if (!_in_worklist.test_set(ptf->pidx() + pidx_bias)) {
+ _worklist.append(pt);
+ }
}
// Put on worklist all uses of this node.
- void add_uses_to_worklist(PointsToNode* pt) {
- for (UseIterator i(pt); i.has_next(); i.next())
- _worklist.push(i.get());
+ inline void add_uses_to_worklist(PointsToNode* pt) {
+ for (UseIterator i(pt); i.has_next(); i.next()) {
+ add_to_worklist(i.get());
+ }
}
// Put on worklist all field's uses and related field nodes.
@@ -517,8 +528,8 @@
}
// Helper functions
bool is_oop_field(Node* n, int offset, bool* unsafe);
- static Node* get_addp_base(Node *addp);
- static Node* find_second_addp(Node* addp, Node* n);
+ static Node* get_addp_base(Node *addp);
+ static Node* find_second_addp(Node* addp, Node* n);
// offset of a field reference
int address_offset(Node* adr, PhaseTransform *phase);
@@ -587,4 +598,17 @@
#endif
};
+inline PointsToNode::PointsToNode(ConnectionGraph *CG, Node* n, EscapeState es, NodeType type):
+ _edges(CG->_compile->comp_arena(), 2, 0, NULL),
+ _uses (CG->_compile->comp_arena(), 2, 0, NULL),
+ _node(n),
+ _idx(n->_idx),
+ _pidx(CG->next_pidx()),
+ _type((u1)type),
+ _escape((u1)es),
+ _fields_escape((u1)es),
+ _flags(ScalarReplaceable) {
+ assert(n != NULL && es != UnknownEscape, "sanity");
+}
+
#endif // SHARE_VM_OPTO_ESCAPE_HPP
--- a/hotspot/src/share/vm/opto/lcm.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/opto/lcm.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -464,9 +464,7 @@
iop == Op_CreateEx || // Create-exception must start block
iop == Op_CheckCastPP
) {
- // select the node n
- // remove n from worklist and retain the order of remaining nodes
- worklist.remove((uint)i);
+ worklist.map(i,worklist.pop());
return n;
}
@@ -552,9 +550,7 @@
assert(idx >= 0, "index should be set");
Node *n = worklist[(uint)idx]; // Get the winner
- // select the node n
- // remove n from worklist and retain the order of remaining nodes
- worklist.remove((uint)idx);
+ worklist.map((uint)idx, worklist.pop()); // Compress worklist
return n;
}
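Editor's note: both call sites now drop the selected node with a swap-with-last compaction (map the popped last element into slot i) rather than the order-preserving remove; that is O(1) instead of O(n), and the worklist order is irrelevant here. A generic illustration of the idiom on a std::vector (not HotSpot's Node_List, just the same idea):

  #include <cstddef>
  #include <vector>

  // Swap-remove: overwrite slot i with the last element, then shrink by one.
  // Constant time, but element order is not preserved.
  template <typename T>
  void swap_remove(std::vector<T>& worklist, size_t i) {
    worklist[i] = worklist.back();
    worklist.pop_back();
  }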
--- a/hotspot/src/share/vm/opto/library_call.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/opto/library_call.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -268,6 +268,7 @@
bool inline_fp_conversions(vmIntrinsics::ID id);
bool inline_number_methods(vmIntrinsics::ID id);
bool inline_reference_get();
+ bool inline_Class_cast();
bool inline_aescrypt_Block(vmIntrinsics::ID id);
bool inline_cipherBlockChaining_AESCrypt(vmIntrinsics::ID id);
Node* inline_cipherBlockChaining_AESCrypt_predicate(bool decrypting);
@@ -869,6 +870,8 @@
case vmIntrinsics::_Reference_get: return inline_reference_get();
+ case vmIntrinsics::_Class_cast: return inline_Class_cast();
+
case vmIntrinsics::_aescrypt_encryptBlock:
case vmIntrinsics::_aescrypt_decryptBlock: return inline_aescrypt_Block(intrinsic_id());
@@ -3546,6 +3549,89 @@
return true;
}
+//-------------------------inline_Class_cast-------------------
+bool LibraryCallKit::inline_Class_cast() {
+ Node* mirror = argument(0); // Class
+ Node* obj = argument(1);
+ const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
+ if (mirror_con == NULL) {
+ return false; // dead path (mirror->is_top()).
+ }
+ if (obj == NULL || obj->is_top()) {
+ return false; // dead path
+ }
+ const TypeOopPtr* tp = _gvn.type(obj)->isa_oopptr();
+
+ // First, see if Class.cast() can be folded statically.
+ // java_mirror_type() returns non-null for compile-time Class constants.
+ ciType* tm = mirror_con->java_mirror_type();
+ if (tm != NULL && tm->is_klass() &&
+ tp != NULL && tp->klass() != NULL) {
+ if (!tp->klass()->is_loaded()) {
+ // Don't use intrinsic when class is not loaded.
+ return false;
+ } else {
+ int static_res = C->static_subtype_check(tm->as_klass(), tp->klass());
+ if (static_res == Compile::SSC_always_true) {
+ // isInstance() is true - fold the code.
+ set_result(obj);
+ return true;
+ } else if (static_res == Compile::SSC_always_false) {
+ // Don't use intrinsic, have to throw ClassCastException.
+ // If the reference is null, the non-intrinsic bytecode will
+ // be optimized appropriately.
+ return false;
+ }
+ }
+ }
+
+ // Bailout intrinsic and do normal inlining if exception path is frequent.
+ if (too_many_traps(Deoptimization::Reason_intrinsic)) {
+ return false;
+ }
+
+ // Generate dynamic checks.
+ // Class.cast() is java implementation of _checkcast bytecode.
+ // Do checkcast (Parse::do_checkcast()) optimizations here.
+
+ mirror = null_check(mirror);
+ // If mirror is dead, only null-path is taken.
+ if (stopped()) {
+ return true;
+ }
+
+ // Not-subtype or the mirror's klass ptr is NULL (in case it is a primitive).
+ enum { _bad_type_path = 1, _prim_path = 2, PATH_LIMIT };
+ RegionNode* region = new RegionNode(PATH_LIMIT);
+ record_for_igvn(region);
+
+ // Now load the mirror's klass metaobject, and null-check it.
+ // If kls is null, we have a primitive mirror and
+ // nothing is an instance of a primitive type.
+ Node* kls = load_klass_from_mirror(mirror, false, region, _prim_path);
+
+ Node* res = top();
+ if (!stopped()) {
+ Node* bad_type_ctrl = top();
+ // Do checkcast optimizations.
+ res = gen_checkcast(obj, kls, &bad_type_ctrl);
+ region->init_req(_bad_type_path, bad_type_ctrl);
+ }
+ if (region->in(_prim_path) != top() ||
+ region->in(_bad_type_path) != top()) {
+ // Let Interpreter throw ClassCastException.
+ PreserveJVMState pjvms(this);
+ set_control(_gvn.transform(region));
+ uncommon_trap(Deoptimization::Reason_intrinsic,
+ Deoptimization::Action_maybe_recompile);
+ }
+ if (!stopped()) {
+ set_result(res);
+ }
+ return true;
+}
+
+
//--------------------------inline_native_subtype_check------------------------
// This intrinsic takes the JNI calls out of the heart of
// UnsafeFieldAccessorImpl.set, which improves Field.set, readObject, etc.
@@ -4611,6 +4697,10 @@
Node* dest_offset = argument(3); // type: int
Node* length = argument(4); // type: int
+ // Check for allocation before we add nodes that would confuse
+ // tightly_coupled_allocation()
+ AllocateArrayNode* alloc = tightly_coupled_allocation(dest, NULL);
+
// The following tests must be performed
// (1) src and dest are arrays.
// (2) src and dest arrays must have elements of the same BasicType
@@ -4784,7 +4874,6 @@
return true;
}
- AllocateArrayNode* alloc = tightly_coupled_allocation(dest, NULL);
ArrayCopyNode* ac = ArrayCopyNode::make(this, true, src, src_offset, dest, dest_offset, length, alloc != NULL,
// Create LoadRange and LoadKlass nodes for use during macro expansion here
// so the compiler has a chance to eliminate them: during macro expansion,
--- a/hotspot/src/share/vm/opto/memnode.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/opto/memnode.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -1257,6 +1257,16 @@
result = new ConvI2LNode(phase->transform(result));
}
#endif
+ // Boxing/unboxing can be done from signed & unsigned loads (e.g. LoadUB -> ... -> LoadB pair).
+ // Need to preserve unboxing load type if it is unsigned.
+ switch(this->Opcode()) {
+ case Op_LoadUB:
+ result = new AndINode(phase->transform(result), phase->intcon(0xFF));
+ break;
+ case Op_LoadUS:
+ result = new AndINode(phase->transform(result), phase->intcon(0xFFFF));
+ break;
+ }
return result;
}
}
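Editor's note: the masking added above exists because a LoadUB/LoadUS folded onto the value stored during boxing would otherwise inherit the signed load's sign extension. A tiny worked example of the difference (plain C++ arithmetic, not compiler IR):

  #include <cstdint>

  int8_t  stored     = (int8_t)0xFF;      // the byte pattern that was stored
  int32_t signed_v   = stored;            // sign-extended: -1 (what a signed load yields)
  int32_t unsigned_v = signed_v & 0xFF;   // 255, restored by the AndI(0xFF) node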
--- a/hotspot/src/share/vm/opto/mulnode.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/opto/mulnode.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -610,7 +610,7 @@
// convert masks which would cause a sign extension of the integer
// value. This check includes UI2L masks (0x00000000FFFFFFFF) which
// would be optimized away later in Identity.
- if (op == Op_ConvI2L && (mask & CONST64(0xFFFFFFFF80000000)) == 0) {
+ if (op == Op_ConvI2L && (mask & UCONST64(0xFFFFFFFF80000000)) == 0) {
Node* andi = new AndINode(in1->in(1), phase->intcon(mask));
andi = phase->transform(andi);
return new ConvI2LNode(andi);
--- a/hotspot/src/share/vm/opto/output.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/opto/output.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -1166,7 +1166,6 @@
// Have we run out of code space?
if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
C->record_failure("CodeCache is full");
- CompileBroker::handle_full_code_cache(CodeBlobType::NonNMethod);
return NULL;
}
// Configure the code buffer.
@@ -1491,7 +1490,6 @@
cb->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
C->record_failure("CodeCache is full");
- CompileBroker::handle_full_code_cache(CodeBlobType::NonNMethod);
return;
}
@@ -1648,7 +1646,6 @@
// One last check for failed CodeBuffer::expand:
if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
C->record_failure("CodeCache is full");
- CompileBroker::handle_full_code_cache(CodeBlobType::NonNMethod);
return;
}
--- a/hotspot/src/share/vm/prims/jni.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/prims/jni.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -951,8 +951,9 @@
// Optimized path if we have the bitvector form of signature
void iterate( uint64_t fingerprint ) {
- if ( fingerprint == UCONST64(-1) ) SignatureIterator::iterate();// Must be too many arguments
- else {
+ if (fingerprint == (uint64_t)CONST64(-1)) {
+ SignatureIterator::iterate(); // Must be too many arguments
+ } else {
_return_type = (BasicType)((fingerprint >> static_feature_size) &
result_feature_mask);
@@ -1022,8 +1023,9 @@
// Optimized path if we have the bitvector form of signature
void iterate( uint64_t fingerprint ) {
- if ( fingerprint == UCONST64(-1) ) SignatureIterator::iterate(); // Must be too many arguments
- else {
+ if (fingerprint == (uint64_t)CONST64(-1)) {
+ SignatureIterator::iterate(); // Must be too many arguments
+ } else {
_return_type = (BasicType)((fingerprint >> static_feature_size) &
result_feature_mask);
assert(fingerprint, "Fingerprint should not be 0");
--- a/hotspot/src/share/vm/prims/jvm.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/prims/jvm.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -2583,7 +2583,14 @@
int jio_vsnprintf(char *str, size_t count, const char *fmt, va_list args) {
// see bug 4399518, 4417214
if ((intptr_t)count <= 0) return -1;
- return vsnprintf(str, count, fmt, args);
+
+ int result = vsnprintf(str, count, fmt, args);
+ if ((result > 0 && (size_t)result >= count) || result == -1) {
+ str[count - 1] = '\0';
+ result = -1;
+ }
+
+ return result;
}
ATTRIBUTE_PRINTF(3, 0)
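The rewritten jio_vsnprintf above gives callers one contract: the buffer is always NUL-terminated, and both truncation and vsnprintf errors come back as -1. A hedged stand-alone sketch of that contract (standard C++; bounded_format is an illustrative name, and the VM version additionally rejects non-positive counts up front):

    #include <cassert>
    #include <cstdarg>
    #include <cstdio>
    #include <cstring>

    static int bounded_format(char* str, size_t count, const char* fmt, ...) {
      va_list args;
      va_start(args, fmt);
      int result = vsnprintf(str, count, fmt, args);
      va_end(args);
      if ((result > 0 && (size_t)result >= count) || result == -1) {
        str[count - 1] = '\0';   // always terminate inside the buffer
        result = -1;             // report truncation and errors uniformly
      }
      return result;
    }

    int main() {
      char buf[8];
      assert(bounded_format(buf, sizeof(buf), "%s", "too long for buf") == -1);
      assert(strlen(buf) == 7);                                // truncated but terminated
      assert(bounded_format(buf, sizeof(buf), "%d", 42) == 2); // untruncated result unchanged
      return 0;
    }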
@@ -3271,8 +3278,10 @@
THROW_0(vmSymbols::java_lang_NullPointerException());
}
oop a = JNIHandles::resolve_non_null(arr);
- if (!a->is_array() || (type_array_only && !a->is_typeArray())) {
+ if (!a->is_array()) {
THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "Argument is not an array");
+ } else if (type_array_only && !a->is_typeArray()) {
+ THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "Argument is not an array of primitive type");
}
return arrayOop(a);
}
--- a/hotspot/src/share/vm/prims/jvmtiClassFileReconstituter.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/prims/jvmtiClassFileReconstituter.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -41,6 +41,7 @@
void JvmtiClassFileReconstituter::write_field_infos() {
HandleMark hm(thread());
Array<AnnotationArray*>* fields_anno = ikh()->fields_annotations();
+ Array<AnnotationArray*>* fields_type_anno = ikh()->fields_type_annotations();
// Compute the real number of Java fields
int java_fields = ikh()->java_fields_count();
@@ -55,6 +56,7 @@
// int offset = ikh()->field_offset( index );
int generic_signature_index = fs.generic_signature_index();
AnnotationArray* anno = fields_anno == NULL ? NULL : fields_anno->at(fs.index());
+ AnnotationArray* type_anno = fields_type_anno == NULL ? NULL : fields_type_anno->at(fs.index());
// JVMSpec| field_info {
// JVMSpec| u2 access_flags;
@@ -80,6 +82,9 @@
if (anno != NULL) {
++attr_count; // has RuntimeVisibleAnnotations attribute
}
+ if (type_anno != NULL) {
+ ++attr_count; // has RuntimeVisibleTypeAnnotations attribute
+ }
write_u2(attr_count);
@@ -97,6 +102,9 @@
if (anno != NULL) {
write_annotations_attribute("RuntimeVisibleAnnotations", anno);
}
+ if (type_anno != NULL) {
+ write_annotations_attribute("RuntimeVisibleTypeAnnotations", type_anno);
+ }
}
}
@@ -537,6 +545,7 @@
AnnotationArray* anno = method->annotations();
AnnotationArray* param_anno = method->parameter_annotations();
AnnotationArray* default_anno = method->annotation_default();
+ AnnotationArray* type_anno = method->type_annotations();
// skip generated default interface methods
if (method->is_overpass()) {
@@ -572,6 +581,9 @@
if (param_anno != NULL) {
++attr_count; // has RuntimeVisibleParameterAnnotations attribute
}
+ if (type_anno != NULL) {
+ ++attr_count; // has RuntimeVisibleTypeAnnotations attribute
+ }
write_u2(attr_count);
if (const_method->code_size() > 0) {
@@ -596,6 +608,9 @@
if (param_anno != NULL) {
write_annotations_attribute("RuntimeVisibleParameterAnnotations", param_anno);
}
+ if (type_anno != NULL) {
+ write_annotations_attribute("RuntimeVisibleTypeAnnotations", type_anno);
+ }
}
// Write the class attributes portion of ClassFile structure
@@ -605,6 +620,7 @@
u2 inner_classes_length = inner_classes_attribute_length();
Symbol* generic_signature = ikh()->generic_signature();
AnnotationArray* anno = ikh()->class_annotations();
+ AnnotationArray* type_anno = ikh()->class_type_annotations();
int attr_count = 0;
if (generic_signature != NULL) {
@@ -622,6 +638,9 @@
if (anno != NULL) {
++attr_count; // has RuntimeVisibleAnnotations attribute
}
+ if (type_anno != NULL) {
+ ++attr_count; // has RuntimeVisibleTypeAnnotations attribute
+ }
if (cpool()->operands() != NULL) {
++attr_count;
}
@@ -643,6 +662,9 @@
if (anno != NULL) {
write_annotations_attribute("RuntimeVisibleAnnotations", anno);
}
+ if (type_anno != NULL) {
+ write_annotations_attribute("RuntimeVisibleTypeAnnotations", type_anno);
+ }
if (cpool()->operands() != NULL) {
write_bootstrapmethod_attribute();
}
--- a/hotspot/src/share/vm/prims/jvmtiEnv.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/prims/jvmtiEnv.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -2263,6 +2263,8 @@
int result_length = instanceK_h->methods()->length();
jmethodID* result_list = (jmethodID*)jvmtiMalloc(result_length * sizeof(jmethodID));
int index;
+ bool jmethodids_found = true;
+
if (JvmtiExport::can_maintain_original_method_order()) {
// Use the original method ordering indices stored in the class, so we can emit
// jmethodIDs in the order they appeared in the class file
@@ -2270,14 +2272,40 @@
Method* m = instanceK_h->methods()->at(index);
int original_index = instanceK_h->method_ordering()->at(index);
assert(original_index >= 0 && original_index < result_length, "invalid original method index");
- jmethodID id = m->jmethod_id();
+ jmethodID id;
+ if (jmethodids_found) {
+ id = m->find_jmethod_id_or_null();
+ if (id == NULL) {
+ // If we find an uninitialized value, make sure there is
+ // enough space for all the uninitialized values we might
+ // find.
+ instanceK_h->ensure_space_for_methodids(index);
+ jmethodids_found = false;
+ id = m->jmethod_id();
+ }
+ } else {
+ id = m->jmethod_id();
+ }
result_list[original_index] = id;
}
} else {
// otherwise just copy in any order
for (index = 0; index < result_length; index++) {
Method* m = instanceK_h->methods()->at(index);
- jmethodID id = m->jmethod_id();
+ jmethodID id;
+ if (jmethodids_found) {
+ id = m->find_jmethod_id_or_null();
+ if (id == NULL) {
+ // If we find an uninitialized value, make sure there is
+ // enough space for all the uninitialized values we might
+ // find.
+ instanceK_h->ensure_space_for_methodids(index);
+ jmethodids_found = false;
+ id = m->jmethod_id();
+ }
+ } else {
+ id = m->jmethod_id();
+ }
result_list[index] = id;
}
}
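Both loops above use the same find-or-allocate idiom: probe for an existing jmethodID cheaply, and on the first miss reserve space once for all remaining ids before falling back to the allocating path. A hedged sketch of that idiom factored into a helper (the helper name and the raw InstanceKlass* parameter are illustrative; the patch keeps the logic inline and works through an instanceKlassHandle):

    static jmethodID id_for(InstanceKlass* ik, Method* m, int index, bool& ids_found) {
      if (ids_found) {
        jmethodID id = m->find_jmethod_id_or_null();  // no allocation, may return NULL
        if (id != NULL) {
          return id;
        }
        // First uninitialized id seen: make room for this and all later methods once,
        // then switch to the allocating path for the rest of the loop.
        ik->ensure_space_for_methodids(index);
        ids_found = false;
      }
      return m->jmethod_id();  // allocates the id if it does not exist yet
    }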
--- a/hotspot/src/share/vm/prims/jvmtiRedefineClasses.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/prims/jvmtiRedefineClasses.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -1569,6 +1569,29 @@
return false;
}
+ // rewrite constant pool references in the class_type_annotations:
+ if (!rewrite_cp_refs_in_class_type_annotations(scratch_class, THREAD)) {
+ // propagate failure back to caller
+ return false;
+ }
+
+ // rewrite constant pool references in the fields_type_annotations:
+ if (!rewrite_cp_refs_in_fields_type_annotations(scratch_class, THREAD)) {
+ // propagate failure back to caller
+ return false;
+ }
+
+ // rewrite constant pool references in the methods_type_annotations:
+ if (!rewrite_cp_refs_in_methods_type_annotations(scratch_class, THREAD)) {
+ // propagate failure back to caller
+ return false;
+ }
+
+ // There can be type annotations in the Code part of a method_info attribute.
+ // These annotations are not accessible, even by reflection.
+ // Currently they are not even parsed by the ClassFileParser.
+ // If runtime access is added they will also need to be rewritten.
+
// rewrite source file name index:
u2 source_file_name_idx = scratch_class->source_file_name_index();
if (source_file_name_idx != 0) {
@@ -2239,6 +2262,588 @@
} // end rewrite_cp_refs_in_methods_default_annotations()
+// Rewrite constant pool references in a class_type_annotations field.
+bool VM_RedefineClasses::rewrite_cp_refs_in_class_type_annotations(
+ instanceKlassHandle scratch_class, TRAPS) {
+
+ AnnotationArray* class_type_annotations = scratch_class->class_type_annotations();
+ if (class_type_annotations == NULL || class_type_annotations->length() == 0) {
+ // no class_type_annotations so nothing to do
+ return true;
+ }
+
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("class_type_annotations length=%d", class_type_annotations->length()));
+
+ int byte_i = 0; // byte index into class_type_annotations
+ return rewrite_cp_refs_in_type_annotations_typeArray(class_type_annotations,
+ byte_i, "ClassFile", THREAD);
+} // end rewrite_cp_refs_in_class_type_annotations()
+
+
+// Rewrite constant pool references in a fields_type_annotations field.
+bool VM_RedefineClasses::rewrite_cp_refs_in_fields_type_annotations(
+ instanceKlassHandle scratch_class, TRAPS) {
+
+ Array<AnnotationArray*>* fields_type_annotations = scratch_class->fields_type_annotations();
+ if (fields_type_annotations == NULL || fields_type_annotations->length() == 0) {
+ // no fields_type_annotations so nothing to do
+ return true;
+ }
+
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("fields_type_annotations length=%d", fields_type_annotations->length()));
+
+ for (int i = 0; i < fields_type_annotations->length(); i++) {
+ AnnotationArray* field_type_annotations = fields_type_annotations->at(i);
+ if (field_type_annotations == NULL || field_type_annotations->length() == 0) {
+ // this field does not have any annotations so skip it
+ continue;
+ }
+
+ int byte_i = 0; // byte index into field_type_annotations
+ if (!rewrite_cp_refs_in_type_annotations_typeArray(field_type_annotations,
+ byte_i, "field_info", THREAD)) {
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("bad field_type_annotations at %d", i));
+ // propagate failure back to caller
+ return false;
+ }
+ }
+
+ return true;
+} // end rewrite_cp_refs_in_fields_type_annotations()
+
+
+// Rewrite constant pool references in a methods_type_annotations field.
+bool VM_RedefineClasses::rewrite_cp_refs_in_methods_type_annotations(
+ instanceKlassHandle scratch_class, TRAPS) {
+
+ for (int i = 0; i < scratch_class->methods()->length(); i++) {
+ Method* m = scratch_class->methods()->at(i);
+ AnnotationArray* method_type_annotations = m->constMethod()->type_annotations();
+
+ if (method_type_annotations == NULL || method_type_annotations->length() == 0) {
+ // this method does not have any annotations so skip it
+ continue;
+ }
+
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("methods type_annotations length=%d", method_type_annotations->length()));
+
+ int byte_i = 0; // byte index into method_type_annotations
+ if (!rewrite_cp_refs_in_type_annotations_typeArray(method_type_annotations,
+ byte_i, "method_info", THREAD)) {
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("bad method_type_annotations at %d", i));
+ // propagate failure back to caller
+ return false;
+ }
+ }
+
+ return true;
+} // end rewrite_cp_refs_in_methods_type_annotations()
+
+
+// Rewrite constant pool references in a type_annotations
+// field. This "structure" is adapted from the
+// RuntimeVisibleTypeAnnotations_attribute described in
+// section 4.7.20 of the Java SE 8 Edition of the VM spec:
+//
+// type_annotations_typeArray {
+// u2 num_annotations;
+// type_annotation annotations[num_annotations];
+// }
+//
+bool VM_RedefineClasses::rewrite_cp_refs_in_type_annotations_typeArray(
+ AnnotationArray* type_annotations_typeArray, int &byte_i_ref,
+ const char * location_mesg, TRAPS) {
+
+ if ((byte_i_ref + 2) > type_annotations_typeArray->length()) {
+ // not enough room for num_annotations field
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("length() is too small for num_annotations field"));
+ return false;
+ }
+
+ u2 num_annotations = Bytes::get_Java_u2((address)
+ type_annotations_typeArray->adr_at(byte_i_ref));
+ byte_i_ref += 2;
+
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("num_type_annotations=%d", num_annotations));
+
+ int calc_num_annotations = 0;
+ for (; calc_num_annotations < num_annotations; calc_num_annotations++) {
+ if (!rewrite_cp_refs_in_type_annotation_struct(type_annotations_typeArray,
+ byte_i_ref, location_mesg, THREAD)) {
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("bad type_annotation_struct at %d", calc_num_annotations));
+ // propagate failure back to caller
+ return false;
+ }
+ }
+ assert(num_annotations == calc_num_annotations, "sanity check");
+
+ if (byte_i_ref != type_annotations_typeArray->length()) {
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+      ("read wrong number of bytes at end of processing "
+ "type_annotations_typeArray (%d of %d bytes were read)",
+ byte_i_ref, type_annotations_typeArray->length()));
+ return false;
+ }
+
+ return true;
+} // end rewrite_cp_refs_in_type_annotations_typeArray()
+
+
+// Rewrite constant pool references in a type_annotation
+// field. This "structure" is adapted from the
+// RuntimeVisibleTypeAnnotations_attribute described in
+// section 4.7.20 of the Java SE 8 Edition of the VM spec:
+//
+// type_annotation {
+// u1 target_type;
+// union {
+// type_parameter_target;
+// supertype_target;
+// type_parameter_bound_target;
+// empty_target;
+// method_formal_parameter_target;
+// throws_target;
+// localvar_target;
+// catch_target;
+// offset_target;
+// type_argument_target;
+// } target_info;
+// type_path target_path;
+// annotation anno;
+// }
+//
+bool VM_RedefineClasses::rewrite_cp_refs_in_type_annotation_struct(
+ AnnotationArray* type_annotations_typeArray, int &byte_i_ref,
+ const char * location_mesg, TRAPS) {
+
+ if (!skip_type_annotation_target(type_annotations_typeArray,
+ byte_i_ref, location_mesg, THREAD)) {
+ return false;
+ }
+
+ if (!skip_type_annotation_type_path(type_annotations_typeArray,
+ byte_i_ref, THREAD)) {
+ return false;
+ }
+
+ if (!rewrite_cp_refs_in_annotation_struct(type_annotations_typeArray,
+ byte_i_ref, THREAD)) {
+ return false;
+ }
+
+ return true;
+} // end rewrite_cp_refs_in_type_annotation_struct()
+
+
+// Read, verify and skip over the target_type and target_info part
+// so that rewriting can continue in the later parts of the struct.
+//
+// u1 target_type;
+// union {
+// type_parameter_target;
+// supertype_target;
+// type_parameter_bound_target;
+// empty_target;
+// method_formal_parameter_target;
+// throws_target;
+// localvar_target;
+// catch_target;
+// offset_target;
+// type_argument_target;
+// } target_info;
+//
+bool VM_RedefineClasses::skip_type_annotation_target(
+ AnnotationArray* type_annotations_typeArray, int &byte_i_ref,
+ const char * location_mesg, TRAPS) {
+
+ if ((byte_i_ref + 1) > type_annotations_typeArray->length()) {
+ // not enough room for a target_type let alone the rest of a type_annotation
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("length() is too small for a target_type"));
+ return false;
+ }
+
+ u1 target_type = type_annotations_typeArray->at(byte_i_ref);
+ byte_i_ref += 1;
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD, ("target_type=0x%.2x", target_type));
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD, ("location=%s", location_mesg));
+
+ // Skip over target_info
+ switch (target_type) {
+ case 0x00:
+ // kind: type parameter declaration of generic class or interface
+ // location: ClassFile
+ case 0x01:
+ // kind: type parameter declaration of generic method or constructor
+ // location: method_info
+
+ {
+ // struct:
+ // type_parameter_target {
+ // u1 type_parameter_index;
+ // }
+ //
+ if ((byte_i_ref + 1) > type_annotations_typeArray->length()) {
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("length() is too small for a type_parameter_target"));
+ return false;
+ }
+
+ u1 type_parameter_index = type_annotations_typeArray->at(byte_i_ref);
+ byte_i_ref += 1;
+
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("type_parameter_target: type_parameter_index=%d",
+ type_parameter_index));
+ } break;
+
+ case 0x10:
+ // kind: type in extends clause of class or interface declaration
+ // (including the direct superclass of an anonymous class declaration),
+ // or in implements clause of interface declaration
+ // location: ClassFile
+
+ {
+ // struct:
+ // supertype_target {
+ // u2 supertype_index;
+ // }
+ //
+ if ((byte_i_ref + 2) > type_annotations_typeArray->length()) {
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("length() is too small for a supertype_target"));
+ return false;
+ }
+
+ u2 supertype_index = Bytes::get_Java_u2((address)
+ type_annotations_typeArray->adr_at(byte_i_ref));
+ byte_i_ref += 2;
+
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("supertype_target: supertype_index=%d", supertype_index));
+ } break;
+
+ case 0x11:
+ // kind: type in bound of type parameter declaration of generic class or interface
+ // location: ClassFile
+ case 0x12:
+ // kind: type in bound of type parameter declaration of generic method or constructor
+ // location: method_info
+
+ {
+ // struct:
+ // type_parameter_bound_target {
+ // u1 type_parameter_index;
+ // u1 bound_index;
+ // }
+ //
+ if ((byte_i_ref + 2) > type_annotations_typeArray->length()) {
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("length() is too small for a type_parameter_bound_target"));
+ return false;
+ }
+
+ u1 type_parameter_index = type_annotations_typeArray->at(byte_i_ref);
+ byte_i_ref += 1;
+ u1 bound_index = type_annotations_typeArray->at(byte_i_ref);
+ byte_i_ref += 1;
+
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("type_parameter_bound_target: type_parameter_index=%d, bound_index=%d",
+ type_parameter_index, bound_index));
+ } break;
+
+ case 0x13:
+ // kind: type in field declaration
+ // location: field_info
+ case 0x14:
+ // kind: return type of method, or type of newly constructed object
+ // location: method_info
+ case 0x15:
+ // kind: receiver type of method or constructor
+ // location: method_info
+
+ {
+ // struct:
+ // empty_target {
+ // }
+ //
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("empty_target"));
+ } break;
+
+ case 0x16:
+ // kind: type in formal parameter declaration of method, constructor, or lambda expression
+ // location: method_info
+
+ {
+ // struct:
+ // formal_parameter_target {
+ // u1 formal_parameter_index;
+ // }
+ //
+ if ((byte_i_ref + 1) > type_annotations_typeArray->length()) {
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("length() is too small for a formal_parameter_target"));
+ return false;
+ }
+
+ u1 formal_parameter_index = type_annotations_typeArray->at(byte_i_ref);
+ byte_i_ref += 1;
+
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("formal_parameter_target: formal_parameter_index=%d",
+ formal_parameter_index));
+ } break;
+
+ case 0x17:
+ // kind: type in throws clause of method or constructor
+ // location: method_info
+
+ {
+ // struct:
+ // throws_target {
+ // u2 throws_type_index
+ // }
+ //
+ if ((byte_i_ref + 2) > type_annotations_typeArray->length()) {
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("length() is too small for a throws_target"));
+ return false;
+ }
+
+ u2 throws_type_index = Bytes::get_Java_u2((address)
+ type_annotations_typeArray->adr_at(byte_i_ref));
+ byte_i_ref += 2;
+
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("throws_target: throws_type_index=%d", throws_type_index));
+ } break;
+
+ case 0x40:
+ // kind: type in local variable declaration
+ // location: Code
+ case 0x41:
+ // kind: type in resource variable declaration
+ // location: Code
+
+ {
+ // struct:
+ // localvar_target {
+ // u2 table_length;
+ // struct {
+ // u2 start_pc;
+ // u2 length;
+ // u2 index;
+ // } table[table_length];
+ // }
+ //
+ if ((byte_i_ref + 2) > type_annotations_typeArray->length()) {
+ // not enough room for a table_length let alone the rest of a localvar_target
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("length() is too small for a localvar_target table_length"));
+ return false;
+ }
+
+ u2 table_length = Bytes::get_Java_u2((address)
+ type_annotations_typeArray->adr_at(byte_i_ref));
+ byte_i_ref += 2;
+
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("localvar_target: table_length=%d", table_length));
+
+ int table_struct_size = 2 + 2 + 2; // 3 u2 variables per table entry
+ int table_size = table_length * table_struct_size;
+
+ if ((byte_i_ref + table_size) > type_annotations_typeArray->length()) {
+ // not enough room for a table
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("length() is too small for a table array of length %d", table_length));
+ return false;
+ }
+
+ // Skip over table
+ byte_i_ref += table_size;
+ } break;
+
+ case 0x42:
+ // kind: type in exception parameter declaration
+ // location: Code
+
+ {
+ // struct:
+ // catch_target {
+ // u2 exception_table_index;
+ // }
+ //
+ if ((byte_i_ref + 2) > type_annotations_typeArray->length()) {
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("length() is too small for a catch_target"));
+ return false;
+ }
+
+ u2 exception_table_index = Bytes::get_Java_u2((address)
+ type_annotations_typeArray->adr_at(byte_i_ref));
+ byte_i_ref += 2;
+
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("catch_target: exception_table_index=%d", exception_table_index));
+ } break;
+
+ case 0x43:
+ // kind: type in instanceof expression
+ // location: Code
+ case 0x44:
+ // kind: type in new expression
+ // location: Code
+ case 0x45:
+ // kind: type in method reference expression using ::new
+ // location: Code
+ case 0x46:
+ // kind: type in method reference expression using ::Identifier
+ // location: Code
+
+ {
+ // struct:
+ // offset_target {
+ // u2 offset;
+ // }
+ //
+ if ((byte_i_ref + 2) > type_annotations_typeArray->length()) {
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+        ("length() is too small for an offset_target"));
+ return false;
+ }
+
+ u2 offset = Bytes::get_Java_u2((address)
+ type_annotations_typeArray->adr_at(byte_i_ref));
+ byte_i_ref += 2;
+
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("offset_target: offset=%d", offset));
+ } break;
+
+ case 0x47:
+ // kind: type in cast expression
+ // location: Code
+ case 0x48:
+ // kind: type argument for generic constructor in new expression or
+ // explicit constructor invocation statement
+ // location: Code
+ case 0x49:
+ // kind: type argument for generic method in method invocation expression
+ // location: Code
+ case 0x4A:
+ // kind: type argument for generic constructor in method reference expression using ::new
+ // location: Code
+ case 0x4B:
+ // kind: type argument for generic method in method reference expression using ::Identifier
+ // location: Code
+
+ {
+ // struct:
+ // type_argument_target {
+ // u2 offset;
+ // u1 type_argument_index;
+ // }
+ //
+ if ((byte_i_ref + 3) > type_annotations_typeArray->length()) {
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("length() is too small for a type_argument_target"));
+ return false;
+ }
+
+ u2 offset = Bytes::get_Java_u2((address)
+ type_annotations_typeArray->adr_at(byte_i_ref));
+ byte_i_ref += 2;
+ u1 type_argument_index = type_annotations_typeArray->at(byte_i_ref);
+ byte_i_ref += 1;
+
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("type_argument_target: offset=%d, type_argument_index=%d",
+ offset, type_argument_index));
+ } break;
+
+ default:
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("unknown target_type"));
+#ifdef ASSERT
+ ShouldNotReachHere();
+#endif
+ return false;
+ }
+
+ return true;
+} // end skip_type_annotation_target()
+
+
+// Read, verify and skip over the type_path part so that rewriting
+// can continue in the later parts of the struct.
+//
+// type_path {
+// u1 path_length;
+// {
+// u1 type_path_kind;
+// u1 type_argument_index;
+// } path[path_length];
+// }
+//
+bool VM_RedefineClasses::skip_type_annotation_type_path(
+ AnnotationArray* type_annotations_typeArray, int &byte_i_ref, TRAPS) {
+
+ if ((byte_i_ref + 1) > type_annotations_typeArray->length()) {
+ // not enough room for a path_length let alone the rest of the type_path
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("length() is too small for a type_path"));
+ return false;
+ }
+
+ u1 path_length = type_annotations_typeArray->at(byte_i_ref);
+ byte_i_ref += 1;
+
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("type_path: path_length=%d", path_length));
+
+ int calc_path_length = 0;
+ for (; calc_path_length < path_length; calc_path_length++) {
+ if ((byte_i_ref + 1 + 1) > type_annotations_typeArray->length()) {
+ // not enough room for a path
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("length() is too small for path entry %d of %d",
+ calc_path_length, path_length));
+ return false;
+ }
+
+ u1 type_path_kind = type_annotations_typeArray->at(byte_i_ref);
+ byte_i_ref += 1;
+ u1 type_argument_index = type_annotations_typeArray->at(byte_i_ref);
+ byte_i_ref += 1;
+
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("type_path: path[%d]: type_path_kind=%d, type_argument_index=%d",
+ calc_path_length, type_path_kind, type_argument_index));
+
+ if (type_path_kind > 3 || (type_path_kind != 3 && type_argument_index != 0)) {
+      // inconsistent type_path values
+ RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+ ("inconsistent type_path values"));
+ return false;
+ }
+ }
+ assert(path_length == calc_path_length, "sanity check");
+
+ return true;
+} // end skip_type_annotation_type_path()
+
+
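As a concrete illustration of the bytes this routine walks (example constructed from JVMS 4.7.20.2, not taken from the patch): an annotation on the element type of a two-dimensional array, such as @Anno String[][], carries two "deeper in array type" path steps, both of which pass the consistency check above (kind <= 3, and a zero type_argument_index for kind != 3):

    //   path_length = 2
    //   path[0] = { type_path_kind = 0, type_argument_index = 0 }   // step into the outer array
    //   path[1] = { type_path_kind = 0, type_argument_index = 0 }   // step into the inner array
    // i.e. the byte sequence:
    static const unsigned char example_type_path[] = { 0x02, 0x00, 0x00, 0x00, 0x00 };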
// Rewrite constant pool references in the method's stackmap table.
// These "structures" are adapted from the StackMapTable_attribute that
// is described in section 4.8.4 of the 6.0 version of the VM spec
@@ -3223,23 +3828,6 @@
void VM_RedefineClasses::swap_annotations(instanceKlassHandle the_class,
instanceKlassHandle scratch_class) {
- // Since there is currently no rewriting of type annotations indexes
- // into the CP, we null out type annotations on scratch_class before
- // we swap annotations with the_class rather than facing the
- // possibility of shipping annotations with broken indexes to
- // Java-land.
- ClassLoaderData* loader_data = scratch_class->class_loader_data();
- AnnotationArray* new_class_type_annotations = scratch_class->class_type_annotations();
- if (new_class_type_annotations != NULL) {
- MetadataFactory::free_array<u1>(loader_data, new_class_type_annotations);
- scratch_class->annotations()->set_class_type_annotations(NULL);
- }
- Array<AnnotationArray*>* new_field_type_annotations = scratch_class->fields_type_annotations();
- if (new_field_type_annotations != NULL) {
- Annotations::free_contents(loader_data, new_field_type_annotations);
- scratch_class->annotations()->set_fields_type_annotations(NULL);
- }
-
// Swap annotation fields values
Annotations* old_annotations = the_class->annotations();
the_class->set_annotations(scratch_class->annotations());
--- a/hotspot/src/share/vm/prims/jvmtiRedefineClasses.hpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/prims/jvmtiRedefineClasses.hpp Wed Jul 05 20:07:30 2017 +0200
@@ -452,6 +452,17 @@
instanceKlassHandle scratch_class, TRAPS);
bool rewrite_cp_refs_in_element_value(
AnnotationArray* class_annotations, int &byte_i_ref, TRAPS);
+ bool rewrite_cp_refs_in_type_annotations_typeArray(
+ AnnotationArray* type_annotations_typeArray, int &byte_i_ref,
+ const char * location_mesg, TRAPS);
+ bool rewrite_cp_refs_in_type_annotation_struct(
+ AnnotationArray* type_annotations_typeArray, int &byte_i_ref,
+ const char * location_mesg, TRAPS);
+ bool skip_type_annotation_target(
+ AnnotationArray* type_annotations_typeArray, int &byte_i_ref,
+ const char * location_mesg, TRAPS);
+ bool skip_type_annotation_type_path(
+ AnnotationArray* type_annotations_typeArray, int &byte_i_ref, TRAPS);
bool rewrite_cp_refs_in_fields_annotations(
instanceKlassHandle scratch_class, TRAPS);
void rewrite_cp_refs_in_method(methodHandle method,
@@ -463,6 +474,12 @@
instanceKlassHandle scratch_class, TRAPS);
bool rewrite_cp_refs_in_methods_parameter_annotations(
instanceKlassHandle scratch_class, TRAPS);
+ bool rewrite_cp_refs_in_class_type_annotations(
+ instanceKlassHandle scratch_class, TRAPS);
+ bool rewrite_cp_refs_in_fields_type_annotations(
+ instanceKlassHandle scratch_class, TRAPS);
+ bool rewrite_cp_refs_in_methods_type_annotations(
+ instanceKlassHandle scratch_class, TRAPS);
void rewrite_cp_refs_in_stack_map_table(methodHandle method, TRAPS);
void rewrite_cp_refs_in_verification_type_info(
address& stackmap_addr_ref, address stackmap_end, u2 frame_i,
--- a/hotspot/src/share/vm/prims/methodHandles.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/prims/methodHandles.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -36,6 +36,7 @@
#include "runtime/reflection.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubRoutines.hpp"
+#include "utilities/exceptions.hpp"
/*
@@ -55,26 +56,30 @@
bool MethodHandles::_enabled = false; // set true after successful native linkage
MethodHandlesAdapterBlob* MethodHandles::_adapter_code = NULL;
-//------------------------------------------------------------------------------
-// MethodHandles::generate_adapters
-//
-void MethodHandles::generate_adapters() {
- if (SystemDictionary::MethodHandle_klass() == NULL) return;
+
+/**
+ * Generates method handle adapters. Returns 'false' if memory allocation
+ * failed and 'true' otherwise.
+ */
+bool MethodHandles::generate_adapters() {
+ if (SystemDictionary::MethodHandle_klass() == NULL) {
+ return true;
+ }
assert(_adapter_code == NULL, "generate only once");
ResourceMark rm;
TraceTime timer("MethodHandles adapters generation", TraceStartupTime);
_adapter_code = MethodHandlesAdapterBlob::create(adapter_code_size);
- if (_adapter_code == NULL)
- vm_exit_out_of_memory(adapter_code_size, OOM_MALLOC_ERROR,
- "CodeCache: no room for MethodHandles adapters");
- {
- CodeBuffer code(_adapter_code);
- MethodHandlesAdapterGenerator g(&code);
- g.generate();
- code.log_section_sizes("MethodHandlesAdapterBlob");
+ if (_adapter_code == NULL) {
+ return false;
}
+
+ CodeBuffer code(_adapter_code);
+ MethodHandlesAdapterGenerator g(&code);
+ g.generate();
+ code.log_section_sizes("MethodHandlesAdapterBlob");
+ return true;
}
//------------------------------------------------------------------------------
@@ -1401,7 +1406,9 @@
}
if (enable_MH) {
- MethodHandles::generate_adapters();
+ if (MethodHandles::generate_adapters() == false) {
+ THROW_MSG(vmSymbols::java_lang_VirtualMachineError(), "Out of space in CodeCache for method handle adapters");
+ }
MethodHandles::set_enabled(true);
}
}
--- a/hotspot/src/share/vm/prims/methodHandles.hpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/prims/methodHandles.hpp Wed Jul 05 20:07:30 2017 +0200
@@ -69,7 +69,7 @@
enum { _suppress_defc = 1, _suppress_name = 2, _suppress_type = 4 };
// Generate MethodHandles adapters.
- static void generate_adapters();
+ static bool generate_adapters();
// Called from MethodHandlesAdapterGenerator.
static address generate_method_handle_interpreter_entry(MacroAssembler* _masm, vmIntrinsics::ID iid);
--- a/hotspot/src/share/vm/prims/unsafe.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/prims/unsafe.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -802,8 +802,7 @@
static inline void throw_new(JNIEnv *env, const char *ename) {
char buf[100];
- strcpy(buf, "java/lang/");
- strcat(buf, ename);
+ jio_snprintf(buf, 100, "%s%s", "java/lang/", ename);
jclass cls = env->FindClass(buf);
if (env->ExceptionCheck()) {
env->ExceptionClear();
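The switch from strcpy/strcat to jio_snprintf above bounds the write to the 100-byte stack buffer, so an over-long exception name is truncated instead of overrunning buf. The difference in one line (illustrative only; ename stands for the caller-supplied class name):

    char buf[100];
    // old: strcpy(buf, "java/lang/"); strcat(buf, ename);        // append had no bound
    jio_snprintf(buf, sizeof(buf), "%s%s", "java/lang/", ename);  // truncates within buf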
--- a/hotspot/src/share/vm/prims/whitebox.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/prims/whitebox.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -282,7 +282,7 @@
// NMT picks it up correctly
WB_ENTRY(jlong, WB_NMTMalloc(JNIEnv* env, jobject o, jlong size))
jlong addr = 0;
- addr = (jlong)(uintptr_t)os::malloc(size, mtTest);
+ addr = (jlong)(uintptr_t)os::malloc(size, mtTest);
return addr;
WB_END
@@ -291,7 +291,7 @@
WB_ENTRY(jlong, WB_NMTMallocWithPseudoStack(JNIEnv* env, jobject o, jlong size, jint pseudo_stack))
address pc = (address)(size_t)pseudo_stack;
NativeCallStack stack(&pc, 1);
- return (jlong)os::malloc(size, mtTest, stack);
+ return (jlong)(uintptr_t)os::malloc(size, mtTest, stack);
WB_END
// Free the memory allocated by NMTAllocTest
@@ -326,15 +326,6 @@
return MemTracker::tracking_level() == NMT_detail;
WB_END
-WB_ENTRY(void, WB_NMTOverflowHashBucket(JNIEnv* env, jobject o, jlong num))
- address pc = (address)1;
- for (jlong index = 0; index < num; index ++) {
- NativeCallStack stack(&pc, 1);
- os::malloc(0, mtTest, stack);
- pc += MallocSiteTable::hash_buckets();
- }
-WB_END
-
WB_ENTRY(jboolean, WB_NMTChangeTrackingLevel(JNIEnv* env))
// Test that we can downgrade NMT levels but not upgrade them.
if (MemTracker::tracking_level() == NMT_off) {
@@ -365,6 +356,12 @@
return MemTracker::tracking_level() == NMT_minimal;
}
WB_END
+
+WB_ENTRY(jint, WB_NMTGetHashSize(JNIEnv* env, jobject o))
+ int hash_size = MallocSiteTable::hash_buckets();
+ assert(hash_size > 0, "NMT hash_size should be > 0");
+ return (jint)hash_size;
+WB_END
#endif // INCLUDE_NMT
static jmethodID reflected_method_to_jmid(JavaThread* thread, JNIEnv* env, jobject method) {
@@ -386,19 +383,10 @@
CHECK_JNI_EXCEPTION_(env, result);
MutexLockerEx mu(Compile_lock);
methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
- nmethod* code;
if (is_osr) {
- int bci = InvocationEntryBci;
- while ((code = mh->lookup_osr_nmethod_for(bci, CompLevel_none, false)) != NULL) {
- code->mark_for_deoptimization();
- ++result;
- bci = code->osr_entry_bci() + 1;
- }
- } else {
- code = mh->code();
- }
- if (code != NULL) {
- code->mark_for_deoptimization();
+ result += mh->mark_osr_nmethods();
+ } else if (mh->code() != NULL) {
+ mh->code()->mark_for_deoptimization();
++result;
}
result += CodeCache::mark_for_deoptimization(mh());
@@ -518,16 +506,6 @@
static AlwaysFalseClosure always_false;
-class VM_WhiteBoxCleanMethodData : public VM_WhiteBoxOperation {
- public:
- VM_WhiteBoxCleanMethodData(MethodData* mdo) : _mdo(mdo) { }
- void doit() {
- _mdo->clean_method_data(&always_false);
- }
- private:
- MethodData* _mdo;
-};
-
WB_ENTRY(void, WB_ClearMethodState(JNIEnv* env, jobject o, jobject method))
jmethodID jmid = reflected_method_to_jmid(thread, env, method);
CHECK_JNI_EXCEPTION(env);
@@ -543,8 +521,8 @@
for (int i = 0; i < arg_count; i++) {
mdo->set_arg_modified(i, 0);
}
- VM_WhiteBoxCleanMethodData op(mdo);
- VMThread::execute(&op);
+ MutexLockerEx mu(mdo->extra_data_lock());
+ mdo->clean_method_data(&always_false);
}
mh->clear_not_c1_compilable();
@@ -566,13 +544,13 @@
WB_END
template <typename T>
-static bool GetVMFlag(JavaThread* thread, JNIEnv* env, jstring name, T* value, bool (*TAt)(const char*, T*)) {
+static bool GetVMFlag(JavaThread* thread, JNIEnv* env, jstring name, T* value, bool (*TAt)(const char*, T*, bool, bool)) {
if (name == NULL) {
return false;
}
ThreadToNativeFromVM ttnfv(thread); // can't be in VM when we call JNI
const char* flag_name = env->GetStringUTFChars(name, NULL);
- bool result = (*TAt)(flag_name, value);
+ bool result = (*TAt)(flag_name, value, true, true);
env->ReleaseStringUTFChars(name, flag_name);
return result;
}
@@ -619,6 +597,24 @@
return box(thread, env, vmSymbols::java_lang_Double(), vmSymbols::Double_valueOf_signature(), value);
}
+static Flag* getVMFlag(JavaThread* thread, JNIEnv* env, jstring name) {
+ ThreadToNativeFromVM ttnfv(thread); // can't be in VM when we call JNI
+ const char* flag_name = env->GetStringUTFChars(name, NULL);
+ Flag* result = Flag::find_flag(flag_name, strlen(flag_name), true, true);
+ env->ReleaseStringUTFChars(name, flag_name);
+ return result;
+}
+
+WB_ENTRY(jboolean, WB_IsConstantVMFlag(JNIEnv* env, jobject o, jstring name))
+ Flag* flag = getVMFlag(thread, env, name);
+ return (flag != NULL) && flag->is_constant_in_binary();
+WB_END
+
+WB_ENTRY(jboolean, WB_IsLockedVMFlag(JNIEnv* env, jobject o, jstring name))
+ Flag* flag = getVMFlag(thread, env, name);
+ return (flag != NULL) && !(flag->is_unlocked() || flag->is_unlocker());
+WB_END
+
WB_ENTRY(jobject, WB_GetBooleanVMFlag(JNIEnv* env, jobject o, jstring name))
bool result;
if (GetVMFlag <bool> (thread, env, name, &result, &CommandLineFlags::boolAt)) {
@@ -794,20 +790,24 @@
ThreadToNativeFromVM ttn(thread);
jclass clazz = env->FindClass(vmSymbols::java_lang_Object()->as_C_string());
CHECK_JNI_EXCEPTION_(env, NULL);
- result = env->NewObjectArray(2, clazz, NULL);
+ result = env->NewObjectArray(3, clazz, NULL);
if (result == NULL) {
return result;
}
- jobject obj = integerBox(thread, env, code->comp_level());
+ jobject level = integerBox(thread, env, code->comp_level());
CHECK_JNI_EXCEPTION_(env, NULL);
- env->SetObjectArrayElement(result, 0, obj);
+ env->SetObjectArrayElement(result, 0, level);
jbyteArray insts = env->NewByteArray(insts_size);
CHECK_JNI_EXCEPTION_(env, NULL);
env->SetByteArrayRegion(insts, 0, insts_size, (jbyte*) code->insts_begin());
env->SetObjectArrayElement(result, 1, insts);
+ jobject id = integerBox(thread, env, code->compile_id());
+ CHECK_JNI_EXCEPTION_(env, NULL);
+ env->SetObjectArrayElement(result, 2, id);
+
return result;
WB_END
@@ -989,9 +989,9 @@
{CC"NMTCommitMemory", CC"(JJ)V", (void*)&WB_NMTCommitMemory },
{CC"NMTUncommitMemory", CC"(JJ)V", (void*)&WB_NMTUncommitMemory },
{CC"NMTReleaseMemory", CC"(JJ)V", (void*)&WB_NMTReleaseMemory },
- {CC"NMTOverflowHashBucket", CC"(J)V", (void*)&WB_NMTOverflowHashBucket},
{CC"NMTIsDetailSupported",CC"()Z", (void*)&WB_NMTIsDetailSupported},
{CC"NMTChangeTrackingLevel", CC"()Z", (void*)&WB_NMTChangeTrackingLevel},
+ {CC"NMTGetHashSize", CC"()I", (void*)&WB_NMTGetHashSize },
#endif // INCLUDE_NMT
{CC"deoptimizeAll", CC"()V", (void*)&WB_DeoptimizeAll },
{CC"deoptimizeMethod", CC"(Ljava/lang/reflect/Executable;Z)I",
@@ -1018,6 +1018,8 @@
CC"(Ljava/lang/reflect/Executable;II)Z", (void*)&WB_EnqueueMethodForCompilation},
{CC"clearMethodState",
CC"(Ljava/lang/reflect/Executable;)V", (void*)&WB_ClearMethodState},
+ {CC"isConstantVMFlag", CC"(Ljava/lang/String;)Z", (void*)&WB_IsConstantVMFlag},
+ {CC"isLockedVMFlag", CC"(Ljava/lang/String;)Z", (void*)&WB_IsLockedVMFlag},
{CC"setBooleanVMFlag", CC"(Ljava/lang/String;Z)V",(void*)&WB_SetBooleanVMFlag},
{CC"setIntxVMFlag", CC"(Ljava/lang/String;J)V",(void*)&WB_SetIntxVMFlag},
{CC"setUintxVMFlag", CC"(Ljava/lang/String;J)V",(void*)&WB_SetUintxVMFlag},
--- a/hotspot/src/share/vm/runtime/arguments.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/runtime/arguments.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -54,7 +54,7 @@
#endif // INCLUDE_ALL_GCS
// Note: This is a special bug reporting site for the JVM
-#define DEFAULT_VENDOR_URL_BUG "http://bugreport.sun.com/bugreport/crash.jsp"
+#define DEFAULT_VENDOR_URL_BUG "http://bugreport.java.com/bugreport/crash.jsp"
#define DEFAULT_JAVA_LAUNCHER "generic"
// Disable options not supported in this release, with a warning if they
@@ -306,6 +306,9 @@
{ "ReflectionWrapResolutionErrors",JDK_Version::jdk(9), JDK_Version::jdk(10) },
{ "VerifyReflectionBytecodes", JDK_Version::jdk(9), JDK_Version::jdk(10) },
{ "AutoShutdownNMT", JDK_Version::jdk(9), JDK_Version::jdk(10) },
+ { "NmethodSweepFraction", JDK_Version::jdk(9), JDK_Version::jdk(10) },
+ { "NmethodSweepCheckInterval", JDK_Version::jdk(9), JDK_Version::jdk(10) },
+ { "CodeCacheMinimumFreeSpace", JDK_Version::jdk(9), JDK_Version::jdk(10) },
#ifndef ZERO
{ "UseFastAccessorMethods", JDK_Version::jdk(9), JDK_Version::jdk(10) },
{ "UseFastEmptyMethods", JDK_Version::jdk(9), JDK_Version::jdk(10) },
@@ -2528,7 +2531,7 @@
// Check lower bounds of the code cache
// Template Interpreter code is approximately 3X larger in debug builds.
- uint min_code_cache_size = (CodeCacheMinimumUseSpace DEBUG_ONLY(* 3)) + CodeCacheMinimumFreeSpace;
+ uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
if (InitialCodeCacheSize < (uintx)os::vm_page_size()) {
jio_fprintf(defaultStream::error_stream(),
"Invalid InitialCodeCacheSize=%dK. Must be at least %dK.\n", InitialCodeCacheSize/K,
@@ -2564,10 +2567,11 @@
status = false;
}
- status &= verify_interval(NmethodSweepFraction, 1, ReservedCodeCacheSize/K, "NmethodSweepFraction");
status &= verify_interval(NmethodSweepActivity, 0, 2000, "NmethodSweepActivity");
status &= verify_interval(CodeCacheMinBlockLength, 1, 100, "CodeCacheMinBlockLength");
status &= verify_interval(CodeCacheSegmentSize, 1, 1024, "CodeCacheSegmentSize");
+ status &= verify_interval(StartAggressiveSweepingAt, 0, 100, "StartAggressiveSweepingAt");
+
int min_number_of_compiler_threads = get_min_number_of_compiler_threads();
// The default CICompilerCount's value is CI_COMPILER_COUNT.
@@ -3992,12 +3996,6 @@
#endif
#endif
- // Set NmethodSweepFraction after the size of the code cache is adapted (in case of tiered)
- if (FLAG_IS_DEFAULT(NmethodSweepFraction)) {
- FLAG_SET_DEFAULT(NmethodSweepFraction, 1 + ReservedCodeCacheSize / (16 * M));
- }
-
-
// Set heap size based on available physical memory
set_heap_size();
@@ -4065,13 +4063,6 @@
}
#ifndef PRODUCT
- if (CompileTheWorld) {
- // Force NmethodSweeper to sweep whole CodeCache each time.
- if (FLAG_IS_DEFAULT(NmethodSweepFraction)) {
- NmethodSweepFraction = 1;
- }
- }
-
if (!LogVMOutput && FLAG_IS_DEFAULT(LogVMOutput)) {
if (use_vm_log()) {
LogVMOutput = true;
--- a/hotspot/src/share/vm/runtime/deoptimization.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/runtime/deoptimization.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -1173,7 +1173,7 @@
void Deoptimization::load_class_by_index(constantPoolHandle constant_pool, int index, TRAPS) {
// in case of an unresolved klass entry, load the class.
if (constant_pool->tag_at(index).is_unresolved_klass()) {
- Klass* tk = constant_pool->klass_at(index, CHECK);
+ Klass* tk = constant_pool->klass_at_ignore_error(index, CHECK);
return;
}
--- a/hotspot/src/share/vm/runtime/globals.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/runtime/globals.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -28,6 +28,7 @@
#include "runtime/arguments.hpp"
#include "runtime/globals.hpp"
#include "runtime/globals_extension.hpp"
+#include "runtime/os.hpp"
#include "utilities/ostream.hpp"
#include "utilities/macros.hpp"
#include "utilities/top.hpp"
@@ -634,8 +635,8 @@
e.commit();
}
-bool CommandLineFlags::boolAt(const char* name, size_t len, bool* value) {
- Flag* result = Flag::find_flag(name, len);
+bool CommandLineFlags::boolAt(const char* name, size_t len, bool* value, bool allow_locked, bool return_flag) {
+ Flag* result = Flag::find_flag(name, len, allow_locked, return_flag);
if (result == NULL) return false;
if (!result->is_bool()) return false;
*value = result->get_bool();
@@ -662,8 +663,8 @@
faddr->set_origin(origin);
}
-bool CommandLineFlags::intxAt(const char* name, size_t len, intx* value) {
- Flag* result = Flag::find_flag(name, len);
+bool CommandLineFlags::intxAt(const char* name, size_t len, intx* value, bool allow_locked, bool return_flag) {
+ Flag* result = Flag::find_flag(name, len, allow_locked, return_flag);
if (result == NULL) return false;
if (!result->is_intx()) return false;
*value = result->get_intx();
@@ -690,8 +691,8 @@
faddr->set_origin(origin);
}
-bool CommandLineFlags::uintxAt(const char* name, size_t len, uintx* value) {
- Flag* result = Flag::find_flag(name, len);
+bool CommandLineFlags::uintxAt(const char* name, size_t len, uintx* value, bool allow_locked, bool return_flag) {
+ Flag* result = Flag::find_flag(name, len, allow_locked, return_flag);
if (result == NULL) return false;
if (!result->is_uintx()) return false;
*value = result->get_uintx();
@@ -718,8 +719,8 @@
faddr->set_origin(origin);
}
-bool CommandLineFlags::uint64_tAt(const char* name, size_t len, uint64_t* value) {
- Flag* result = Flag::find_flag(name, len);
+bool CommandLineFlags::uint64_tAt(const char* name, size_t len, uint64_t* value, bool allow_locked, bool return_flag) {
+ Flag* result = Flag::find_flag(name, len, allow_locked, return_flag);
if (result == NULL) return false;
if (!result->is_uint64_t()) return false;
*value = result->get_uint64_t();
@@ -746,8 +747,8 @@
faddr->set_origin(origin);
}
-bool CommandLineFlags::size_tAt(const char* name, size_t len, size_t* value) {
- Flag* result = Flag::find_flag(name, len);
+bool CommandLineFlags::size_tAt(const char* name, size_t len, size_t* value, bool allow_locked, bool return_flag) {
+ Flag* result = Flag::find_flag(name, len, allow_locked, return_flag);
if (result == NULL) return false;
if (!result->is_size_t()) return false;
*value = result->get_size_t();
@@ -774,8 +775,8 @@
faddr->set_origin(origin);
}
-bool CommandLineFlags::doubleAt(const char* name, size_t len, double* value) {
- Flag* result = Flag::find_flag(name, len);
+bool CommandLineFlags::doubleAt(const char* name, size_t len, double* value, bool allow_locked, bool return_flag) {
+ Flag* result = Flag::find_flag(name, len, allow_locked, return_flag);
if (result == NULL) return false;
if (!result->is_double()) return false;
*value = result->get_double();
@@ -802,8 +803,8 @@
faddr->set_origin(origin);
}
-bool CommandLineFlags::ccstrAt(const char* name, size_t len, ccstr* value) {
- Flag* result = Flag::find_flag(name, len);
+bool CommandLineFlags::ccstrAt(const char* name, size_t len, ccstr* value, bool allow_locked, bool return_flag) {
+ Flag* result = Flag::find_flag(name, len, allow_locked, return_flag);
if (result == NULL) return false;
if (!result->is_ccstr()) return false;
*value = result->get_ccstr();
@@ -818,15 +819,12 @@
trace_flag_changed<EventStringFlagChanged, const char*>(name, old_value, *value, origin);
char* new_value = NULL;
if (*value != NULL) {
- new_value = NEW_C_HEAP_ARRAY(char, strlen(*value)+1, mtInternal);
- strcpy(new_value, *value);
+ new_value = os::strdup_check_oom(*value);
}
result->set_ccstr(new_value);
if (result->is_default() && old_value != NULL) {
// Prior value is NOT heap allocated, but was a literal constant.
- char* old_value_to_free = NEW_C_HEAP_ARRAY(char, strlen(old_value)+1, mtInternal);
- strcpy(old_value_to_free, old_value);
- old_value = old_value_to_free;
+ old_value = os::strdup_check_oom(old_value);
}
*value = old_value;
result->set_origin(origin);
@@ -838,8 +836,7 @@
guarantee(faddr != NULL && faddr->is_ccstr(), "wrong flag type");
ccstr old_value = faddr->get_ccstr();
trace_flag_changed<EventStringFlagChanged, const char*>(faddr->_name, old_value, value, origin);
- char* new_value = NEW_C_HEAP_ARRAY(char, strlen(value)+1, mtInternal);
- strcpy(new_value, value);
+ char* new_value = os::strdup_check_oom(value);
faddr->set_ccstr(new_value);
if (!faddr->is_default() && old_value != NULL) {
// Prior value is heap allocated so free it.
--- a/hotspot/src/share/vm/runtime/globals.hpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/runtime/globals.hpp Wed Jul 05 20:07:30 2017 +0200
@@ -379,38 +379,38 @@
class CommandLineFlags {
public:
- static bool boolAt(const char* name, size_t len, bool* value);
- static bool boolAt(const char* name, bool* value) { return boolAt(name, strlen(name), value); }
+ static bool boolAt(const char* name, size_t len, bool* value, bool allow_locked = false, bool return_flag = false);
+ static bool boolAt(const char* name, bool* value, bool allow_locked = false, bool return_flag = false) { return boolAt(name, strlen(name), value, allow_locked, return_flag); }
static bool boolAtPut(const char* name, size_t len, bool* value, Flag::Flags origin);
static bool boolAtPut(const char* name, bool* value, Flag::Flags origin) { return boolAtPut(name, strlen(name), value, origin); }
- static bool intxAt(const char* name, size_t len, intx* value);
- static bool intxAt(const char* name, intx* value) { return intxAt(name, strlen(name), value); }
+ static bool intxAt(const char* name, size_t len, intx* value, bool allow_locked = false, bool return_flag = false);
+ static bool intxAt(const char* name, intx* value, bool allow_locked = false, bool return_flag = false) { return intxAt(name, strlen(name), value, allow_locked, return_flag); }
static bool intxAtPut(const char* name, size_t len, intx* value, Flag::Flags origin);
static bool intxAtPut(const char* name, intx* value, Flag::Flags origin) { return intxAtPut(name, strlen(name), value, origin); }
- static bool uintxAt(const char* name, size_t len, uintx* value);
- static bool uintxAt(const char* name, uintx* value) { return uintxAt(name, strlen(name), value); }
+ static bool uintxAt(const char* name, size_t len, uintx* value, bool allow_locked = false, bool return_flag = false);
+ static bool uintxAt(const char* name, uintx* value, bool allow_locked = false, bool return_flag = false) { return uintxAt(name, strlen(name), value, allow_locked, return_flag); }
static bool uintxAtPut(const char* name, size_t len, uintx* value, Flag::Flags origin);
static bool uintxAtPut(const char* name, uintx* value, Flag::Flags origin) { return uintxAtPut(name, strlen(name), value, origin); }
- static bool size_tAt(const char* name, size_t len, size_t* value);
- static bool size_tAt(const char* name, size_t* value) { return size_tAt(name, strlen(name), value); }
+ static bool size_tAt(const char* name, size_t len, size_t* value, bool allow_locked = false, bool return_flag = false);
+ static bool size_tAt(const char* name, size_t* value, bool allow_locked = false, bool return_flag = false) { return size_tAt(name, strlen(name), value, allow_locked, return_flag); }
static bool size_tAtPut(const char* name, size_t len, size_t* value, Flag::Flags origin);
static bool size_tAtPut(const char* name, size_t* value, Flag::Flags origin) { return size_tAtPut(name, strlen(name), value, origin); }
- static bool uint64_tAt(const char* name, size_t len, uint64_t* value);
- static bool uint64_tAt(const char* name, uint64_t* value) { return uint64_tAt(name, strlen(name), value); }
+ static bool uint64_tAt(const char* name, size_t len, uint64_t* value, bool allow_locked = false, bool return_flag = false);
+ static bool uint64_tAt(const char* name, uint64_t* value, bool allow_locked = false, bool return_flag = false) { return uint64_tAt(name, strlen(name), value, allow_locked, return_flag); }
static bool uint64_tAtPut(const char* name, size_t len, uint64_t* value, Flag::Flags origin);
static bool uint64_tAtPut(const char* name, uint64_t* value, Flag::Flags origin) { return uint64_tAtPut(name, strlen(name), value, origin); }
- static bool doubleAt(const char* name, size_t len, double* value);
- static bool doubleAt(const char* name, double* value) { return doubleAt(name, strlen(name), value); }
+ static bool doubleAt(const char* name, size_t len, double* value, bool allow_locked = false, bool return_flag = false);
+ static bool doubleAt(const char* name, double* value, bool allow_locked = false, bool return_flag = false) { return doubleAt(name, strlen(name), value, allow_locked, return_flag); }
static bool doubleAtPut(const char* name, size_t len, double* value, Flag::Flags origin);
static bool doubleAtPut(const char* name, double* value, Flag::Flags origin) { return doubleAtPut(name, strlen(name), value, origin); }
- static bool ccstrAt(const char* name, size_t len, ccstr* value);
- static bool ccstrAt(const char* name, ccstr* value) { return ccstrAt(name, strlen(name), value); }
+ static bool ccstrAt(const char* name, size_t len, ccstr* value, bool allow_locked = false, bool return_flag = false);
+ static bool ccstrAt(const char* name, ccstr* value, bool allow_locked = false, bool return_flag = false) { return ccstrAt(name, strlen(name), value, allow_locked, return_flag); }
// Contract: Flag will make private copy of the incoming value.
// Outgoing value is always malloc-ed, and caller MUST call free.
static bool ccstrAtPut(const char* name, size_t len, ccstr* value, Flag::Flags origin);
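The two new defaulted parameters let callers opt in to flags that an ordinary lookup hides: allow_locked also matches diagnostic/experimental flags that have not been unlocked, and return_flag also matches flags that are constant in the binary. A hedged usage sketch of the call shape used by the new WhiteBox entry points (the flag name is only an example):

    bool value = false;
    // Ordinary lookup: locked diagnostic flags stay invisible.
    bool found = CommandLineFlags::boolAt("SomeDiagnosticFlag", &value);
    // WhiteBox-style lookup: also see locked and constant flags.
    bool found_any = CommandLineFlags::boolAt("SomeDiagnosticFlag", &value,
                                              /* allow_locked */ true,
                                              /* return_flag  */ true);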
@@ -2066,9 +2066,6 @@
"Provide more detailed and expensive TLAB statistics " \
"(with PrintTLAB)") \
\
- EMBEDDED_ONLY(product(bool, LowMemoryProtection, true, \
- "Enable LowMemoryProtection")) \
- \
product_pd(bool, NeverActAsServerClassMachine, \
"Never act like a server-class machine") \
\
@@ -2984,12 +2981,6 @@
product(intx, SafepointTimeoutDelay, 10000, \
"Delay in milliseconds for option SafepointTimeout") \
\
- product(intx, NmethodSweepFraction, 16, \
- "Number of invocations of sweeper to cover all nmethods") \
- \
- product(intx, NmethodSweepCheckInterval, 5, \
- "Compilers wake up every n seconds to possibly sweep nmethods") \
- \
product(intx, NmethodSweepActivity, 10, \
"Removes cold nmethods from code cache if > 0. Higher values " \
"result in more aggressive sweeping") \
@@ -3378,9 +3369,6 @@
product_pd(uintx, NonNMethodCodeHeapSize, \
"Size of code heap with non-nmethods (in bytes)") \
\
- product(uintx, CodeCacheMinimumFreeSpace, 500*K, \
- "When less than X space left, we stop compiling") \
- \
product_pd(uintx, CodeCacheExpansionSize, \
"Code cache expansion size (in bytes)") \
\
@@ -3393,6 +3381,11 @@
product(bool, UseCodeCacheFlushing, true, \
"Remove cold/old nmethods from the code cache") \
\
+ product(uintx, StartAggressiveSweepingAt, 10, \
+          "Start aggressive sweeping if X[%] of the code cache is free. " \
+          "Segmented code cache: X[%] of the non-profiled heap. "         \
+ "Non-segmented code cache: X[%] of the total code cache") \
+ \
/* interpreter debugging */ \
develop(intx, BinarySwitchThreshold, 5, \
"Minimal number of lookupswitch entries for rewriting to binary " \
--- a/hotspot/src/share/vm/runtime/interfaceSupport.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/runtime/interfaceSupport.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -187,19 +187,22 @@
# endif
-
+// invocation counter for InterfaceSupport::deoptimizeAll/zombieAll functions
int deoptimizeAllCounter = 0;
int zombieAllCounter = 0;
-
void InterfaceSupport::zombieAll() {
- if (is_init_completed() && zombieAllCounter > ZombieALotInterval) {
+  // This method is called by all threads when a thread makes a
+  // transition to VM state (for example, runtime calls).
+  // Divide the number of calls by the number of threads to avoid
+  // dependence of the ZombieAll event frequency on the number of threads.
+ int value = zombieAllCounter / Threads::number_of_threads();
+ if (is_init_completed() && value > ZombieALotInterval) {
zombieAllCounter = 0;
VM_ZombieAll op;
VMThread::execute(&op);
- } else {
- zombieAllCounter++;
}
+ zombieAllCounter++;
}
void InterfaceSupport::unlinkSymbols() {
@@ -208,12 +211,17 @@
}
void InterfaceSupport::deoptimizeAll() {
- if (is_init_completed() ) {
- if (DeoptimizeALot && deoptimizeAllCounter > DeoptimizeALotInterval) {
+  // This method is called by all threads when a thread makes a
+  // transition to VM state (for example, runtime calls).
+  // Divide the number of calls by the number of threads to avoid
+  // dependence of the DeoptimizeAll event frequency on the number of threads.
+ int value = deoptimizeAllCounter / Threads::number_of_threads();
+ if (is_init_completed()) {
+ if (DeoptimizeALot && value > DeoptimizeALotInterval) {
deoptimizeAllCounter = 0;
VM_DeoptimizeAll op;
VMThread::execute(&op);
- } else if (DeoptimizeRandom && (deoptimizeAllCounter & 0x1f) == (os::random() & 0x1f)) {
+ } else if (DeoptimizeRandom && (value & 0x1F) == (os::random() & 0x1F)) {
VM_DeoptimizeAll op;
VMThread::execute(&op);
}
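To see why the division is needed: the counters are shared, so with N threads making VM transitions the shared counter crosses an interval I after only about I/N calls per thread, and the trigger rate would grow with the thread count. Dividing by Threads::number_of_threads() restores roughly one trigger per I transitions per thread. For example (numbers illustrative), with DeoptimizeALotInterval = 1000 and 8 threads, the shared counter must reach about 8000 before value = counter / 8 exceeds the interval, matching the single-threaded cadence.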
--- a/hotspot/src/share/vm/runtime/java.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/runtime/java.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -705,25 +705,35 @@
}
void JDK_Version::to_string(char* buffer, size_t buflen) const {
+ assert(buffer && buflen > 0, "call with useful buffer");
size_t index = 0;
+
if (!is_valid()) {
jio_snprintf(buffer, buflen, "%s", "(uninitialized)");
} else if (is_partially_initialized()) {
jio_snprintf(buffer, buflen, "%s", "(uninitialized) pre-1.6.0");
} else {
- index += jio_snprintf(
+ int rc = jio_snprintf(
&buffer[index], buflen - index, "%d.%d", _major, _minor);
+ if (rc == -1) return;
+ index += rc;
if (_micro > 0) {
- index += jio_snprintf(&buffer[index], buflen - index, ".%d", _micro);
+      rc = jio_snprintf(&buffer[index], buflen - index, ".%d", _micro);
+      if (rc == -1) return;
+      index += rc;
}
if (_update > 0) {
- index += jio_snprintf(&buffer[index], buflen - index, "_%02d", _update);
+ rc = jio_snprintf(&buffer[index], buflen - index, "_%02d", _update);
+ if (rc == -1) return;
+ index += rc;
}
if (_special > 0) {
- index += jio_snprintf(&buffer[index], buflen - index, "%c", _special);
+ rc = jio_snprintf(&buffer[index], buflen - index, "%c", _special);
+ if (rc == -1) return;
+ index += rc;
}
if (_build > 0) {
- index += jio_snprintf(&buffer[index], buflen - index, "-b%02d", _build);
+ rc = jio_snprintf(&buffer[index], buflen - index, "-b%02d", _build);
+ if (rc == -1) return;
+ index += rc;
}
}
}
--- a/hotspot/src/share/vm/runtime/mutexLocker.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/runtime/mutexLocker.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -61,7 +61,7 @@
Mutex* StringTable_lock = NULL;
Monitor* StringDedupQueue_lock = NULL;
Mutex* StringDedupTable_lock = NULL;
-Mutex* CodeCache_lock = NULL;
+Monitor* CodeCache_lock = NULL;
Mutex* MethodData_lock = NULL;
Mutex* RetData_lock = NULL;
Monitor* VMOperationQueue_lock = NULL;
@@ -205,7 +205,7 @@
}
def(ParGCRareEvent_lock , Mutex , leaf , true );
def(DerivedPointerTableGC_lock , Mutex, leaf, true );
- def(CodeCache_lock , Mutex , special, true );
+ def(CodeCache_lock , Monitor, special, true );
def(Interrupt_lock , Monitor, special, true ); // used for interrupt processing
def(RawMonitor_lock , Mutex, special, true );
def(OopMapCacheAlloc_lock , Mutex, leaf, true ); // used for oop_map_cache allocation.
--- a/hotspot/src/share/vm/runtime/mutexLocker.hpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/runtime/mutexLocker.hpp Wed Jul 05 20:07:30 2017 +0200
@@ -53,7 +53,7 @@
extern Mutex* StringTable_lock; // a lock on the interned string table
extern Monitor* StringDedupQueue_lock; // a lock on the string deduplication queue
extern Mutex* StringDedupTable_lock; // a lock on the string deduplication table
-extern Mutex* CodeCache_lock; // a lock on the CodeCache, rank is special, use MutexLockerEx
+extern Monitor* CodeCache_lock; // a lock on the CodeCache, rank is special, use MutexLockerEx
extern Mutex* MethodData_lock; // a lock on installation of method data
extern Mutex* RetData_lock; // a lock on installation of RetData inside method data
extern Mutex* DerivedPointerTableGC_lock; // a lock to protect the derived pointer table
--- a/hotspot/src/share/vm/runtime/os.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/runtime/os.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -571,17 +571,6 @@
NOT_PRODUCT(inc_stat_counter(&num_mallocs, 1));
NOT_PRODUCT(inc_stat_counter(&alloc_bytes, size));
-#if INCLUDE_NMT
- // NMT can not track malloc allocation size > MAX_MALLOC_SIZE, which is
- // (1GB - 1) on 32-bit system. It is not an issue on 64-bit system, where
- // MAX_MALLOC_SIZE = ((1 << 62) - 1).
- // VM code does not have such large malloc allocation. However, it can come
- // Unsafe call.
- if (MemTracker::tracking_level() >= NMT_summary && size > MAX_MALLOC_SIZE) {
- return NULL;
- }
-#endif
-
#ifdef ASSERT
// checking for the WatcherThread and crash_protection first
// since os::malloc can be called when the libjvm.{dll,so} is
@@ -652,12 +641,6 @@
}
void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, const NativeCallStack& stack) {
-#if INCLUDE_NMT
- // See comments in os::malloc() above
- if (MemTracker::tracking_level() >= NMT_summary && size > MAX_MALLOC_SIZE) {
- return NULL;
- }
-#endif
#ifndef ASSERT
NOT_PRODUCT(inc_stat_counter(&num_mallocs, 1));
--- a/hotspot/src/share/vm/runtime/os.hpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/runtime/os.hpp Wed Jul 05 20:07:30 2017 +0200
@@ -158,7 +158,6 @@
static void init_globals(void) { // Called from init_globals() in init.cpp
init_globals_ext();
}
- static void init_3(void); // Called at the end of vm init
// File names are case-insensitive on windows only
// Override me as needed
@@ -680,28 +679,10 @@
// SocketInterface (ex HPI SocketInterface )
static int socket(int domain, int type, int protocol);
static int socket_close(int fd);
- static int socket_shutdown(int fd, int howto);
static int recv(int fd, char* buf, size_t nBytes, uint flags);
static int send(int fd, char* buf, size_t nBytes, uint flags);
static int raw_send(int fd, char* buf, size_t nBytes, uint flags);
- static int timeout(int fd, long timeout);
- static int listen(int fd, int count);
static int connect(int fd, struct sockaddr* him, socklen_t len);
- static int bind(int fd, struct sockaddr* him, socklen_t len);
- static int accept(int fd, struct sockaddr* him, socklen_t* len);
- static int recvfrom(int fd, char* buf, size_t nbytes, uint flags,
- struct sockaddr* from, socklen_t* fromlen);
- static int get_sock_name(int fd, struct sockaddr* him, socklen_t* len);
- static int sendto(int fd, char* buf, size_t len, uint flags,
- struct sockaddr* to, socklen_t tolen);
- static int socket_available(int fd, jint* pbytes);
-
- static int get_sock_opt(int fd, int level, int optname,
- char* optval, socklen_t* optlen);
- static int set_sock_opt(int fd, int level, int optname,
- const char* optval, socklen_t optlen);
- static int get_host_name(char* name, int namelen);
-
static struct hostent* get_host_by_name(char* name);
// Support for signals (see JVM_RaiseSignal, JVM_RegisterSignal)
--- a/hotspot/src/share/vm/runtime/sharedRuntime.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -2421,8 +2421,6 @@
// CodeCache is full, disable compilation
// Ought to log this but compile log is only per compile thread
// and we're some non descript Java thread.
- MutexUnlocker mu(AdapterHandlerLibrary_lock);
- CompileBroker::handle_full_code_cache(CodeBlobType::NonNMethod);
return NULL; // Out of CodeCache space
}
entry->relocate(new_adapter->content_begin());
@@ -2594,9 +2592,6 @@
CompileTask::print_compilation(tty, nm, method->is_static() ? "(static)" : "");
}
nm->post_compiled_method_load_event();
- } else {
- // CodeCache is full, disable compilation
- CompileBroker::handle_full_code_cache(CodeBlobType::MethodNonProfiled);
}
}
--- a/hotspot/src/share/vm/runtime/signature.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/runtime/signature.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -158,7 +158,7 @@
uint64_t saved_fingerprint = fingerprint;
// Check for too many arguments
- if ( fingerprint == UCONST64(-1) ) {
+ if (fingerprint == (uint64_t)CONST64(-1)) {
SignatureIterator::iterate_parameters();
return;
}
--- a/hotspot/src/share/vm/runtime/signature.hpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/runtime/signature.hpp Wed Jul 05 20:07:30 2017 +0200
@@ -243,7 +243,7 @@
}
if (mh->size_of_parameters() > max_size_of_parameters ) {
- _fingerprint = UCONST64(-1);
+ _fingerprint = (uint64_t)CONST64(-1);
mh->constMethod()->set_fingerprint(_fingerprint);
return _fingerprint;
}
--- a/hotspot/src/share/vm/runtime/simpleThresholdPolicy.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/runtime/simpleThresholdPolicy.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -196,7 +196,6 @@
// Don't trigger other compiles in testing mode
return NULL;
}
- nmethod *osr_nm = NULL;
handle_counter_overflow(method());
if (method() != inlinee()) {
@@ -210,14 +209,16 @@
if (bci == InvocationEntryBci) {
method_invocation_event(method, inlinee, comp_level, nm, thread);
} else {
+ // method == inlinee if the event originated in the main method
method_back_branch_event(method, inlinee, bci, comp_level, nm, thread);
- // method == inlinee if the event originated in the main method
- int highest_level = inlinee->highest_osr_comp_level();
- if (highest_level > comp_level) {
- osr_nm = inlinee->lookup_osr_nmethod_for(bci, highest_level, false);
+ // Check if event led to a higher level OSR compilation
+ nmethod* osr_nm = inlinee->lookup_osr_nmethod_for(bci, comp_level, false);
+ if (osr_nm != NULL && osr_nm->comp_level() > comp_level) {
+ // Perform OSR with new nmethod
+ return osr_nm;
}
}
- return osr_nm;
+ return NULL;
}
// Check if the method can be compiled, change level if necessary
--- a/hotspot/src/share/vm/runtime/sweeper.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/runtime/sweeper.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -52,7 +52,6 @@
class SweeperRecord {
public:
int traversal;
- int invocation;
int compile_id;
long traversal_mark;
int state;
@@ -62,10 +61,9 @@
int line;
void print() {
- tty->print_cr("traversal = %d invocation = %d compile_id = %d %s uep = " PTR_FORMAT " vep = "
+ tty->print_cr("traversal = %d compile_id = %d %s uep = " PTR_FORMAT " vep = "
PTR_FORMAT " state = %d traversal_mark %d line = %d",
traversal,
- invocation,
compile_id,
kind == NULL ? "" : kind,
uep,
@@ -117,7 +115,6 @@
if (_records != NULL) {
_records[_sweep_index].traversal = _traversals;
_records[_sweep_index].traversal_mark = nm->_stack_traversal_mark;
- _records[_sweep_index].invocation = _sweep_fractions_left;
_records[_sweep_index].compile_id = nm->compile_id();
_records[_sweep_index].kind = nm->compile_kind();
_records[_sweep_index].state = nm->_state;
@@ -127,6 +124,14 @@
_sweep_index = (_sweep_index + 1) % SweeperLogEntries;
}
}
+
+void NMethodSweeper::init_sweeper_log() {
+ if (LogSweeper && _records == NULL) {
+ // Create the ring buffer for the logging code
+ _records = NEW_C_HEAP_ARRAY(SweeperRecord, SweeperLogEntries, mtGC);
+ memset(_records, 0, sizeof(SweeperRecord) * SweeperLogEntries);
+ }
+}
#else
#define SWEEP(nm)
#endif
@@ -142,8 +147,6 @@
int NMethodSweeper::_marked_for_reclamation_count = 0; // Nof. nmethods marked for reclaim in current sweep
volatile bool NMethodSweeper::_should_sweep = true; // Indicates if we should invoke the sweeper
-volatile int NMethodSweeper::_sweep_fractions_left = 0; // Nof. invocations left until we are completed with this pass
-volatile int NMethodSweeper::_sweep_started = 0; // Flag to control conc sweeper
volatile int NMethodSweeper::_bytes_changed = 0; // Counts the total nmethod size if the nmethod changed from:
// 1) alive -> not_entrant
// 2) not_entrant -> zombie
@@ -190,13 +193,15 @@
}
return _hotness_counter_reset_val;
}
-bool NMethodSweeper::sweep_in_progress() {
- return !_current.end();
+bool NMethodSweeper::wait_for_stack_scanning() {
+ return _current.end();
}
-// Scans the stacks of all Java threads and marks activations of not-entrant methods.
-// No need to synchronize access, since 'mark_active_nmethods' is always executed at a
-// safepoint.
+/**
+ * Scans the stacks of all Java threads and marks activations of not-entrant methods.
+ * No need to synchronize access, since 'mark_active_nmethods' is always executed at a
+ * safepoint.
+ */
void NMethodSweeper::mark_active_nmethods() {
assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
// If we do not want to reclaim not-entrant or zombie methods there is no need
@@ -210,9 +215,8 @@
// Check for restart
assert(CodeCache::find_blob_unsafe(_current.method()) == _current.method(), "Sweeper nmethod cached state invalid");
- if (!sweep_in_progress()) {
+ if (wait_for_stack_scanning()) {
_seen = 0;
- _sweep_fractions_left = NmethodSweepFraction;
_current = NMethodIterator();
// Initialize to first nmethod
_current.next();
@@ -231,6 +235,64 @@
OrderAccess::storestore();
}
+
+/**
+ * This function triggers a VM operation that does stack scanning of active
+ * methods. Stack scanning is mandatory for the sweeper to make progress.
+ */
+void NMethodSweeper::do_stack_scanning() {
+ assert(!CodeCache_lock->owned_by_self(), "just checking");
+ if (wait_for_stack_scanning()) {
+ VM_MarkActiveNMethods op;
+ VMThread::execute(&op);
+ _should_sweep = true;
+ }
+}
+
+void NMethodSweeper::sweeper_loop() {
+ bool timeout;
+ while (true) {
+ {
+ ThreadBlockInVM tbivm(JavaThread::current());
+ MutexLockerEx waiter(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+ const long wait_time = 60*60*24 * 1000;
+ timeout = CodeCache_lock->wait(Mutex::_no_safepoint_check_flag, wait_time);
+ }
+ if (!timeout) {
+ possibly_sweep();
+ }
+ }
+}
+
+/**
+ * Wakes up the sweeper thread to possibly sweep.
+ */
+void NMethodSweeper::notify(int code_blob_type) {
+ // Makes sure that we do not invoke the sweeper too often during startup.
+ double start_threshold = 100.0 / (double)StartAggressiveSweepingAt;
+ double aggressive_sweep_threshold = MIN2(start_threshold, 1.1);
+ if (CodeCache::reverse_free_ratio(code_blob_type) >= aggressive_sweep_threshold) {
+ assert_locked_or_safepoint(CodeCache_lock);
+ CodeCache_lock->notify();
+ }
+}
+
+/**
+ * Handle a safepoint request
+ */
+void NMethodSweeper::handle_safepoint_request() {
+ if (SafepointSynchronize::is_synchronizing()) {
+ if (PrintMethodFlushing && Verbose) {
+ tty->print_cr("### Sweep at %d out of %d, yielding to safepoint", _seen, CodeCache::nof_nmethods());
+ }
+ MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+
+ JavaThread* thread = JavaThread::current();
+ ThreadBlockInVM tbivm(thread);
+ thread->java_suspend_self();
+ }
+}
+
/**
* This function invokes the sweeper if at least one of the three conditions is met:
* (1) The code cache is getting full
@@ -239,11 +301,6 @@
*/
void NMethodSweeper::possibly_sweep() {
assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode");
- // Only compiler threads are allowed to sweep
- if (!MethodFlushing || !sweep_in_progress() || !Thread::current()->is_Compiler_thread()) {
- return;
- }
-
// If there was no state change while nmethod sweeping, 'should_sweep' will be false.
// This is one of the two places where should_sweep can be set to true. The general
// idea is as follows: If there is enough free space in the code cache, there is no
@@ -280,46 +337,37 @@
}
}
- if (_should_sweep && _sweep_fractions_left > 0) {
- // Only one thread at a time will sweep
- jint old = Atomic::cmpxchg( 1, &_sweep_started, 0 );
- if (old != 0) {
- return;
- }
-#ifdef ASSERT
- if (LogSweeper && _records == NULL) {
- // Create the ring buffer for the logging code
- _records = NEW_C_HEAP_ARRAY(SweeperRecord, SweeperLogEntries, mtGC);
- memset(_records, 0, sizeof(SweeperRecord) * SweeperLogEntries);
- }
-#endif
+ // Force stack scanning if there is only 10% free space in the code cache.
+ // We force stack scanning only if the non-profiled code heap gets full, since critical
+ // allocations go to the non-profiled heap and we must make sure that there is
+ // enough space.
+ double free_percent = 1 / CodeCache::reverse_free_ratio(CodeBlobType::MethodNonProfiled) * 100;
+ if (free_percent <= StartAggressiveSweepingAt) {
+ do_stack_scanning();
+ }
- if (_sweep_fractions_left > 0) {
- sweep_code_cache();
- _sweep_fractions_left--;
- }
+ if (_should_sweep) {
+ init_sweeper_log();
+ sweep_code_cache();
+ }
- // We are done with sweeping the code cache once.
- if (_sweep_fractions_left == 0) {
- _total_nof_code_cache_sweeps++;
- _last_sweep = _time_counter;
- // Reset flag; temporarily disables sweeper
- _should_sweep = false;
- // If there was enough state change, 'possibly_enable_sweeper()'
- // sets '_should_sweep' to true
- possibly_enable_sweeper();
- // Reset _bytes_changed only if there was enough state change. _bytes_changed
- // can further increase by calls to 'report_state_change'.
- if (_should_sweep) {
- _bytes_changed = 0;
- }
- }
- // Release work, because another compiler thread could continue.
- OrderAccess::release_store((int*)&_sweep_started, 0);
+ // We are done with sweeping the code cache once.
+ _total_nof_code_cache_sweeps++;
+ _last_sweep = _time_counter;
+ // Reset flag; temporarily disables sweeper
+ _should_sweep = false;
+ // If there was enough state change, 'possibly_enable_sweeper()'
+ // sets '_should_sweep' to true
+ possibly_enable_sweeper();
+ // Reset _bytes_changed only if there was enough state change. _bytes_changed
+ // can further increase by calls to 'report_state_change'.
+ if (_should_sweep) {
+ _bytes_changed = 0;
}
}
void NMethodSweeper::sweep_code_cache() {
+ ResourceMark rm;
Ticks sweep_start_counter = Ticks::now();
_flushed_count = 0;
@@ -327,25 +375,10 @@
_marked_for_reclamation_count = 0;
if (PrintMethodFlushing && Verbose) {
- tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_nmethods(), _sweep_fractions_left);
- }
-
- if (!CompileBroker::should_compile_new_jobs()) {
- // If we have turned off compilations we might as well do full sweeps
- // in order to reach the clean state faster. Otherwise the sleeping compiler
- // threads will slow down sweeping.
- _sweep_fractions_left = 1;
+ tty->print_cr("### Sweep at %d out of %d", _seen, CodeCache::nof_nmethods());
}
- // We want to visit all nmethods after NmethodSweepFraction
- // invocations so divide the remaining number of nmethods by the
- // remaining number of invocations. This is only an estimate since
- // the number of nmethods changes during the sweep so the final
- // stage must iterate until it there are no more nmethods.
- int todo = (CodeCache::nof_nmethods() - _seen) / _sweep_fractions_left;
int swept_count = 0;
-
-
assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
assert(!CodeCache_lock->owned_by_self(), "just checking");
@@ -354,19 +387,9 @@
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
// The last invocation iterates until there are no more nmethods
- while ((swept_count < todo || _sweep_fractions_left == 1) && !_current.end()) {
+ while (!_current.end()) {
swept_count++;
- if (SafepointSynchronize::is_synchronizing()) { // Safepoint request
- if (PrintMethodFlushing && Verbose) {
- tty->print_cr("### Sweep at %d out of %d, invocation: %d, yielding to safepoint", _seen, CodeCache::nof_nmethods(), _sweep_fractions_left);
- }
- MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-
- assert(Thread::current()->is_Java_thread(), "should be java thread");
- JavaThread* thread = (JavaThread*)Thread::current();
- ThreadBlockInVM tbivm(thread);
- thread->java_suspend_self();
- }
+ handle_safepoint_request();
// Since we will give up the CodeCache_lock, always skip ahead
// to the next nmethod. Other blobs can be deleted by other
// threads but nmethods are only reclaimed by the sweeper.
@@ -382,7 +405,7 @@
}
}
- assert(_sweep_fractions_left > 1 || _current.end(), "must have scanned the whole cache");
+ assert(_current.end(), "must have scanned the whole cache");
const Ticks sweep_end_counter = Ticks::now();
const Tickspan sweep_time = sweep_end_counter - sweep_start_counter;
@@ -397,7 +420,6 @@
event.set_starttime(sweep_start_counter);
event.set_endtime(sweep_end_counter);
event.set_sweepIndex(_traversals);
- event.set_sweepFractionIndex(NmethodSweepFraction - _sweep_fractions_left + 1);
event.set_sweptCount(swept_count);
event.set_flushedCount(_flushed_count);
event.set_markedCount(_marked_for_reclamation_count);
@@ -407,15 +429,12 @@
#ifdef ASSERT
if(PrintMethodFlushing) {
- tty->print_cr("### sweeper: sweep time(%d): "
- INT64_FORMAT, _sweep_fractions_left, (jlong)sweep_time.value());
+ tty->print_cr("### sweeper: sweep time(%d): ", (jlong)sweep_time.value());
}
#endif
- if (_sweep_fractions_left == 1) {
- _peak_sweep_time = MAX2(_peak_sweep_time, _total_time_this_sweep);
- log_sweep("finished");
- }
+ _peak_sweep_time = MAX2(_peak_sweep_time, _total_time_this_sweep);
+ log_sweep("finished");
// Sweeper is the only case where memory is released, check here if it
// is time to restart the compiler. Only checking if there is a certain
@@ -459,10 +478,12 @@
class NMethodMarker: public StackObj {
private:
- CompilerThread* _thread;
+ CodeCacheSweeperThread* _thread;
public:
NMethodMarker(nmethod* nm) {
- _thread = CompilerThread::current();
+ JavaThread* current = JavaThread::current();
+ assert (current->is_Code_cache_sweeper_thread(), "Must be");
+ _thread = (CodeCacheSweeperThread*)JavaThread::current();
if (!nm->is_zombie() && !nm->is_unloaded()) {
// Only expose live nmethods for scanning
_thread->set_scanned_nmethod(nm);
@@ -473,7 +494,7 @@
}
};
-void NMethodSweeper::release_nmethod(nmethod *nm) {
+void NMethodSweeper::release_nmethod(nmethod* nm) {
// Clean up any CompiledICHolders
{
ResourceMark rm;
@@ -490,7 +511,7 @@
nm->flush();
}
-int NMethodSweeper::process_nmethod(nmethod *nm) {
+int NMethodSweeper::process_nmethod(nmethod* nm) {
assert(!CodeCache_lock->owned_by_self(), "just checking");
int freed_memory = 0;
--- a/hotspot/src/share/vm/runtime/sweeper.hpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/runtime/sweeper.hpp Wed Jul 05 20:07:30 2017 +0200
@@ -49,9 +49,7 @@
// remove the nmethod, all inline caches (IC) that point to the nmethod must be
// cleared. After that, the nmethod can be evicted from the code cache. Each nmethod's
// state change happens during separate sweeps. It may take at least 3 sweeps before an
-// nmethod's space is freed. Sweeping is currently done by compiler threads between
-// compilations or at least each 5 sec (NmethodSweepCheckInterval) when the code cache
-// is full.
+// nmethod's space is freed.
class NMethodSweeper : public AllStatic {
static long _traversals; // Stack scan count, also sweep ID.
@@ -64,7 +62,6 @@
static int _zombified_count; // Nof. nmethods made zombie in current sweep
static int _marked_for_reclamation_count; // Nof. nmethods marked for reclaim in current sweep
- static volatile int _sweep_fractions_left; // Nof. invocations left until we are completed with this pass
static volatile int _sweep_started; // Flag to control conc sweeper
static volatile bool _should_sweep; // Indicates if we should invoke the sweeper
static volatile int _bytes_changed; // Counts the total nmethod size if the nmethod changed from:
@@ -85,8 +82,12 @@
static int process_nmethod(nmethod *nm);
static void release_nmethod(nmethod* nm);
- static bool sweep_in_progress();
+ static void init_sweeper_log() NOT_DEBUG_RETURN;
+ static bool wait_for_stack_scanning();
static void sweep_code_cache();
+ static void handle_safepoint_request();
+ static void do_stack_scanning();
+ static void possibly_sweep();
public:
static long traversal_count() { return _traversals; }
@@ -106,7 +107,8 @@
#endif
static void mark_active_nmethods(); // Invoked at the end of each safepoint
- static void possibly_sweep(); // Compiler threads call this to sweep
+ static void sweeper_loop();
+ static void notify(int code_blob_type); // Possibly start the sweeper thread.
static int hotness_counter_reset_val();
static void report_state_change(nmethod* nm);
--- a/hotspot/src/share/vm/runtime/thread.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/runtime/thread.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -66,6 +66,7 @@
#include "runtime/sharedRuntime.hpp"
#include "runtime/statSampler.hpp"
#include "runtime/stubRoutines.hpp"
+#include "runtime/sweeper.hpp"
#include "runtime/task.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadCritical.hpp"
@@ -1553,6 +1554,7 @@
// Remove this ifdef when C1 is ported to the compiler interface.
static void compiler_thread_entry(JavaThread* thread, TRAPS);
+static void sweeper_thread_entry(JavaThread* thread, TRAPS);
JavaThread::JavaThread(ThreadFunction entry_point, size_t stack_sz) :
Thread()
@@ -3172,6 +3174,10 @@
CompileBroker::compiler_thread_loop();
}
+static void sweeper_thread_entry(JavaThread* thread, TRAPS) {
+ NMethodSweeper::sweeper_loop();
+}
+
// Create a CompilerThread
CompilerThread::CompilerThread(CompileQueue* queue,
CompilerCounters* counters)
@@ -3182,7 +3188,6 @@
_queue = queue;
_counters = counters;
_buffer_blob = NULL;
- _scanned_nmethod = NULL;
_compiler = NULL;
#ifndef PRODUCT
@@ -3190,7 +3195,12 @@
#endif
}
-void CompilerThread::oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) {
+// Create sweeper thread
+CodeCacheSweeperThread::CodeCacheSweeperThread()
+: JavaThread(&sweeper_thread_entry) {
+ _scanned_nmethod = NULL;
+}
+void CodeCacheSweeperThread::oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) {
JavaThread::oops_do(f, cld_f, cf);
if (_scanned_nmethod != NULL && cf != NULL) {
// Safepoints can occur when the sweeper is scanning an nmethod so
@@ -3607,9 +3617,6 @@
}
}
- // Give os specific code one last chance to start
- os::init_3();
-
create_vm_timer.end();
#ifdef ASSERT
_vm_complete = true;
@@ -3632,7 +3639,7 @@
if (!agent->valid()) {
char buffer[JVM_MAXPATHLEN];
- char ebuf[1024];
+ char ebuf[1024] = "";
const char *name = agent->name();
const char *msg = "Could not find agent library ";
--- a/hotspot/src/share/vm/runtime/thread.hpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/runtime/thread.hpp Wed Jul 05 20:07:30 2017 +0200
@@ -311,6 +311,7 @@
virtual bool is_VM_thread() const { return false; }
virtual bool is_Java_thread() const { return false; }
virtual bool is_Compiler_thread() const { return false; }
+ virtual bool is_Code_cache_sweeper_thread() const { return false; }
virtual bool is_hidden_from_external_view() const { return false; }
virtual bool is_jvmti_agent_thread() const { return false; }
// True iff the thread can perform GC operations at a safepoint.
@@ -1755,6 +1756,27 @@
return (CompilerThread*)this;
}
+// Dedicated thread to sweep the code cache
+class CodeCacheSweeperThread : public JavaThread {
+ nmethod* _scanned_nmethod; // nmethod being scanned by the sweeper
+ public:
+ CodeCacheSweeperThread();
+ // Track the nmethod currently being scanned by the sweeper
+ void set_scanned_nmethod(nmethod* nm) {
+ assert(_scanned_nmethod == NULL || nm == NULL, "should reset to NULL before writing a new value");
+ _scanned_nmethod = nm;
+ }
+
+ // Hide sweeper thread from external view.
+ bool is_hidden_from_external_view() const { return true; }
+
+ bool is_Code_cache_sweeper_thread() const { return true; }
+ // GC support
+ // Apply "f->do_oop" to all root oops in "this".
+ // Apply "cf->do_code_blob" (if !NULL) to all code blobs active in frames
+ void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf);
+};
+
// A thread used for Compilation.
class CompilerThread : public JavaThread {
friend class VMStructs;
@@ -1767,7 +1789,6 @@
CompileQueue* _queue;
BufferBlob* _buffer_blob;
- nmethod* _scanned_nmethod; // nmethod being scanned by the sweeper
AbstractCompiler* _compiler;
public:
@@ -1801,28 +1822,17 @@
_log = log;
}
- // GC support
- // Apply "f->do_oop" to all root oops in "this".
- // Apply "cf->do_code_blob" (if !NULL) to all code blobs active in frames
- void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf);
-
#ifndef PRODUCT
private:
IdealGraphPrinter *_ideal_graph_printer;
public:
- IdealGraphPrinter *ideal_graph_printer() { return _ideal_graph_printer; }
- void set_ideal_graph_printer(IdealGraphPrinter *n) { _ideal_graph_printer = n; }
+ IdealGraphPrinter *ideal_graph_printer() { return _ideal_graph_printer; }
+ void set_ideal_graph_printer(IdealGraphPrinter *n) { _ideal_graph_printer = n; }
#endif
// Get/set the thread's current task
- CompileTask* task() { return _task; }
- void set_task(CompileTask* task) { _task = task; }
-
- // Track the nmethod currently being scanned by the sweeper
- void set_scanned_nmethod(nmethod* nm) {
- assert(_scanned_nmethod == NULL || nm == NULL, "should reset to NULL before writing a new value");
- _scanned_nmethod = nm;
- }
+ CompileTask* task() { return _task; }
+ void set_task(CompileTask* task) { _task = task; }
};
inline CompilerThread* CompilerThread::current() {
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -1594,6 +1594,7 @@
declare_type(JvmtiAgentThread, JavaThread) \
declare_type(ServiceThread, JavaThread) \
declare_type(CompilerThread, JavaThread) \
+ declare_type(CodeCacheSweeperThread, JavaThread) \
declare_toplevel_type(OSThread) \
declare_toplevel_type(JavaFrameAnchor) \
\
--- a/hotspot/src/share/vm/runtime/vm_operations.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/runtime/vm_operations.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -111,6 +111,9 @@
CodeCache::make_marked_nmethods_zombies();
}
+void VM_MarkActiveNMethods::doit() {
+ NMethodSweeper::mark_active_nmethods();
+}
VM_DeoptimizeFrame::VM_DeoptimizeFrame(JavaThread* thread, intptr_t* id) {
_thread = thread;
--- a/hotspot/src/share/vm/runtime/vm_operations.hpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/runtime/vm_operations.hpp Wed Jul 05 20:07:30 2017 +0200
@@ -100,6 +100,7 @@
template(RotateGCLog) \
template(WhiteBoxOperation) \
template(ClassLoaderStatsOperation) \
+ template(MarkActiveNMethods) \
template(PrintCompileQueue) \
template(PrintCodeList) \
template(PrintCodeCache) \
@@ -252,6 +253,13 @@
bool allow_nested_vm_operations() const { return true; }
};
+class VM_MarkActiveNMethods: public VM_Operation {
+ public:
+ VM_MarkActiveNMethods() {}
+ VMOp_Type type() const { return VMOp_MarkActiveNMethods; }
+ void doit();
+ bool allow_nested_vm_operations() const { return true; }
+};
// Deopt helper that can deoptimize frames in threads other than the
// current thread. Only used through Deoptimization::deoptimize_frame.
--- a/hotspot/src/share/vm/services/jmm.h Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/services/jmm.h Wed Jul 05 20:07:30 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -143,7 +143,8 @@
JMM_VMGLOBAL_TYPE_UNKNOWN = 0,
JMM_VMGLOBAL_TYPE_JBOOLEAN = 1,
JMM_VMGLOBAL_TYPE_JSTRING = 2,
- JMM_VMGLOBAL_TYPE_JLONG = 3
+ JMM_VMGLOBAL_TYPE_JLONG = 3,
+ JMM_VMGLOBAL_TYPE_JDOUBLE = 4
} jmmVMGlobalType;
typedef enum {
--- a/hotspot/src/share/vm/services/mallocTracker.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/services/mallocTracker.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -72,7 +72,7 @@
MallocMemorySummary::record_free(size(), flags());
MallocMemorySummary::record_free_malloc_header(sizeof(MallocHeader));
- if (tracking_level() == NMT_detail) {
+ if (MemTracker::tracking_level() == NMT_detail) {
MallocSiteTable::deallocation_at(size(), _bucket_idx, _pos_idx);
}
}
@@ -128,36 +128,18 @@
}
// Uses placement global new operator to initialize malloc header
- switch(level) {
- case NMT_off:
- return malloc_base;
- case NMT_minimal: {
- MallocHeader* hdr = ::new (malloc_base) MallocHeader();
- break;
- }
- case NMT_summary: {
- assert(size <= MAX_MALLOC_SIZE, "malloc size overrun for NMT");
- header = ::new (malloc_base) MallocHeader(size, flags);
- break;
- }
- case NMT_detail: {
- assert(size <= MAX_MALLOC_SIZE, "malloc size overrun for NMT");
- header = ::new (malloc_base) MallocHeader(size, flags, stack);
- break;
- }
- default:
- ShouldNotReachHere();
+
+ if (level == NMT_off) {
+ return malloc_base;
}
+
+ header = ::new (malloc_base)MallocHeader(size, flags, stack, level);
memblock = (void*)((char*)malloc_base + sizeof(MallocHeader));
// The alignment check: 8 bytes alignment for 32 bit systems.
// 16 bytes alignment for 64-bit systems.
assert(((size_t)memblock & (sizeof(size_t) * 2 - 1)) == 0, "Alignment check");
- // Sanity check
- assert(get_memory_tracking_level(memblock) == level,
- "Wrong tracking level");
-
#ifdef ASSERT
if (level > NMT_minimal) {
// Read back
--- a/hotspot/src/share/vm/services/mallocTracker.hpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/services/mallocTracker.hpp Wed Jul 05 20:07:30 2017 +0200
@@ -239,68 +239,46 @@
class MallocHeader VALUE_OBJ_CLASS_SPEC {
#ifdef _LP64
- size_t _size : 62;
- size_t _level : 2;
+ size_t _size : 64;
size_t _flags : 8;
size_t _pos_idx : 16;
size_t _bucket_idx: 40;
#define MAX_MALLOCSITE_TABLE_SIZE ((size_t)1 << 40)
#define MAX_BUCKET_LENGTH ((size_t)(1 << 16))
-#define MAX_MALLOC_SIZE (((size_t)1 << 62) - 1)
#else
- size_t _size : 30;
- size_t _level : 2;
+ size_t _size : 32;
size_t _flags : 8;
size_t _pos_idx : 8;
size_t _bucket_idx: 16;
#define MAX_MALLOCSITE_TABLE_SIZE ((size_t)(1 << 16))
#define MAX_BUCKET_LENGTH ((size_t)(1 << 8))
-// Max malloc size = 1GB - 1 on 32 bit system, such has total 4GB memory
-#define MAX_MALLOC_SIZE ((size_t)(1 << 30) - 1)
#endif // _LP64
public:
- // Summary tracking header
- MallocHeader(size_t size, MEMFLAGS flags) {
- assert(sizeof(MallocHeader) == sizeof(void*) * 2,
- "Wrong header size");
-
- _level = NMT_summary;
- _flags = flags;
- set_size(size);
- MallocMemorySummary::record_malloc(size, flags);
- MallocMemorySummary::record_new_malloc_header(sizeof(MallocHeader));
- }
- // Detail tracking header
- MallocHeader(size_t size, MEMFLAGS flags, const NativeCallStack& stack) {
+ MallocHeader(size_t size, MEMFLAGS flags, const NativeCallStack& stack, NMT_TrackingLevel level) {
assert(sizeof(MallocHeader) == sizeof(void*) * 2,
"Wrong header size");
- _level = NMT_detail;
+ if (level == NMT_minimal) {
+ return;
+ }
+
_flags = flags;
set_size(size);
- size_t bucket_idx;
- size_t pos_idx;
- if (record_malloc_site(stack, size, &bucket_idx, &pos_idx)) {
- assert(bucket_idx <= MAX_MALLOCSITE_TABLE_SIZE, "Overflow bucket index");
- assert(pos_idx <= MAX_BUCKET_LENGTH, "Overflow bucket position index");
- _bucket_idx = bucket_idx;
- _pos_idx = pos_idx;
+ if (level == NMT_detail) {
+ size_t bucket_idx;
+ size_t pos_idx;
+ if (record_malloc_site(stack, size, &bucket_idx, &pos_idx)) {
+ assert(bucket_idx <= MAX_MALLOCSITE_TABLE_SIZE, "Overflow bucket index");
+ assert(pos_idx <= MAX_BUCKET_LENGTH, "Overflow bucket position index");
+ _bucket_idx = bucket_idx;
+ _pos_idx = pos_idx;
+ }
}
+
MallocMemorySummary::record_malloc(size, flags);
MallocMemorySummary::record_new_malloc_header(sizeof(MallocHeader));
}
- // Minimal tracking header
- MallocHeader() {
- assert(sizeof(MallocHeader) == sizeof(void*) * 2,
- "Wrong header size");
-
- _level = (unsigned short)NMT_minimal;
- }
-
- inline NMT_TrackingLevel tracking_level() const {
- return (NMT_TrackingLevel)_level;
- }
inline size_t size() const { return _size; }
inline MEMFLAGS flags() const { return (MEMFLAGS)_flags; }
@@ -311,7 +289,6 @@
private:
inline void set_size(size_t size) {
- assert(size <= MAX_MALLOC_SIZE, "Malloc size too large, should use virtual memory?");
_size = size;
}
bool record_malloc_site(const NativeCallStack& stack, size_t size,
@@ -347,10 +324,6 @@
// Record free on specified memory block
static void* record_free(void* memblock);
- // Get tracking level of specified memory block
- static inline NMT_TrackingLevel get_memory_tracking_level(void* memblock);
-
-
// Offset memory address to header address
static inline void* get_base(void* memblock);
static inline void* get_base(void* memblock, NMT_TrackingLevel level) {
@@ -361,16 +334,12 @@
// Get memory size
static inline size_t get_size(void* memblock) {
MallocHeader* header = malloc_header(memblock);
- assert(header->tracking_level() >= NMT_summary,
- "Wrong tracking level");
return header->size();
}
// Get memory type
static inline MEMFLAGS get_flags(void* memblock) {
MallocHeader* header = malloc_header(memblock);
- assert(header->tracking_level() >= NMT_summary,
- "Wrong tracking level");
return header->flags();
}
@@ -394,7 +363,6 @@
static inline MallocHeader* malloc_header(void *memblock) {
assert(memblock != NULL, "NULL pointer");
MallocHeader* header = (MallocHeader*)((char*)memblock - sizeof(MallocHeader));
- assert(header->tracking_level() >= NMT_minimal, "Bad header");
return header;
}
};
--- a/hotspot/src/share/vm/services/mallocTracker.inline.hpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/services/mallocTracker.inline.hpp Wed Jul 05 20:07:30 2017 +0200
@@ -28,13 +28,6 @@
#include "services/mallocTracker.hpp"
#include "services/memTracker.hpp"
-inline NMT_TrackingLevel MallocTracker::get_memory_tracking_level(void* memblock) {
- assert(memblock != NULL, "Sanity check");
- if (MemTracker::tracking_level() == NMT_off) return NMT_off;
- MallocHeader* header = malloc_header(memblock);
- return header->tracking_level();
-}
-
inline void* MallocTracker::get_base(void* memblock){
return get_base(memblock, MemTracker::tracking_level());
}
--- a/hotspot/src/share/vm/services/management.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/services/management.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -1333,7 +1333,7 @@
GrowableArray<oop>* locked_monitors = frame->locked_monitors();
for (j = 0; j < len; j++) {
oop monitor = locked_monitors->at(j);
- assert(monitor != NULL && monitor->is_instance(), "must be a Java object");
+ assert(monitor != NULL, "must be a Java object");
monitors_array->obj_at_put(count, monitor);
depths_array->int_at_put(count, depth);
count++;
@@ -1343,7 +1343,7 @@
GrowableArray<oop>* jni_locked_monitors = stacktrace->jni_locked_monitors();
for (j = 0; j < jni_locked_monitors->length(); j++) {
oop object = jni_locked_monitors->at(j);
- assert(object != NULL && object->is_instance(), "must be a Java object");
+ assert(object != NULL, "must be a Java object");
monitors_array->obj_at_put(count, object);
// Monitor locked via JNI MonitorEnter call doesn't have stack depth info
depths_array->int_at_put(count, -1);
@@ -1572,6 +1572,9 @@
} else if (flag->is_uint64_t()) {
global->value.j = (jlong)flag->get_uint64_t();
global->type = JMM_VMGLOBAL_TYPE_JLONG;
+ } else if (flag->is_double()) {
+ global->value.d = (jdouble)flag->get_double();
+ global->type = JMM_VMGLOBAL_TYPE_JDOUBLE;
} else if (flag->is_size_t()) {
global->value.j = (jlong)flag->get_size_t();
global->type = JMM_VMGLOBAL_TYPE_JLONG;
--- a/hotspot/src/share/vm/services/runtimeService.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/services/runtimeService.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -38,6 +38,7 @@
PerfCounter* RuntimeService::_total_safepoints = NULL;
PerfCounter* RuntimeService::_safepoint_time_ticks = NULL;
PerfCounter* RuntimeService::_application_time_ticks = NULL;
+double RuntimeService::_last_safepoint_sync_time_sec = 0.0;
void RuntimeService::init() {
// Make sure the VM version is initialized
@@ -96,6 +97,7 @@
// update the time stamp to begin recording safepoint time
_safepoint_timer.update();
+ _last_safepoint_sync_time_sec = 0.0;
if (UsePerfData) {
_total_safepoints->inc();
if (_app_timer.is_updated()) {
@@ -108,6 +110,9 @@
if (UsePerfData) {
_sync_time_ticks->inc(_safepoint_timer.ticks_since_update());
}
+ if (PrintGCApplicationStoppedTime) {
+ _last_safepoint_sync_time_sec = last_safepoint_time_sec();
+ }
}
void RuntimeService::record_safepoint_end() {
@@ -119,8 +124,10 @@
gclog_or_tty->date_stamp(PrintGCDateStamps);
gclog_or_tty->stamp(PrintGCTimeStamps);
gclog_or_tty->print_cr("Total time for which application threads "
- "were stopped: %3.7f seconds",
- last_safepoint_time_sec());
+ "were stopped: %3.7f seconds, "
+ "Stopping threads took: %3.7f seconds",
+ last_safepoint_time_sec(),
+ _last_safepoint_sync_time_sec);
}
// update the time stamp to begin recording app time
--- a/hotspot/src/share/vm/services/runtimeService.hpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/services/runtimeService.hpp Wed Jul 05 20:07:30 2017 +0200
@@ -37,6 +37,7 @@
static TimeStamp _safepoint_timer;
static TimeStamp _app_timer;
+ static double _last_safepoint_sync_time_sec;
public:
static void init();
--- a/hotspot/src/share/vm/services/threadService.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/services/threadService.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -597,7 +597,7 @@
GrowableArray<oop>* locked_monitors = frame->locked_monitors();
for (int j = 0; j < len; j++) {
oop monitor = locked_monitors->at(j);
- assert(monitor != NULL && monitor->is_instance(), "must be a Java object");
+ assert(monitor != NULL, "must be a Java object");
if (monitor == object) {
found = true;
break;
--- a/hotspot/src/share/vm/trace/trace.xml Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/trace/trace.xml Wed Jul 05 20:07:30 2017 +0200
@@ -383,7 +383,6 @@
<event id="SweepCodeCache" path="vm/code_sweeper/sweep" label="Sweep Code Cache"
has_thread="true" is_requestable="false" is_constant="false">
<value type="INTEGER" field="sweepIndex" label="Sweep Index" relation="SWEEP_ID"/>
- <value type="USHORT" field="sweepFractionIndex" label="Fraction Index"/>
<value type="UINT" field="sweptCount" label="Methods Swept"/>
<value type="UINT" field="flushedCount" label="Methods Flushed"/>
<value type="UINT" field="markedCount" label="Methods Reclaimed"/>
--- a/hotspot/src/share/vm/utilities/debug.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/utilities/debug.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -256,16 +256,18 @@
static const char* name[] = {
"shared read only space",
"shared read write space",
- "shared miscellaneous data space"
+ "shared miscellaneous data space",
+ "shared miscellaneous code space"
};
static const char* flag[] = {
"SharedReadOnlySize",
"SharedReadWriteSize",
- "SharedMiscDataSize"
+ "SharedMiscDataSize",
+ "SharedMiscCodeSize"
};
warning("\nThe %s is not large enough\n"
- "to preload requested classes. Use -XX:%s=\n"
+ "to preload requested classes. Use -XX:%s=<size>\n"
"to increase the initial size of %s.\n",
name[shared_space], flag[shared_space], name[shared_space]);
exit(2);
--- a/hotspot/src/share/vm/utilities/debug.hpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/utilities/debug.hpp Wed Jul 05 20:07:30 2017 +0200
@@ -245,7 +245,8 @@
enum SharedSpaceType {
SharedReadOnly,
SharedReadWrite,
- SharedMiscData
+ SharedMiscData,
+ SharedMiscCode
};
void report_out_of_shared_space(SharedSpaceType space_type);
--- a/hotspot/src/share/vm/utilities/dtrace_disabled.hpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/utilities/dtrace_disabled.hpp Wed Jul 05 20:07:30 2017 +0200
@@ -27,7 +27,7 @@
/* This file contains dummy provider probes needed when compiling a hotspot
* that does not support dtrace probes. This could be because we're building
- * on a system that doesn't suuport dtrace or because we're bulding a variant
+ * on a system that doesn't support dtrace or because we're building a variant
* of hotspot (like core) where we do not support dtrace
*/
#if !defined(DTRACE_ENABLED)
--- a/hotspot/src/share/vm/utilities/globalDefinitions.hpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/utilities/globalDefinitions.hpp Wed Jul 05 20:07:30 2017 +0200
@@ -1048,7 +1048,7 @@
const int badResourceValue = 0xAB; // value used to zap resource area
const int freeBlockPad = 0xBA; // value used to pad freed blocks.
const int uninitBlockPad = 0xF1; // value used to zap newly malloc'd blocks.
-const intptr_t badJNIHandleVal = (intptr_t) CONST64(0xFEFEFEFEFEFEFEFE); // value used to zap jni handle area
+const intptr_t badJNIHandleVal = (intptr_t) UCONST64(0xFEFEFEFEFEFEFEFE); // value used to zap jni handle area
const juint badHeapWordVal = 0xBAADBABE; // value used to zap heap after GC
const juint badMetaWordVal = 0xBAADFADE; // value used to zap metadata heap after GC
const int badCodeHeapNewVal= 0xCC; // value used to zap Code heap at allocation
--- a/hotspot/src/share/vm/utilities/globalDefinitions_visCPP.hpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/utilities/globalDefinitions_visCPP.hpp Wed Jul 05 20:07:30 2017 +0200
@@ -151,11 +151,11 @@
// Constant for jlong (specifying an long long constant is C++ compiler specific)
// Build a 64bit integer constant on with Visual C++
-#define CONST64(x) (x ## i64)
-#define UCONST64(x) ((uint64_t)CONST64(x))
+#define CONST64(x) (x ## i64)
+#define UCONST64(x) (x ## ui64)
-const jlong min_jlong = CONST64(0x8000000000000000);
-const jlong max_jlong = CONST64(0x7fffffffffffffff);
+const jlong min_jlong = (jlong)UCONST64(0x8000000000000000);
+const jlong max_jlong = CONST64(0x7fffffffffffffff);
//----------------------------------------------------------------------------------------------------
// Miscellaneous
--- a/hotspot/src/share/vm/utilities/ostream.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/utilities/ostream.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -76,6 +76,8 @@
const char* format, va_list ap,
bool add_cr,
size_t& result_len) {
+ assert(buflen >= 2, "buffer too small");
+
const char* result;
if (add_cr) buflen--;
if (!strchr(format, '%')) {
@@ -88,14 +90,21 @@
result = va_arg(ap, const char*);
result_len = strlen(result);
if (add_cr && result_len >= buflen) result_len = buflen-1; // truncate
- } else if (vsnprintf(buffer, buflen, format, ap) >= 0) {
+ } else {
+ // Handle truncation:
+ // posix: upon truncation, vsnprintf returns number of bytes which
+ // would have been written (excluding terminating zero) had the buffer
+ // been large enough
+ // windows: upon truncation, vsnprintf returns -1
+ const int written = vsnprintf(buffer, buflen, format, ap);
result = buffer;
- result_len = strlen(result);
- } else {
- DEBUG_ONLY(warning("increase O_BUFLEN in ostream.hpp -- output truncated");)
- result = buffer;
- result_len = buflen - 1;
- buffer[result_len] = 0;
+ if (written < (int) buflen && written >= 0) {
+ result_len = written;
+ } else {
+ DEBUG_ONLY(warning("increase O_BUFLEN in ostream.hpp -- output truncated");)
+ result_len = buflen - 1;
+ buffer[result_len] = 0;
+ }
}
if (add_cr) {
if (result != buffer) {
--- a/hotspot/src/share/vm/utilities/vmError.cpp Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/src/share/vm/utilities/vmError.cpp Wed Jul 05 20:07:30 2017 +0200
@@ -975,11 +975,13 @@
// Run error reporting to determine whether or not to report the crash.
if (!transmit_report_done && should_report_bug(first_error->_id)) {
transmit_report_done = true;
- FILE* hs_err = os::open(log.fd(), "r");
+ const int fd2 = ::dup(log.fd());
+ FILE* const hs_err = ::fdopen(fd2, "r");
if (NULL != hs_err) {
ErrorReporter er;
er.call(hs_err, buffer, O_BUFLEN);
}
+ ::fclose(hs_err);
}
if (log.fd() != defaultStream::output_fd()) {
--- a/hotspot/test/TEST.ROOT Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/TEST.ROOT Wed Jul 05 20:07:30 2017 +0200
@@ -30,3 +30,4 @@
keys=cte_test jcmd nmt regression gc stress
groups=TEST.groups [closed/TEST.groups]
+requires.properties=sun.arch.data.model
--- a/hotspot/test/TEST.groups Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/TEST.groups Wed Jul 05 20:07:30 2017 +0200
@@ -87,7 +87,6 @@
runtime/NMT/SummarySanityCheck.java \
runtime/NMT/ThreadedMallocTestType.java \
runtime/NMT/ThreadedVirtualAllocTestType.java \
- runtime/NMT/UnsafeMallocLimit.java \
runtime/NMT/VirtualAllocCommitUncommitRecommit.java \
runtime/NMT/VirtualAllocTestType.java \
runtime/RedefineObject/TestRedefineObject.java \
@@ -198,7 +197,8 @@
# Tests that require compact2 API's
#
-needs_compact2 =
+needs_compact2 = \
+ compiler/jsr292/CreatesInterfaceDotEqualsCallInfo.java
# All tests that run on the most minimal configuration: Minimal VM on Compact 1
compact1_minimal = \
@@ -443,6 +443,7 @@
compiler/arraycopy/TestMissingControl.java \
compiler/ciReplay/TestVM_no_comp_level.sh \
compiler/classUnloading/anonymousClass/TestAnonymousClassUnloading.java \
+ compiler/codecache/CheckSegmentedCodeCache.java \
compiler/codecache/CheckUpperLimit.java \
compiler/codegen/ \
compiler/cpuflags/RestoreMXCSR.java \
@@ -477,9 +478,9 @@
compiler/intrinsics/mathexact/SubExactILoopDependentTest.java \
compiler/intrinsics/stringequals/TestStringEqualsBadLength.java \
compiler/intrinsics/unsafe/UnsafeGetAddressTest.java \
+ compiler/intrinsics/classcast/NullCheckDroppingsTest.java \
compiler/jsr292/ConcurrentClassLoadingTest.java \
compiler/jsr292/CreatesInterfaceDotEqualsCallInfo.java \
- compiler/jsr292/CreatesInterfaceDotEqualsCallInfo.java \
compiler/loopopts/TestLogSum.java \
compiler/macronodes/TestEliminateAllocationPhi.java \
compiler/membars/TestMemBarAcquire.java \
@@ -602,3 +603,14 @@
:hotspot_gc \
:hotspot_runtime \
:hotspot_serviceability
+
+# All tests that depend on the nashorn extension.
+#
+needs_nashorn = \
+ compiler/jsr292/CreatesInterfaceDotEqualsCallInfo.java
+
+# All tests that do not depend on the nashorn extension.
+#
+not_needs_nashorn = \
+ :jdk \
+ -:needs_nashorn
--- a/hotspot/test/compiler/6896617/Test6896617.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/6896617/Test6896617.java Wed Jul 05 20:07:30 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,13 +25,20 @@
* @test
* @bug 6896617
* @summary Optimize sun.nio.cs.ISO_8859_1$Encode.encodeArrayLoop() with SSE instructions on x86
+ * @library /testlibrary
* @run main/othervm/timeout=1200 -Xbatch -Xmx256m Test6896617
*
*/
-import java.util.*;
-import java.nio.*;
-import java.nio.charset.*;
+import com.oracle.java.testlibrary.Utils;
+import java.nio.ByteBuffer;
+import java.nio.CharBuffer;
+import java.nio.charset.Charset;
+import java.nio.charset.CharsetDecoder;
+import java.nio.charset.CharsetEncoder;
+import java.nio.charset.CodingErrorAction;
+import java.util.Arrays;
+import java.util.Random;
public class Test6896617 {
final static int SIZE = 256;
@@ -54,7 +61,7 @@
sun.nio.cs.ArrayDecoder arrdec = (sun.nio.cs.ArrayDecoder)dec;
// Populate char[] with chars which can be encoded by ISO_8859_1 (<= 0xFF)
- Random rnd = new Random(0);
+ Random rnd = Utils.getRandomInstance();
int maxchar = 0xFF;
char[] a = new char[SIZE];
byte[] b = new byte[SIZE];
--- a/hotspot/test/compiler/7100757/Test7100757.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/7100757/Test7100757.java Wed Jul 05 20:07:30 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,11 +26,13 @@
* @test
* @bug 7100757
* @summary The BitSet.nextSetBit() produces incorrect result in 32bit VM on Sparc
- *
+ * @library /testlibrary
* @run main/timeout=300 Test7100757
*/
-import java.util.*;
+import com.oracle.java.testlibrary.Utils;
+import java.util.BitSet;
+import java.util.Random;
public class Test7100757 {
@@ -39,7 +41,7 @@
public static void main(String[] args) {
BitSet bs = new BitSet(NBITS);
- Random rnd = new Random();
+ Random rnd = Utils.getRandomInstance();
long[] ra = new long[(NBITS+63)/64];
for(int l=0; l < 5000000; l++) {
--- a/hotspot/test/compiler/7177917/Test7177917.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/7177917/Test7177917.java Wed Jul 05 20:07:30 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,13 +26,14 @@
* Micro-benchmark for Math.pow() and Math.exp()
*/
-import java.util.*;
+import com.oracle.java.testlibrary.Utils;
+import java.util.Random;
public class Test7177917 {
static double d;
- static Random r = new Random(0);
+ static final Random R = Utils.getRandomInstance();
static long m_pow(double[][] values) {
double res = 0;
@@ -59,10 +60,10 @@
static double[][] pow_values(int nb) {
double[][] res = new double[nb][2];
for (int i = 0; i < nb; i++) {
- double ylogx = (1 + (r.nextDouble() * 2045)) - 1023; // 2045 rather than 2046 as a safety margin
- double x = Math.abs(Double.longBitsToDouble(r.nextLong()));
+ double ylogx = (1 + (R.nextDouble() * 2045)) - 1023; // 2045 rather than 2046 as a safety margin
+ double x = Math.abs(Double.longBitsToDouble(R.nextLong()));
while (x != x) {
- x = Math.abs(Double.longBitsToDouble(r.nextLong()));
+ x = Math.abs(Double.longBitsToDouble(R.nextLong()));
}
double logx = Math.log(x) / Math.log(2);
double y = ylogx / logx;
@@ -76,7 +77,7 @@
static double[] exp_values(int nb) {
double[] res = new double[nb];
for (int i = 0; i < nb; i++) {
- double ylogx = (1 + (r.nextDouble() * 2045)) - 1023; // 2045 rather than 2046 as a safety margin
+ double ylogx = (1 + (R.nextDouble() * 2045)) - 1023; // 2045 rather than 2046 as a safety margin
double x = Math.E;
double logx = Math.log(x) / Math.log(2);
double y = ylogx / logx;
--- a/hotspot/test/compiler/7184394/TestAESBase.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/7184394/TestAESBase.java Wed Jul 05 20:07:30 2017 +0200
@@ -26,15 +26,13 @@
* @author Tom Deneau
*/
+import com.oracle.java.testlibrary.Utils;
+import java.security.AlgorithmParameters;
+import java.util.Random;
import javax.crypto.Cipher;
-import javax.crypto.KeyGenerator;
import javax.crypto.SecretKey;
import javax.crypto.spec.IvParameterSpec;
import javax.crypto.spec.SecretKeySpec;
-import java.security.AlgorithmParameters;
-
-import java.util.Random;
-import java.util.Arrays;
abstract public class TestAESBase {
int msgSize = Integer.getInteger("msgSize", 646);
@@ -59,7 +57,7 @@
byte[] expectedEncode;
byte[] decode;
byte[] expectedDecode;
- Random random = new Random(0);
+ final Random random = Utils.getRandomInstance();
Cipher cipher;
Cipher dCipher;
AlgorithmParameters algParams;
--- a/hotspot/test/compiler/7184394/TestAESMain.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/7184394/TestAESMain.java Wed Jul 05 20:07:30 2017 +0200
@@ -26,6 +26,7 @@
* @test
* @bug 7184394
* @summary add intrinsics to use AES instructions
+ * @library /testlibrary
*
* @run main/othervm/timeout=600 -Xbatch -DcheckOutput=true -Dmode=CBC TestAESMain
* @run main/othervm/timeout=600 -Xbatch -DcheckOutput=true -Dmode=CBC -DencInputOffset=1 TestAESMain
--- a/hotspot/test/compiler/8005956/PolynomialRoot.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/8005956/PolynomialRoot.java Wed Jul 05 20:07:30 2017 +0200
@@ -1,4 +1,3 @@
-//package com.polytechnik.utils;
/*
* (C) Vladislav Malyshkin 2010
* This file is under GPL version 3.
@@ -14,10 +13,14 @@
* @test
* @bug 8005956
* @summary C2: assert(!def_outside->member(r)) failed: Use of external LRG overlaps the same LRG defined in this block
-*
+* @library /testlibrary
* @run main/timeout=300 PolynomialRoot
*/
+import com.oracle.java.testlibrary.Utils;
+import java.util.Arrays;
+import java.util.Random;
+
public class PolynomialRoot {
@@ -57,7 +60,7 @@
public static int root4(final double [] p,final double [] re_root,final double [] im_root)
{
- if(PRINT_DEBUG) System.err.println("=====================root4:p="+java.util.Arrays.toString(p));
+ if (PRINT_DEBUG) { System.err.println("=====================root4:p=" + Arrays.toString(p)); }
final double vs=p[4];
if(PRINT_DEBUG) System.err.println("p[4]="+p[4]);
if(!(Math.abs(vs)>EPS))
@@ -367,7 +370,7 @@
- static void setRandomP(final double [] p,final int n,java.util.Random r)
+ static void setRandomP(final double [] p, final int n, Random r)
{
if(r.nextDouble()<0.1)
{
@@ -465,7 +468,7 @@
static void testRoots(final int n,
final int n_tests,
- final java.util.Random rn,
+ final Random rn,
final double eps)
{
final double [] p=new double [n+1];
@@ -763,7 +766,7 @@
final long t0=System.currentTimeMillis();
final double eps=1e-6;
//checkRoots();
- final java.util.Random r=new java.util.Random(-1381923);
+ final Random r = Utils.getRandomInstance();
printSpecialValues();
final int n_tests=100000;
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/EliminateAutoBox/UnsignedLoads.java Wed Jul 05 20:07:30 2017 +0200
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @library /testlibrary
+ * @run main/othervm -Xbatch -XX:+EliminateAutoBox
+ * -XX:CompileOnly=::valueOf,::byteValue,::shortValue,::testUnsignedByte,::testUnsignedShort
+ * UnsignedLoads
+ */
+import static com.oracle.java.testlibrary.Asserts.assertEQ;
+
+public class UnsignedLoads {
+ public static int testUnsignedByte() {
+ byte[] bytes = new byte[] {-1};
+ int res = 0;
+ for (int i = 0; i < 100000; i++) {
+ for (Byte b : bytes) {
+ res = b & 0xff;
+ }
+ }
+ return res;
+ }
+
+ public static int testUnsignedShort() {
+ int res = 0;
+ short[] shorts = new short[] {-1};
+ for (int i = 0; i < 100000; i++) {
+ for (Short s : shorts) {
+ res = s & 0xffff;
+ }
+ }
+ return res;
+ }
+
+ public static void main(String[] args) {
+ assertEQ(testUnsignedByte(), 255);
+ assertEQ(testUnsignedShort(), 65535);
+ System.out.println("TEST PASSED");
+ }
+}
--- a/hotspot/test/compiler/codecache/CheckSegmentedCodeCache.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/codecache/CheckSegmentedCodeCache.java Wed Jul 05 20:07:30 2017 +0200
@@ -22,15 +22,20 @@
*/
import com.oracle.java.testlibrary.*;
+import sun.hotspot.WhiteBox;
/*
* @test CheckSegmentedCodeCache
* @bug 8015774
+ * @library /testlibrary /testlibrary/whitebox
* @summary "Checks VM options related to the segmented code cache"
- * @library /testlibrary
- * @run main/othervm CheckSegmentedCodeCache
+ * @build CheckSegmentedCodeCache
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI CheckSegmentedCodeCache
*/
public class CheckSegmentedCodeCache {
+ private static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox();
// Code heap names
private static final String NON_METHOD = "CodeHeap 'non-nmethods'";
private static final String PROFILED = "CodeHeap 'profiled nmethods'";
@@ -133,8 +138,11 @@
failsWith(pb, "Invalid code heap sizes");
// Fails if not enough space for VM internal code
+ long minUseSpace = WHITE_BOX.getUintxVMFlag("CodeCacheMinimumUseSpace");
+ // minimum size: CodeCacheMinimumUseSpace DEBUG_ONLY(* 3)
+ long minSize = (Platform.isDebugBuild() ? 3 : 1) * minUseSpace;
pb = ProcessTools.createJavaProcessBuilder("-XX:+SegmentedCodeCache",
- "-XX:ReservedCodeCacheSize=1700K",
+ "-XX:ReservedCodeCacheSize=" + minSize,
"-XX:InitialCodeCacheSize=100K");
failsWith(pb, "Not enough space in non-nmethod code heap to run VM");
}
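
The hunk above derives the minimum ReservedCodeCacheSize from the CodeCacheMinimumUseSpace flag queried through the WhiteBox API, tripling it on debug builds to match the VM's DEBUG_ONLY(* 3) sizing. The failsWith helper it calls lives elsewhere in CheckSegmentedCodeCache.java and is not shown in this hunk; a minimal sketch of its usual shape, assuming the testlibrary's OutputAnalyzer and an exit value of 1 for a refused VM start, is:

import com.oracle.java.testlibrary.OutputAnalyzer;

class FailsWithSketch {
    static void failsWith(ProcessBuilder pb, String expectedMessage) throws Exception {
        OutputAnalyzer out = new OutputAnalyzer(pb.start()); // run the forked VM, capture stdout/stderr
        out.shouldContain(expectedMessage);                  // the startup error message must appear
        out.shouldHaveExitValue(1);                          // assumed exit code for a failed VM start
    }
}
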
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/debug/TraceIterativeGVN.java Wed Jul 05 20:07:30 2017 +0200
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @run main/othervm -Xbatch -XX:-TieredCompilation
+ * -XX:+IgnoreUnrecognizedVMOptions -XX:+TraceIterativeGVN
+ * TraceIterativeGVN
+ */
+public class TraceIterativeGVN {
+ public static void main(String[] args) {
+ for (int i = 0; i < 100_000; i++) {
+ Byte.valueOf((byte)0);
+ }
+ System.out.println("TEST PASSED");
+ }
+}
--- a/hotspot/test/compiler/exceptions/CatchInlineExceptions.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/exceptions/CatchInlineExceptions.java Wed Jul 05 20:07:30 2017 +0200
@@ -70,7 +70,7 @@
if (counter1 != 0) {
throw new RuntimeException("Failed: counter1(" + counter1 + ") != 0");
}
- if (counter2 != counter) {
+ if (counter2 != counter0) {
throw new RuntimeException("Failed: counter2(" + counter2 + ") != counter0(" + counter0 + ")");
}
if (counter2 != counter) {
--- a/hotspot/test/compiler/intrinsics/bmi/BMITestRunner.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/intrinsics/bmi/BMITestRunner.java Wed Jul 05 20:07:30 2017 +0200
@@ -22,13 +22,17 @@
*
*/
-import java.util.*;
+import com.oracle.java.testlibrary.Asserts;
+import com.oracle.java.testlibrary.OutputAnalyzer;
+import com.oracle.java.testlibrary.ProcessTools;
+import com.oracle.java.testlibrary.Utils;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
-import java.nio.charset.StandardCharsets;
-
-import com.oracle.java.testlibrary.*;
+import java.util.Collections;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Random;
/**
* Test runner that invokes all methods implemented by particular Expr
@@ -69,7 +73,7 @@
String... additionalVMOpts)
throws Throwable {
- int seed = new Random().nextInt();
+ int seed = Utils.getRandomInstance().nextInt();
int iterations = DEFAULT_ITERATIONS_COUNT;
for (String testOption : testOpts) {
@@ -81,8 +85,6 @@
}
}
- System.out.println("Running test with seed: " + seed);
-
OutputAnalyzer intOutput = runTest(expr, VMMode.INT,
additionalVMOpts,
seed, iterations);
@@ -139,9 +141,9 @@
Collections.addAll(vmOpts, new String[] {
"-XX:+DisplayVMOutputToStderr",
+ "-D" + Utils.SEED_PROPERTY_NAME + "=" + seed,
Executor.class.getName(),
expr.getName(),
- new Integer(seed).toString(),
new Integer(iterations).toString()
});
@@ -179,16 +181,15 @@
public static class Executor {
/**
- * Usage: BMITestRunner$Executor <ExprClassName> <seed> <iterations>
+ * Usage: BMITestRunner$Executor <ExprClassName> <iterations>
*/
public static void main(String args[]) throws Exception {
@SuppressWarnings("unchecked")
Class<? extends Expr> exprClass =
(Class<? extends Expr>)Class.forName(args[0]);
Expr expr = exprClass.getConstructor().newInstance();
- Random rng = new Random(Integer.valueOf(args[1]));
- int iterations = Integer.valueOf(args[2]);
- runTests(expr, iterations, rng);
+ int iterations = Integer.valueOf(args[1]);
+ runTests(expr, iterations, Utils.getRandomInstance());
}
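
With the change above, the random seed no longer travels to the forked Executor VM as a positional argument; it is published under the Utils.SEED_PROPERTY_NAME system property and picked up by Utils.getRandomInstance() on the child side. A small sketch of that hand-off follows; the helper class and method names are illustrative only.

import com.oracle.java.testlibrary.Utils;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;

class SeedHandOffSketch {
    // Parent side: the seed travels as a system property rather than as an argv entry.
    static List<String> childVmOptions(int seed, String exprClassName, int iterations) {
        List<String> opts = new ArrayList<>();
        opts.add("-D" + Utils.SEED_PROPERTY_NAME + "=" + seed);
        opts.add("BMITestRunner$Executor");        // main class of the forked VM
        opts.add(exprClassName);
        opts.add(Integer.toString(iterations));
        return opts;
    }

    // Child side: Utils.getRandomInstance() reads the same property, so the forked
    // Executor sees the seed chosen by the parent without an extra argument.
    static Random childRandom() {
        return Utils.getRandomInstance();
    }
}
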
--- a/hotspot/test/compiler/intrinsics/bmi/TestAndnI.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/intrinsics/bmi/TestAndnI.java Wed Jul 05 20:07:30 2017 +0200
@@ -41,14 +41,14 @@
public static void main(String args[]) throws Throwable {
if (!CPUInfo.hasFeature("bmi1")) {
- System.out.println("CPU does not support bmi1 feature. "+
- "Test skipped.");
- return;
+ System.out.println("INFO: CPU does not support bmi1 feature.");
}
BMITestRunner.runTests(AndnIExpr.class, args,
+ "-XX:+IgnoreUnrecognizedVMOptions",
"-XX:+UseBMI1Instructions");
BMITestRunner.runTests(AndnICommutativeExpr.class, args,
+ "-XX:+IgnoreUnrecognizedVMOptions",
"-XX:+UseBMI1Instructions");
}
--- a/hotspot/test/compiler/intrinsics/bmi/TestAndnL.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/intrinsics/bmi/TestAndnL.java Wed Jul 05 20:07:30 2017 +0200
@@ -41,14 +41,14 @@
public static void main(String args[]) throws Throwable {
if (!CPUInfo.hasFeature("bmi1")) {
- System.out.println("CPU does not support bmi1 feature. " +
- "Test skipped.");
- return;
+ System.out.println("INFO: CPU does not support bmi1 feature.");
}
BMITestRunner.runTests(AndnLExpr.class, args,
+ "-XX:+IgnoreUnrecognizedVMOptions",
"-XX:+UseBMI1Instructions");
BMITestRunner.runTests(AndnLCommutativeExpr.class, args,
+ "-XX:+IgnoreUnrecognizedVMOptions",
"-XX:+UseBMI1Instructions");
}
--- a/hotspot/test/compiler/intrinsics/bmi/TestBlsiI.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/intrinsics/bmi/TestBlsiI.java Wed Jul 05 20:07:30 2017 +0200
@@ -41,14 +41,14 @@
public static void main(String args[]) throws Throwable {
if (!CPUInfo.hasFeature("bmi1")) {
- System.out.println("CPU does not support bmi1 feature. " +
- "Test skipped.");
- return;
+ System.out.println("INFO: CPU does not support bmi1 feature.");
}
BMITestRunner.runTests(BlsiIExpr.class, args,
+ "-XX:+IgnoreUnrecognizedVMOptions",
"-XX:+UseBMI1Instructions");
BMITestRunner.runTests(BlsiICommutativeExpr.class, args,
+ "-XX:+IgnoreUnrecognizedVMOptions",
"-XX:+UseBMI1Instructions");
}
--- a/hotspot/test/compiler/intrinsics/bmi/TestBlsiL.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/intrinsics/bmi/TestBlsiL.java Wed Jul 05 20:07:30 2017 +0200
@@ -41,14 +41,14 @@
public static void main(String args[]) throws Throwable {
if (!CPUInfo.hasFeature("bmi1")) {
- System.out.println("CPU does not support bmi1 feature. " +
- "Test skipped.");
- return;
+ System.out.println("INFO: CPU does not support bmi1 feature.");
}
BMITestRunner.runTests(BlsiLExpr.class, args,
+ "-XX:+IgnoreUnrecognizedVMOptions",
"-XX:+UseBMI1Instructions");
BMITestRunner.runTests(BlsiLCommutativeExpr.class, args,
+ "-XX:+IgnoreUnrecognizedVMOptions",
"-XX:+UseBMI1Instructions");
}
--- a/hotspot/test/compiler/intrinsics/bmi/TestBlsmskI.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/intrinsics/bmi/TestBlsmskI.java Wed Jul 05 20:07:30 2017 +0200
@@ -41,14 +41,14 @@
public static void main(String args[]) throws Throwable {
if (!CPUInfo.hasFeature("bmi1")) {
- System.out.println("CPU does not support bmi1 feature. " +
- "Test skipped.");
- return;
+ System.out.println("INFO: CPU does not support bmi1 feature.");
}
BMITestRunner.runTests(BlsmskIExpr.class, args,
+ "-XX:+IgnoreUnrecognizedVMOptions",
"-XX:+UseBMI1Instructions");
BMITestRunner.runTests(BlsmskICommutativeExpr.class, args,
+ "-XX:+IgnoreUnrecognizedVMOptions",
"-XX:+UseBMI1Instructions");
}
--- a/hotspot/test/compiler/intrinsics/bmi/TestBlsmskL.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/intrinsics/bmi/TestBlsmskL.java Wed Jul 05 20:07:30 2017 +0200
@@ -41,14 +41,14 @@
public static void main(String args[]) throws Throwable {
if (!CPUInfo.hasFeature("bmi1")) {
- System.out.println("CPU does not support bmi1 feature. " +
- "Test skipped.");
- return;
+ System.out.println("INFO: CPU does not support bmi1 feature.");
}
BMITestRunner.runTests(BlsmskLExpr.class, args,
+ "-XX:+IgnoreUnrecognizedVMOptions",
"-XX:+UseBMI1Instructions");
BMITestRunner.runTests(BlsmskLCommutativeExpr.class, args,
+ "-XX:+IgnoreUnrecognizedVMOptions",
"-XX:+UseBMI1Instructions");
}
--- a/hotspot/test/compiler/intrinsics/bmi/TestBlsrI.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/intrinsics/bmi/TestBlsrI.java Wed Jul 05 20:07:30 2017 +0200
@@ -41,14 +41,14 @@
public static void main(String args[]) throws Throwable {
if (!CPUInfo.hasFeature("bmi1")) {
- System.out.println("CPU does not support bmi1 feature. " +
- "Test skipped.");
- return;
+ System.out.println("INFO: CPU does not support bmi1 feature.");
}
BMITestRunner.runTests(BlsrIExpr.class, args,
+ "-XX:+IgnoreUnrecognizedVMOptions",
"-XX:+UseBMI1Instructions");
BMITestRunner.runTests(BlsrICommutativeExpr.class, args,
+ "-XX:+IgnoreUnrecognizedVMOptions",
"-XX:+UseBMI1Instructions");
}
--- a/hotspot/test/compiler/intrinsics/bmi/TestBlsrL.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/intrinsics/bmi/TestBlsrL.java Wed Jul 05 20:07:30 2017 +0200
@@ -41,14 +41,14 @@
public static void main(String args[]) throws Throwable {
if (!CPUInfo.hasFeature("bmi1")) {
- System.out.println("CPU does not support bmi1 feature. " +
- "Test skipped.");
- return;
+ System.out.println("INFO: CPU does not support bmi1 feature.");
}
BMITestRunner.runTests(BlsrLExpr.class, args,
+ "-XX:+IgnoreUnrecognizedVMOptions",
"-XX:+UseBMI1Instructions");
BMITestRunner.runTests(BlsrLCommutativeExpr.class, args,
+ "-XX:+IgnoreUnrecognizedVMOptions",
"-XX:+UseBMI1Instructions");
}
--- a/hotspot/test/compiler/intrinsics/bmi/TestLzcntI.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/intrinsics/bmi/TestLzcntI.java Wed Jul 05 20:07:30 2017 +0200
@@ -41,12 +41,11 @@
public static void main(String args[]) throws Throwable {
if (!CPUInfo.hasFeature("lzcnt")) {
- System.out.println("CPU does not support lzcnt feature. " +
- "Test skipped.");
- return;
+ System.out.println("INFO: CPU does not support lzcnt feature.");
}
BMITestRunner.runTests(LzcntIExpr.class, args,
+ "-XX:+IgnoreUnrecognizedVMOptions",
"-XX:+UseCountLeadingZerosInstruction");
}
--- a/hotspot/test/compiler/intrinsics/bmi/TestLzcntL.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/intrinsics/bmi/TestLzcntL.java Wed Jul 05 20:07:30 2017 +0200
@@ -41,12 +41,11 @@
public static void main(String args[]) throws Throwable {
if (!CPUInfo.hasFeature("lzcnt")) {
- System.out.println("CPU does not support lzcnt feature. " +
- "Test skipped.");
- return;
+ System.out.println("INFO: CPU does not support lzcnt feature.");
}
BMITestRunner.runTests(LzcntLExpr.class, args,
+ "-XX:+IgnoreUnrecognizedVMOptions",
"-XX:+UseCountLeadingZerosInstruction");
}
--- a/hotspot/test/compiler/intrinsics/bmi/TestTzcntI.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/intrinsics/bmi/TestTzcntI.java Wed Jul 05 20:07:30 2017 +0200
@@ -41,12 +41,11 @@
public static void main(String args[]) throws Throwable {
if (!CPUInfo.hasFeature("bmi1")) {
- System.out.println("CPU does not support bmi1 feature. " +
- "Test skipped.");
- return;
+ System.out.println("INFO: CPU does not support bmi1 feature.");
}
BMITestRunner.runTests(TzcntIExpr.class, args,
+ "-XX:+IgnoreUnrecognizedVMOptions",
"-XX:+UseCountTrailingZerosInstruction");
}
--- a/hotspot/test/compiler/intrinsics/bmi/TestTzcntL.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/intrinsics/bmi/TestTzcntL.java Wed Jul 05 20:07:30 2017 +0200
@@ -41,12 +41,11 @@
public static void main(String args[]) throws Throwable {
if (!CPUInfo.hasFeature("bmi1")) {
- System.out.println("CPU does not support bmi1 feature. " +
- "Test skipped.");
- return;
+ System.out.println("INFO: CPU does not support bmi1 feature.");
}
BMITestRunner.runTests(TzcntLExpr.class, args,
+ "-XX:+IgnoreUnrecognizedVMOptions",
"-XX:+UseCountTrailingZerosInstruction");
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/intrinsics/classcast/NullCheckDroppingsTest.java Wed Jul 05 20:07:30 2017 +0200
@@ -0,0 +1,346 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test NullCheckDroppingsTest
+ * @bug 8054492
+ * @summary "Casting can result in redundant null checks in generated code"
+ * @library /testlibrary /testlibrary/whitebox /testlibrary/com/oracle/java/testlibrary
+ * @build NullCheckDroppingsTest
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main ClassFileInstaller com.oracle.java.testlibrary.Platform
+ * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
+ * -Xmixed -XX:-BackgroundCompilation -XX:-TieredCompilation -XX:CompileThreshold=1000
+ * -XX:CompileCommand=exclude,NullCheckDroppingsTest::runTest NullCheckDroppingsTest
+ */
+
+import sun.hotspot.WhiteBox;
+import sun.hotspot.code.NMethod;
+import com.oracle.java.testlibrary.Platform;
+
+import java.lang.reflect.Method;
+import java.lang.invoke.MethodHandle;
+import java.lang.invoke.MethodHandles;
+import java.lang.invoke.MethodType;
+import java.util.function.BiFunction;
+
+public class NullCheckDroppingsTest {
+
+ private static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox();
+
+ static final BiFunction<Class, Object, Object> fCast = (c, o) -> c.cast(o);
+
+ static final MethodHandle SET_SSINK;
+ static final MethodHandle MH_CAST;
+
+ static {
+ try {
+ SET_SSINK = MethodHandles.lookup().findSetter(NullCheckDroppingsTest.class, "ssink", String.class);
+ MH_CAST = MethodHandles.lookup().findVirtual(Class.class,
+ "cast",
+ MethodType.methodType(Object.class, Object.class));
+ }
+ catch (Exception e) {
+ throw new Error(e);
+ }
+ }
+
+ static volatile String svalue = "A";
+ static volatile String snull = null;
+ static volatile Integer iobj = new Integer(0);
+ static volatile int[] arr = new int[2];
+ static volatile Class objClass = String.class;
+ static volatile Class nullClass = null;
+
+ String ssink;
+ Integer isink;
+ int[] asink;
+
+ public static void main(String[] args) throws Exception {
+
+ // Only test C2 in Server VM
+ if (!Platform.isServer()) {
+ return;
+ }
+ // Make sure background compilation is disabled
+ if (WHITE_BOX.getBooleanVMFlag("BackgroundCompilation")) {
+ throw new AssertionError("Background compilation enabled");
+ }
+ // Make sure Tiered compilation is disabled
+ if (WHITE_BOX.getBooleanVMFlag("TieredCompilation")) {
+ throw new AssertionError("Tiered compilation enabled");
+ }
+
+ Method methodClassCast = NullCheckDroppingsTest.class.getDeclaredMethod("testClassCast", String.class);
+ Method methodMHCast = NullCheckDroppingsTest.class.getDeclaredMethod("testMHCast", String.class);
+ Method methodMHSetter = NullCheckDroppingsTest.class.getDeclaredMethod("testMHSetter", String.class);
+ Method methodFunction = NullCheckDroppingsTest.class.getDeclaredMethod("testFunction", String.class);
+
+ NullCheckDroppingsTest t = new NullCheckDroppingsTest();
+ t.runTest(methodClassCast, false);
+ t.runTest(methodMHCast, false);
+ t.runTest(methodMHSetter, false);
+ t.runTest(methodFunction, false);
+
+ // Edge cases
+ Method methodClassCastNull = NullCheckDroppingsTest.class.getDeclaredMethod("testClassCastNull", String.class);
+ Method methodNullClassCast = NullCheckDroppingsTest.class.getDeclaredMethod("testNullClassCast", String.class);
+ Method methodClassCastObj = NullCheckDroppingsTest.class.getDeclaredMethod("testClassCastObj", Object.class);
+ Method methodObjClassCast = NullCheckDroppingsTest.class.getDeclaredMethod("testObjClassCast", String.class);
+ Method methodVarClassCast = NullCheckDroppingsTest.class.getDeclaredMethod("testVarClassCast", String.class);
+ Method methodClassCastInt = NullCheckDroppingsTest.class.getDeclaredMethod("testClassCastInt", Object.class);
+ Method methodIntClassCast = NullCheckDroppingsTest.class.getDeclaredMethod("testIntClassCast", Object.class);
+ Method methodClassCastint = NullCheckDroppingsTest.class.getDeclaredMethod("testClassCastint", Object.class);
+ Method methodintClassCast = NullCheckDroppingsTest.class.getDeclaredMethod("testintClassCast", Object.class);
+ Method methodClassCastPrim = NullCheckDroppingsTest.class.getDeclaredMethod("testClassCastPrim", Object.class);
+ Method methodPrimClassCast = NullCheckDroppingsTest.class.getDeclaredMethod("testPrimClassCast", Object.class);
+
+ t.runTest(methodClassCastNull, false);
+ t.runTest(methodNullClassCast, false);
+ t.runTest(methodClassCastObj, false);
+ t.runTest(methodObjClassCast, true);
+ t.runTest(methodVarClassCast, true);
+ t.runTest(methodClassCastInt, false);
+ t.runTest(methodIntClassCast, true);
+ t.runTest(methodClassCastint, false);
+ t.runTest(methodintClassCast, false);
+ t.runTest(methodClassCastPrim, false);
+ t.runTest(methodPrimClassCast, true);
+ }
+
+ void testClassCast(String s) {
+ try {
+ ssink = String.class.cast(s);
+ } catch (Throwable t) {
+ throw new Error(t);
+ }
+ }
+
+ void testClassCastNull(String s) {
+ try {
+ ssink = String.class.cast(null);
+ } catch (Throwable t) {
+ throw new Error(t);
+ }
+ }
+
+ void testNullClassCast(String s) {
+ try {
+ ssink = (String)nullClass.cast(s);
+ throw new AssertionError("NullPointerException is not thrown");
+ } catch (NullPointerException t) {
+ // Ignore NullPointerException
+ } catch (Throwable t) {
+ throw new Error(t);
+ }
+ }
+
+ void testClassCastObj(Object s) {
+ try {
+ ssink = String.class.cast(s);
+ } catch (Throwable t) {
+ throw new Error(t);
+ }
+ }
+
+ void testObjClassCast(String s) {
+ try {
+ ssink = (String)objClass.cast(s);
+ } catch (Throwable t) {
+ throw new Error(t);
+ }
+ }
+
+ void testVarClassCast(String s) {
+ Class cl = (s == null) ? null : String.class;
+ try {
+ ssink = (String)cl.cast(svalue);
+ if (s == null) {
+ throw new AssertionError("NullPointerException is not thrown");
+ }
+ } catch (NullPointerException t) {
+ // Ignore NullPointerException
+ } catch (Throwable t) {
+ throw new Error(t);
+ }
+ }
+
+ void testClassCastInt(Object s) {
+ try {
+ ssink = String.class.cast(iobj);
+ throw new AssertionError("ClassCastException is not thrown");
+ } catch (ClassCastException t) {
+ // Ignore ClassCastException: Cannot cast java.lang.Integer to java.lang.String
+ } catch (Throwable t) {
+ throw new Error(t);
+ }
+ }
+
+ void testIntClassCast(Object s) {
+ try {
+ isink = Integer.class.cast(s);
+ if (s != null) {
+ throw new AssertionError("ClassCastException is not thrown");
+ }
+ } catch (ClassCastException t) {
+ // Ignore ClassCastException: Cannot cast java.lang.String to java.lang.Integer
+ } catch (Throwable t) {
+ throw new Error(t);
+ }
+ }
+
+ void testClassCastint(Object s) {
+ try {
+ ssink = String.class.cast(45);
+ throw new AssertionError("ClassCastException is not thrown");
+ } catch (ClassCastException t) {
+ // Ignore ClassCastException: Cannot cast java.lang.Integer to java.lang.String
+ } catch (Throwable t) {
+ throw new Error(t);
+ }
+ }
+
+ void testintClassCast(Object s) {
+ try {
+ isink = int.class.cast(s);
+ if (s != null) {
+ throw new AssertionError("ClassCastException is not thrown");
+ }
+ } catch (ClassCastException t) {
+ // Ignore ClassCastException: Cannot cast java.lang.String to java.lang.Integer
+ } catch (Throwable t) {
+ throw new Error(t);
+ }
+ }
+
+ void testClassCastPrim(Object s) {
+ try {
+ ssink = String.class.cast(arr);
+ throw new AssertionError("ClassCastException is not thrown");
+ } catch (ClassCastException t) {
+ // Ignore ClassCastException: Cannot cast [I to java.lang.String
+ } catch (Throwable t) {
+ throw new Error(t);
+ }
+ }
+
+ void testPrimClassCast(Object s) {
+ try {
+ asink = int[].class.cast(s);
+ if (s != null) {
+ throw new AssertionError("ClassCastException is not thrown");
+ }
+ } catch (ClassCastException t) {
+ // Ignore ClassCastException: Cannot cast java.lang.String to [I
+ } catch (Throwable t) {
+ throw new Error(t);
+ }
+ }
+
+ void testMHCast(String s) {
+ try {
+ ssink = (String) (Object) MH_CAST.invokeExact(String.class, (Object) s);
+ } catch (Throwable t) {
+ throw new Error(t);
+ }
+ }
+
+ void testMHSetter(String s) {
+ try {
+ SET_SSINK.invokeExact(this, s);
+ } catch (Throwable t) {
+ throw new Error(t);
+ }
+ }
+
+ void testFunction(String s) {
+ try {
+ ssink = (String) fCast.apply(String.class, s);
+ } catch (Throwable t) {
+ throw new Error(t);
+ }
+ }
+
+ void runTest(Method method, boolean deopt) {
+ if (method == null) {
+ throw new AssertionError("method was not found");
+ }
+ // Ensure method is compiled
+ WHITE_BOX.testSetDontInlineMethod(method, true);
+ for (int i = 0; i < 3000; i++) {
+ try {
+ method.invoke(this, svalue);
+ } catch (Exception e) {
+ throw new Error("Unexpected exception: ", e);
+ }
+ }
+ NMethod nm = getNMethod(method);
+
+ // Passing null should cause a de-optimization
+ // if method is compiled with a null-check.
+ try {
+ method.invoke(this, snull);
+ } catch (Exception e) {
+ throw new Error("Unexpected exception: ", e);
+ }
+ checkDeoptimization(method, nm, deopt);
+ }
+
+ static NMethod getNMethod(Method test) {
+ // Because background compilation is disabled, method should now be compiled
+ if (!WHITE_BOX.isMethodCompiled(test)) {
+ throw new AssertionError(test + " not compiled");
+ }
+
+ NMethod nm = NMethod.get(test, false); // not OSR nmethod
+ if (nm == null) {
+ throw new AssertionError(test + " missing nmethod?");
+ }
+ if (nm.comp_level != 4) {
+ throw new AssertionError(test + " is not compiled by C2: " + nm);
+ }
+ return nm;
+ }
+
+ static void checkDeoptimization(Method method, NMethod nmOrig, boolean deopt) {
+ // Check deoptimization event (intrinsic Class.cast() works).
+ if (WHITE_BOX.isMethodCompiled(method) == deopt) {
+ throw new AssertionError(method + " was" + (deopt ? " not" : "") + " deoptimized");
+ }
+ if (deopt) {
+ return;
+ }
+ // Ensure no recompilation when no deoptimization is expected.
+ NMethod nm = NMethod.get(method, false); // not OSR nmethod
+ if (nm == null) {
+ throw new AssertionError(method + " missing nmethod?");
+ }
+ if (nm.comp_level != 4) {
+ throw new AssertionError(method + " is not compiled by C2: " + nm);
+ }
+ if (nm.compile_id != nmOrig.compile_id) {
+ throw new AssertionError(method + " was recompiled: old nmethod=" + nmOrig + ", new nmethod=" + nm);
+ }
+ }
+}
--- a/hotspot/test/compiler/intrinsics/mathexact/AddExactIConstantTest.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/intrinsics/mathexact/AddExactIConstantTest.java Wed Jul 05 20:07:30 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
* @test
* @bug 8024924
* @summary Test constant addExact
+ * @library /testlibrary
* @compile AddExactIConstantTest.java Verify.java
* @run main AddExactIConstantTest
*
--- a/hotspot/test/compiler/intrinsics/mathexact/AddExactILoadTest.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/intrinsics/mathexact/AddExactILoadTest.java Wed Jul 05 20:07:30 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
* @test
* @bug 8024924
* @summary Test non constant addExact
+ * @library /testlibrary
* @compile AddExactILoadTest.java Verify.java
* @run main AddExactILoadTest
*
--- a/hotspot/test/compiler/intrinsics/mathexact/AddExactILoopDependentTest.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/intrinsics/mathexact/AddExactILoopDependentTest.java Wed Jul 05 20:07:30 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
* @test
* @bug 8024924
* @summary Test non constant addExact
+ * @library /testlibrary
* @compile AddExactILoopDependentTest.java Verify.java
* @run main AddExactILoopDependentTest
*
--- a/hotspot/test/compiler/intrinsics/mathexact/AddExactINonConstantTest.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/intrinsics/mathexact/AddExactINonConstantTest.java Wed Jul 05 20:07:30 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
* @test
* @bug 8024924
* @summary Test non constant addExact
+ * @library /testlibrary
* @compile AddExactINonConstantTest.java Verify.java
* @run main AddExactINonConstantTest
*
--- a/hotspot/test/compiler/intrinsics/mathexact/AddExactIRepeatTest.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/intrinsics/mathexact/AddExactIRepeatTest.java Wed Jul 05 20:07:30 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,11 +25,15 @@
* @test
* @bug 8025657
* @summary Test repeating addExact
+ * @library /testlibrary
* @compile AddExactIRepeatTest.java Verify.java
* @run main AddExactIRepeatTest
*
*/
+import com.oracle.java.testlibrary.Utils;
+import java.util.Random;
+
public class AddExactIRepeatTest {
public static void main(String[] args) {
runTest(new Verify.AddExactI());
@@ -44,7 +48,7 @@
}
public static void runTest(Verify.BinaryMethod method) {
- java.util.Random rnd = new java.util.Random();
+ Random rnd = Utils.getRandomInstance();
for (int i = 0; i < 50000; ++i) {
int x = Integer.MAX_VALUE - 10;
int y = Integer.MAX_VALUE - 10 + rnd.nextInt(5);
--- a/hotspot/test/compiler/intrinsics/mathexact/AddExactLConstantTest.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/intrinsics/mathexact/AddExactLConstantTest.java Wed Jul 05 20:07:30 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
* @test
* @bug 8026844
* @summary Test constant addExact
+ * @library /testlibrary
* @compile AddExactLConstantTest.java Verify.java
* @run main AddExactLConstantTest
*
--- a/hotspot/test/compiler/intrinsics/mathexact/AddExactLNonConstantTest.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/intrinsics/mathexact/AddExactLNonConstantTest.java Wed Jul 05 20:07:30 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
* @test
* @bug 8026844
* @summary Test non constant addExact
+ * @library /testlibrary
* @compile AddExactLNonConstantTest.java Verify.java
* @run main AddExactLNonConstantTest
*
--- a/hotspot/test/compiler/intrinsics/mathexact/DecExactITest.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/intrinsics/mathexact/DecExactITest.java Wed Jul 05 20:07:30 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
* @test
* @bug 8026844
* @summary Test decrementExact
+ * @library /testlibrary
* @compile DecExactITest.java Verify.java
* @run main DecExactITest
*
--- a/hotspot/test/compiler/intrinsics/mathexact/DecExactLTest.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/intrinsics/mathexact/DecExactLTest.java Wed Jul 05 20:07:30 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
* @test
* @bug 8026844
* @summary Test decrementExact
+ * @library /testlibrary
* @compile DecExactLTest.java Verify.java
* @run main DecExactLTest
*
--- a/hotspot/test/compiler/intrinsics/mathexact/IncExactITest.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/intrinsics/mathexact/IncExactITest.java Wed Jul 05 20:07:30 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
* @test
* @bug 8026844
* @summary Test incrementExact
+ * @library /testlibrary
* @compile IncExactITest.java Verify.java
* @run main IncExactITest
*
--- a/hotspot/test/compiler/intrinsics/mathexact/IncExactLTest.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/intrinsics/mathexact/IncExactLTest.java Wed Jul 05 20:07:30 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
* @test
* @bug 8026844
* @summary Test incrementExact
+ * @library /testlibrary
* @compile IncExactLTest.java Verify.java
* @run main IncExactLTest
*
--- a/hotspot/test/compiler/intrinsics/mathexact/MulExactIConstantTest.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/intrinsics/mathexact/MulExactIConstantTest.java Wed Jul 05 20:07:30 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
* @test
* @bug 8026844
* @summary Test constant multiplyExact
+ * @library /testlibrary
* @compile MulExactIConstantTest.java Verify.java
* @run main MulExactIConstantTest
*
--- a/hotspot/test/compiler/intrinsics/mathexact/MulExactILoadTest.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/intrinsics/mathexact/MulExactILoadTest.java Wed Jul 05 20:07:30 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
* @test
* @bug 8026844
* @summary Test multiplyExact
+ * @library /testlibrary
* @compile MulExactILoadTest.java Verify.java
* @run main MulExactILoadTest
*
--- a/hotspot/test/compiler/intrinsics/mathexact/MulExactILoopDependentTest.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/intrinsics/mathexact/MulExactILoopDependentTest.java Wed Jul 05 20:07:30 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
* @test
* @bug 8026844
* @summary Test loop dependent multiplyExact
+ * @library /testlibrary
* @compile MulExactILoopDependentTest.java Verify.java
* @run main MulExactILoopDependentTest
*
--- a/hotspot/test/compiler/intrinsics/mathexact/MulExactINonConstantTest.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/intrinsics/mathexact/MulExactINonConstantTest.java Wed Jul 05 20:07:30 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
* @test
* @bug 8026844
* @summary Test non constant multiplyExact
+ * @library /testlibrary
* @compile MulExactINonConstantTest.java Verify.java
* @run main MulExactINonConstantTest
*
--- a/hotspot/test/compiler/intrinsics/mathexact/MulExactIRepeatTest.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/intrinsics/mathexact/MulExactIRepeatTest.java Wed Jul 05 20:07:30 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,11 +25,15 @@
* @test
* @bug 8026844
* @summary Test repeating multiplyExact
+ * @library /testlibrary
* @compile MulExactIRepeatTest.java Verify.java
* @run main MulExactIRepeatTest
*
*/
+import com.oracle.java.testlibrary.Utils;
+import java.util.Random;
+
public class MulExactIRepeatTest {
public static void main(String[] args) {
runTest(new Verify.MulExactI());
@@ -44,7 +48,7 @@
}
public static void runTest(Verify.BinaryMethod method) {
- java.util.Random rnd = new java.util.Random();
+ Random rnd = Utils.getRandomInstance();
for (int i = 0; i < 50000; ++i) {
int x = Integer.MAX_VALUE - 10;
int y = Integer.MAX_VALUE - 10 + rnd.nextInt(5);
--- a/hotspot/test/compiler/intrinsics/mathexact/MulExactLConstantTest.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/intrinsics/mathexact/MulExactLConstantTest.java Wed Jul 05 20:07:30 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
* @test
* @bug 8026844
* @summary Test constant mulExact
+ * @library /testlibrary
* @compile MulExactLConstantTest.java Verify.java
* @run main MulExactLConstantTest
*
--- a/hotspot/test/compiler/intrinsics/mathexact/MulExactLNonConstantTest.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/intrinsics/mathexact/MulExactLNonConstantTest.java Wed Jul 05 20:07:30 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
* @test
* @bug 8026844
* @summary Test non constant mulExact
+ * @library /testlibrary
* @compile MulExactLNonConstantTest.java Verify.java
* @run main MulExactLNonConstantTest
*
--- a/hotspot/test/compiler/intrinsics/mathexact/NegExactIConstantTest.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/intrinsics/mathexact/NegExactIConstantTest.java Wed Jul 05 20:07:30 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
* @test
* @bug 8026844
* @summary Test constant negExact
+ * @library /testlibrary
* @compile NegExactIConstantTest.java Verify.java
* @run main NegExactIConstantTest
*
--- a/hotspot/test/compiler/intrinsics/mathexact/NegExactILoadTest.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/intrinsics/mathexact/NegExactILoadTest.java Wed Jul 05 20:07:30 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
* @test
* @bug 8026844
* @summary Test negExact
+ * @library /testlibrary
* @compile NegExactILoadTest.java Verify.java
* @run main NegExactILoadTest
*
--- a/hotspot/test/compiler/intrinsics/mathexact/NegExactILoopDependentTest.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/intrinsics/mathexact/NegExactILoopDependentTest.java Wed Jul 05 20:07:30 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
* @test
* @bug 8026844
* @summary Test negExact loop dependent
+ * @library /testlibrary
* @compile NegExactILoopDependentTest.java Verify.java
* @run main NegExactILoopDependentTest
*
--- a/hotspot/test/compiler/intrinsics/mathexact/NegExactINonConstantTest.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/intrinsics/mathexact/NegExactINonConstantTest.java Wed Jul 05 20:07:30 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
* @test
* @bug 8026844
* @summary Test non constant negExact
+ * @library /testlibrary
* @compile NegExactINonConstantTest.java Verify.java
* @run main NegExactINonConstantTest
*
--- a/hotspot/test/compiler/intrinsics/mathexact/NegExactLConstantTest.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/intrinsics/mathexact/NegExactLConstantTest.java Wed Jul 05 20:07:30 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
* @test
* @bug 8026844
* @summary Test constant negExact
+ * @library /testlibrary
* @compile NegExactLConstantTest.java Verify.java
* @run main NegExactLConstantTest
*
--- a/hotspot/test/compiler/intrinsics/mathexact/NegExactLNonConstantTest.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/intrinsics/mathexact/NegExactLNonConstantTest.java Wed Jul 05 20:07:30 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
* @test
* @bug 8026844
* @summary Test constant negExact
+ * @library /testlibrary
* @compile NegExactLNonConstantTest.java Verify.java
* @run main NegExactLNonConstantTest
*
--- a/hotspot/test/compiler/intrinsics/mathexact/SubExactICondTest.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/intrinsics/mathexact/SubExactICondTest.java Wed Jul 05 20:07:30 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
* @test
* @bug 8026844
* @summary Test subtractExact as condition
+ * @library /testlibrary
* @compile SubExactICondTest.java Verify.java
* @run main SubExactICondTest
*
--- a/hotspot/test/compiler/intrinsics/mathexact/SubExactIConstantTest.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/intrinsics/mathexact/SubExactIConstantTest.java Wed Jul 05 20:07:30 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
* @test
* @bug 8026844
* @summary Test constant subtractExact
+ * @library /testlibrary
* @compile SubExactIConstantTest.java Verify.java
* @run main SubExactIConstantTest
*
--- a/hotspot/test/compiler/intrinsics/mathexact/SubExactILoadTest.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/intrinsics/mathexact/SubExactILoadTest.java Wed Jul 05 20:07:30 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
* @test
* @bug 8026844
* @summary Test non constant subtractExact
+ * @library /testlibrary
* @compile SubExactILoadTest.java Verify.java
* @run main SubExactILoadTest
*
--- a/hotspot/test/compiler/intrinsics/mathexact/SubExactILoopDependentTest.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/intrinsics/mathexact/SubExactILoopDependentTest.java Wed Jul 05 20:07:30 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
* @test
* @bug 8026844
* @summary Test non constant subtractExact
+ * @library /testlibrary
* @compile SubExactILoopDependentTest.java Verify.java
* @run main SubExactILoopDependentTest
*
--- a/hotspot/test/compiler/intrinsics/mathexact/SubExactINonConstantTest.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/intrinsics/mathexact/SubExactINonConstantTest.java Wed Jul 05 20:07:30 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
* @test
* @bug 8026844
* @summary Test non constant subtractExact
+ * @library /testlibrary
* @compile SubExactINonConstantTest.java Verify.java
* @run main SubExactINonConstantTest
*
--- a/hotspot/test/compiler/intrinsics/mathexact/SubExactIRepeatTest.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/intrinsics/mathexact/SubExactIRepeatTest.java Wed Jul 05 20:07:30 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,12 +25,14 @@
* @test
* @bug 8026844
* @summary Test repeating subtractExact
+ * @library /testlibrary
* @compile SubExactIRepeatTest.java Verify.java
* @run main SubExactIRepeatTest
*
*/
-import java.lang.ArithmeticException;
+import com.oracle.java.testlibrary.Utils;
+import java.util.Random;
public class SubExactIRepeatTest {
public static void main(String[] args) {
@@ -46,7 +48,7 @@
}
public static void runTest(Verify.BinaryMethod method) {
- java.util.Random rnd = new java.util.Random();
+ Random rnd = Utils.getRandomInstance();
for (int i = 0; i < 50000; ++i) {
int x = Integer.MIN_VALUE + 10;
int y = Integer.MAX_VALUE - 10 + rnd.nextInt(5);
--- a/hotspot/test/compiler/intrinsics/mathexact/SubExactLConstantTest.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/intrinsics/mathexact/SubExactLConstantTest.java Wed Jul 05 20:07:30 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
* @bug 8026844
* @bug 8027353
* @summary Test constant subtractExact
+ * @library /testlibrary
* @compile SubExactLConstantTest.java Verify.java
* @run main SubExactLConstantTest
*
--- a/hotspot/test/compiler/intrinsics/mathexact/SubExactLNonConstantTest.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/intrinsics/mathexact/SubExactLNonConstantTest.java Wed Jul 05 20:07:30 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
* @bug 8026844
* @bug 8027353
* @summary Test non constant subtractExact
+ * @library /testlibrary
* @compile SubExactLNonConstantTest.java Verify.java
* @run main SubExactLNonConstantTest
*
--- a/hotspot/test/compiler/intrinsics/mathexact/Verify.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/intrinsics/mathexact/Verify.java Wed Jul 05 20:07:30 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -21,6 +21,13 @@
* questions.
*/
+import com.oracle.java.testlibrary.Utils;
+import java.util.Random;
+
+/**
+ * This class depends on the Utils class from the testlibrary package.
+ * It uses its factory method to obtain a random number generator.
+ */
public class Verify {
public static String throwWord(boolean threw) {
return (threw ? "threw" : "didn't throw");
@@ -134,7 +141,7 @@
public static class LoadTest {
- public static java.util.Random rnd = new java.util.Random();
+ public static Random rnd = Utils.getRandomInstance();
public static int[] values = new int[256];
public static void init() {
@@ -159,7 +166,7 @@
}
public static class NonConstantTest {
- public static java.util.Random rnd = new java.util.Random();
+ public static Random rnd = Utils.getRandomInstance();
public static int[] values = new int[] { Integer.MAX_VALUE, Integer.MIN_VALUE };
public static void verify(BinaryMethod method) {
@@ -180,7 +187,7 @@
public static class NonConstantLongTest {
public static long[] values = { Long.MIN_VALUE, Long.MAX_VALUE, 0, Long.MAX_VALUE - 1831 };
- public static java.util.Random rnd = new java.util.Random();
+ public static Random rnd = Utils.getRandomInstance();
public static void verify(BinaryLongMethod method) {
for (int i = 0; i < 50000; ++i) {
@@ -199,7 +206,7 @@
}
public static class LoopDependentTest {
- public static java.util.Random rnd = new java.util.Random();
+ public static Random rnd = Utils.getRandomInstance();
public static void verify(BinaryMethod method) {
int rnd1 = rnd.nextInt(), rnd2 = rnd.nextInt();
--- a/hotspot/test/compiler/jsr292/ConcurrentClassLoadingTest.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/jsr292/ConcurrentClassLoadingTest.java Wed Jul 05 20:07:30 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,18 +25,21 @@
* @test
* @bug 8022595
* @summary JSR292: deadlock during class loading of MethodHandles, MethodHandleImpl & MethodHandleNatives
- *
+ * @library /testlibrary
* @run main/othervm ConcurrentClassLoadingTest
*/
-import java.util.*;
+import com.oracle.java.testlibrary.Utils;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Random;
import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CyclicBarrier;
public class ConcurrentClassLoadingTest {
int numThreads = 0;
- long seed = 0;
CyclicBarrier l;
- Random rand;
+ private static final Random rand = Utils.getRandomInstance();
public static void main(String[] args) throws Throwable {
ConcurrentClassLoadingTest test = new ConcurrentClassLoadingTest();
@@ -49,9 +52,6 @@
while (i < args.length) {
String flag = args[i];
switch(flag) {
- case "-seed":
- seed = Long.parseLong(args[++i]);
- break;
case "-numThreads":
numThreads = Integer.parseInt(args[++i]);
break;
@@ -67,15 +67,9 @@
numThreads = Runtime.getRuntime().availableProcessors();
}
- if (seed == 0) {
- seed = (new Random()).nextLong();
- }
- rand = new Random(seed);
-
l = new CyclicBarrier(numThreads + 1);
System.out.printf("Threads: %d\n", numThreads);
- System.out.printf("Seed: %d\n", seed);
}
final List<Loader> loaders = new ArrayList<>();
@@ -90,7 +84,9 @@
System.out.printf("Thread #%d:\n", t);
for (int i = 0; i < count; i++) {
- if (c.size() == 0) break;
+ if (c.isEmpty()) {
+ break;
+ }
int k = rand.nextInt(c.size());
String elem = c.remove(k);
--- a/hotspot/test/compiler/startup/SmallCodeCacheStartup.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/startup/SmallCodeCacheStartup.java Wed Jul 05 20:07:30 2017 +0200
@@ -27,10 +27,20 @@
* @summary Test ensures that there is no crash if there is not enough ReservedCodeCacheSize
* to initialize all compiler threads. The option -Xcomp gives the VM more time
* to trigger the old bug.
- * @run main/othervm -XX:ReservedCodeCacheSize=3m -XX:CICompilerCount=64 -Xcomp SmallCodeCacheStartup
+ * @library /testlibrary
*/
+import com.oracle.java.testlibrary.*;
+
public class SmallCodeCacheStartup {
public static void main(String[] args) throws Exception {
+ try {
+ ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:ReservedCodeCacheSize=3m",
+ "-XX:CICompilerCount=64",
+ "-Xcomp",
+ "SmallCodeCacheStartup");
+ pb.start();
+ } catch (VirtualMachineError e) {}
+
System.out.println("TEST PASSED");
}
}
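
Note that the rewritten test only starts the forked VM and swallows a possible VirtualMachineError; it does not wait for the child or inspect its output. A stricter, purely hypothetical variant using the testlibrary's OutputAnalyzer would look roughly like this (running -version in the child to keep the sketch self-contained):

import com.oracle.java.testlibrary.OutputAnalyzer;
import com.oracle.java.testlibrary.ProcessTools;

class SmallCodeCacheStartupSketch {
    public static void main(String[] args) throws Exception {
        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
                "-XX:ReservedCodeCacheSize=3m", "-XX:CICompilerCount=64", "-Xcomp", "-version");
        OutputAnalyzer out = new OutputAnalyzer(pb.start());
        out.shouldHaveExitValue(0);   // the VM must start cleanly despite the tiny code cache
        System.out.println("TEST PASSED");
    }
}
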
--- a/hotspot/test/compiler/types/correctness/OffTest.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/types/correctness/OffTest.java Wed Jul 05 20:07:30 2017 +0200
@@ -36,10 +36,10 @@
import com.oracle.java.testlibrary.OutputAnalyzer;
import com.oracle.java.testlibrary.ProcessTools;
+import com.oracle.java.testlibrary.Utils;
+import java.util.Random;
import scenarios.ProfilingType;
-import java.util.Random;
-
public class OffTest {
private static final String[] OPTIONS = {
"-Xbootclasspath/a:.",
@@ -63,14 +63,7 @@
private static final int PROFILING_TYPE_INDEX = OPTIONS.length - 1;
private static final int TYPE_PROFILE_INDEX = OPTIONS.length - 4;
private static final int USE_TYPE_SPECULATION_INDEX = OPTIONS.length - 3;
- private static final Random RNG;
-
- static {
- String str = System.getProperty("seed");
- long seed = str != null ? Long.parseLong(str) : new Random().nextLong();
- RNG = new Random(seed);
- System.out.printf("-Dseed=%d%n", seed);
- }
+ private static final Random RNG = Utils.getRandomInstance();
public static void main(String[] args) throws Exception {
int count = DEFAULT_COUNT;
--- a/hotspot/test/compiler/unsafe/UnsafeRaw.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/unsafe/UnsafeRaw.java Wed Jul 05 20:07:30 2017 +0200
@@ -80,7 +80,7 @@
final int element_size = 4;
final int magic = 0x12345678;
- Random rnd = new Random();
+ Random rnd = Utils.getRandomInstance();
long array = unsafe.allocateMemory(array_size * element_size); // 128 ints
long addr = array + array_size * element_size / 2; // something in the middle to work with
--- a/hotspot/test/compiler/whitebox/CompilerWhiteBoxTest.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/whitebox/CompilerWhiteBoxTest.java Wed Jul 05 20:07:30 2017 +0200
@@ -73,8 +73,6 @@
protected static final int THRESHOLD;
/** invocation count to trigger OSR compilation */
protected static final long BACKEDGE_THRESHOLD;
- /** invocation count to warm up method before triggering OSR compilation */
- protected static final long OSR_WARMUP = 2000;
/** Value of {@code java.vm.info} (interpreted|mixed|comp mode) */
protected static final String MODE = System.getProperty("java.vm.info");
@@ -197,7 +195,6 @@
* is compiled, or if {@linkplain #method} has a non-zero
* compilation level.
*/
-
protected final void checkNotCompiled(int compLevel) {
if (WHITE_BOX.isMethodQueuedForCompilation(method)) {
throw new RuntimeException(method + " must not be in queue");
@@ -218,24 +215,30 @@
* compilation level.
*/
protected final void checkNotCompiled() {
- if (WHITE_BOX.isMethodCompiled(method, false)) {
- throw new RuntimeException(method + " must be not compiled");
- }
- if (WHITE_BOX.getMethodCompilationLevel(method, false) != 0) {
- throw new RuntimeException(method + " comp_level must be == 0");
- }
- checkNotOsrCompiled();
+ checkNotCompiled(true);
+ checkNotCompiled(false);
}
- protected final void checkNotOsrCompiled() {
+ /**
+ * Checks that {@linkplain #method} is not (OSR-)compiled.
+ *
+ * @param isOsr Check for OSR compilation if true
+ * @throws RuntimeException if {@linkplain #method} is in compiler queue or
+ * is compiled, or if {@linkplain #method} has a non-zero
+ * compilation level.
+ */
+ protected final void checkNotCompiled(boolean isOsr) {
+ waitBackgroundCompilation();
if (WHITE_BOX.isMethodQueuedForCompilation(method)) {
throw new RuntimeException(method + " must not be in queue");
}
- if (WHITE_BOX.isMethodCompiled(method, true)) {
- throw new RuntimeException(method + " must be not osr_compiled");
+ if (WHITE_BOX.isMethodCompiled(method, isOsr)) {
+ throw new RuntimeException(method + " must not be " +
+ (isOsr ? "osr_" : "") + "compiled");
}
- if (WHITE_BOX.getMethodCompilationLevel(method, true) != 0) {
- throw new RuntimeException(method + " osr_comp_level must be == 0");
+ if (WHITE_BOX.getMethodCompilationLevel(method, isOsr) != 0) {
+ throw new RuntimeException(method + (isOsr ? " osr_" : " ") +
+ "comp_level must be == 0");
}
}
@@ -498,8 +501,7 @@
= new Callable<Integer>() {
@Override
public Integer call() throws Exception {
- int result = warmup(OSR_CONSTRUCTOR);
- return result + new Helper(null, CompilerWhiteBoxTest.BACKEDGE_THRESHOLD).hashCode();
+ return new Helper(null, CompilerWhiteBoxTest.BACKEDGE_THRESHOLD).hashCode();
}
};
@@ -509,8 +511,7 @@
@Override
public Integer call() throws Exception {
- int result = warmup(OSR_METHOD);
- return result + helper.osrMethod(CompilerWhiteBoxTest.BACKEDGE_THRESHOLD);
+ return helper.osrMethod(CompilerWhiteBoxTest.BACKEDGE_THRESHOLD);
}
};
@@ -518,66 +519,10 @@
= new Callable<Integer>() {
@Override
public Integer call() throws Exception {
- int result = warmup(OSR_STATIC);
- return result + osrStaticMethod(CompilerWhiteBoxTest.BACKEDGE_THRESHOLD);
+ return osrStaticMethod(CompilerWhiteBoxTest.BACKEDGE_THRESHOLD);
}
};
- /**
- * Deoptimizes all non-osr versions of the given executable after
- * compilation finished.
- *
- * @param e Executable
- * @throws Exception
- */
- private static void waitAndDeoptimize(Executable e) throws Exception {
- CompilerWhiteBoxTest.waitBackgroundCompilation(e);
- if (WhiteBox.getWhiteBox().isMethodQueuedForCompilation(e)) {
- throw new RuntimeException(e + " must not be in queue");
- }
- // Deoptimize non-osr versions of executable
- WhiteBox.getWhiteBox().deoptimizeMethod(e, false);
- }
-
- /**
- * Executes the method multiple times to make sure we have
- * enough profiling information before triggering an OSR
- * compilation. Otherwise the C2 compiler may add uncommon traps.
- *
- * @param m Method to be executed
- * @return Number of times the method was executed
- * @throws Exception
- */
- private static int warmup(Method m) throws Exception {
- Helper helper = new Helper();
- int result = 0;
- for (long i = 0; i < CompilerWhiteBoxTest.OSR_WARMUP; ++i) {
- result += (int)m.invoke(helper, 1);
- }
- // Deoptimize non-osr versions
- waitAndDeoptimize(m);
- return result;
- }
-
- /**
- * Executes the constructor multiple times to make sure we
- * have enough profiling information before triggering an OSR
- * compilation. Otherwise the C2 compiler may add uncommon traps.
- *
- * @param c Constructor to be executed
- * @return Number of times the constructor was executed
- * @throws Exception
- */
- private static int warmup(Constructor c) throws Exception {
- int result = 0;
- for (long i = 0; i < CompilerWhiteBoxTest.OSR_WARMUP; ++i) {
- result += c.newInstance(null, 1).hashCode();
- }
- // Deoptimize non-osr versions
- waitAndDeoptimize(c);
- return result;
- }
-
private static final Constructor CONSTRUCTOR;
private static final Constructor OSR_CONSTRUCTOR;
private static final Method METHOD;
@@ -622,16 +567,83 @@
return 42;
}
- private static int osrStaticMethod(long limit) {
+ /**
+ * Deoptimizes all non-osr versions of the given executable after
+ * compilation finished.
+ *
+ * @param e Executable
+ * @throws Exception
+ */
+ private static void waitAndDeoptimize(Executable e) {
+ CompilerWhiteBoxTest.waitBackgroundCompilation(e);
+ if (WhiteBox.getWhiteBox().isMethodQueuedForCompilation(e)) {
+ throw new RuntimeException(e + " must not be in queue");
+ }
+ // Deoptimize non-osr versions of executable
+ WhiteBox.getWhiteBox().deoptimizeMethod(e, false);
+ }
+
+ /**
+ * Executes the method multiple times to make sure we have
+ * enough profiling information before triggering an OSR
+ * compilation. Otherwise the C2 compiler may add uncommon traps.
+ *
+ * @param m Method to be executed
+ * @return Number of times the method was executed
+ * @throws Exception
+ */
+ private static int warmup(Method m) throws Exception {
+ waitAndDeoptimize(m);
+ Helper helper = new Helper();
int result = 0;
+ for (long i = 0; i < CompilerWhiteBoxTest.THRESHOLD; ++i) {
+ result += (int)m.invoke(helper, 1);
+ }
+ // Wait to make sure OSR compilation is not blocked by
+ // non-OSR compilation in the compile queue
+ CompilerWhiteBoxTest.waitBackgroundCompilation(m);
+ return result;
+ }
+
+ /**
+ * Executes the constructor multiple times to make sure we
+ * have enough profiling information before triggering an OSR
+ * compilation. Otherwise the C2 compiler may add uncommon traps.
+ *
+ * @param c Constructor to be executed
+ * @return Number of times the constructor was executed
+ * @throws Exception
+ */
+ private static int warmup(Constructor c) throws Exception {
+ waitAndDeoptimize(c);
+ int result = 0;
+ for (long i = 0; i < CompilerWhiteBoxTest.THRESHOLD; ++i) {
+ result += c.newInstance(null, 1).hashCode();
+ }
+ // Wait to make sure OSR compilation is not blocked by
+ // non-OSR compilation in the compile queue
+ CompilerWhiteBoxTest.waitBackgroundCompilation(c);
+ return result;
+ }
+
+ private static int osrStaticMethod(long limit) throws Exception {
+ int result = 0;
+ if (limit != 1) {
+ result = warmup(OSR_STATIC);
+ }
+ // Trigger osr compilation
for (long i = 0; i < limit; ++i) {
result += staticMethod();
}
return result;
}
- private int osrMethod(long limit) {
+ private int osrMethod(long limit) throws Exception {
int result = 0;
+ if (limit != 1) {
+ result = warmup(OSR_METHOD);
+ }
+ // Trigger osr compilation
for (long i = 0; i < limit; ++i) {
result += method();
}
@@ -646,8 +658,12 @@
}
// for OSR constructor test case
- private Helper(Object o, long limit) {
+ private Helper(Object o, long limit) throws Exception {
int result = 0;
+ if (limit != 1) {
+ result = warmup(OSR_CONSTRUCTOR);
+ }
+ // Trigger osr compilation
for (long i = 0; i < limit; ++i) {
result += method();
}
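
The warmup helpers above execute the target CompilerWhiteBoxTest.THRESHOLD times so the profile is populated before the long back-edge loop requests an OSR compilation, and they wait for background compilation so the OSR request is not stuck behind a regular compile. As a rough stand-alone illustration of loop-triggered OSR (the iteration counts are arbitrary assumptions and no WhiteBox API is used):

    public class OsrLoopSketch {
        // A long-running loop inside a single method invocation is what makes the VM
        // consider an on-stack replacement (OSR) compilation at the loop back edge.
        static long spin(long iterations) {
            long sum = 0;
            for (long i = 0; i < iterations; i++) {
                sum += i & 0xff;  // cheap body, keeps profiling data flowing
            }
            return sum;
        }

        public static void main(String[] args) {
            // Short runs first: they warm up the profile without forcing OSR.
            for (int i = 0; i < 2_000; i++) {
                spin(1);
            }
            // One long run: the back-edge counter overflows and OSR can kick in.
            System.out.println(spin(200_000));
        }
    }
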
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/whitebox/DeoptimizeMultipleOSRTest.java Wed Jul 05 20:07:30 2017 +0200
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import sun.hotspot.WhiteBox;
+import java.lang.reflect.Executable;
+import java.lang.reflect.Method;
+
+/*
+ * @test DeoptimizeMultipleOSRTest
+ * @bug 8061817
+ * @library /testlibrary /testlibrary/whitebox
+ * @build DeoptimizeMultipleOSRTest
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,DeoptimizeMultipleOSRTest::triggerOSR DeoptimizeMultipleOSRTest
+ * @summary testing of WB::deoptimizeMethod()
+ */
+public class DeoptimizeMultipleOSRTest {
+ private static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox();
+ private static final long BACKEDGE_THRESHOLD = 150000;
+ private Method method;
+ private int counter = 0;
+
+ public static void main(String[] args) throws Exception {
+ DeoptimizeMultipleOSRTest test = new DeoptimizeMultipleOSRTest();
+ test.test();
+ }
+
+ /**
+ * Triggers two different OSR compilations for the same method and
+ * checks if WhiteBox.deoptimizeMethod() deoptimizes both.
+ *
+ * @throws Exception
+ */
+ public void test() throws Exception {
+ method = DeoptimizeMultipleOSRTest.class.getDeclaredMethod("triggerOSR", boolean.class, long.class);
+ // Trigger two OSR compiled versions
+ triggerOSR(true, BACKEDGE_THRESHOLD);
+ triggerOSR(false, BACKEDGE_THRESHOLD);
+ // Wait for compilation
+ CompilerWhiteBoxTest.waitBackgroundCompilation(method);
+ // Deoptimize
+ WHITE_BOX.deoptimizeMethod(method, true);
+ if (WHITE_BOX.isMethodCompiled(method, true)) {
+ throw new AssertionError("Not all OSR compiled versions were deoptimized");
+ }
+ }
+
+ /**
+ * Triggers OSR compilations by executing loops.
+ *
+ * @param first Determines which loop to execute
+ * @param limit The number of loop iterations
+ */
+ public void triggerOSR(boolean first, long limit) {
+ if (limit != 1) {
+ // Warmup method to avoid uncommon traps
+ for (int i = 0; i < limit; ++i) {
+ triggerOSR(first, 1);
+ }
+ CompilerWhiteBoxTest.waitBackgroundCompilation(method);
+ }
+ if (first) {
+ // Trigger OSR compilation 1
+ for (int i = 0; i < limit; ++i) {
+ counter++;
+ }
+ } else {
+ // Trigger OSR compilation 2
+ for (int i = 0; i < limit; ++i) {
+ counter++;
+ }
+ }
+ }
+}
--- a/hotspot/test/compiler/whitebox/MakeMethodNotCompilableTest.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/compiler/whitebox/MakeMethodNotCompilableTest.java Wed Jul 05 20:07:30 2017 +0200
@@ -132,14 +132,15 @@
throw new RuntimeException(method
+ " is not compilable after clearMethodState()");
}
-
+ // Make method not (OSR-)compilable (depending on testCase.isOsr())
makeNotCompilable();
if (isCompilable()) {
throw new RuntimeException(method + " must be not compilable");
}
-
+ // Try to (OSR-)compile method
compile();
- checkNotOsrCompiled();
+ // Method should not be (OSR-)compiled
+ checkNotCompiled(testCase.isOsr());
if (isCompilable()) {
throw new RuntimeException(method + " must be not compilable");
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/arguments/TestUseNUMAInterleaving.java Wed Jul 05 20:07:30 2017 +0200
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test TestUseNUMAInterleaving
+ * @summary Tests that UseNUMAInterleaving is enabled for all collectors by
+ * ergonomics on all platforms when the UseNUMA feature is enabled.
+ * @bug 8059614
+ * @key gc
+ * @library /testlibrary
+ * @run driver TestUseNUMAInterleaving
+ */
+import com.oracle.java.testlibrary.ProcessTools;
+import com.oracle.java.testlibrary.OutputAnalyzer;
+
+public class TestUseNUMAInterleaving {
+
+ public static void main(String[] args) throws Exception {
+ String[] vmargs = new String[]{
+ "-XX:+UseNUMA",
+ "-XX:+PrintFlagsFinal",
+ "-version"
+ };
+ ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(true, vmargs);
+ OutputAnalyzer output = new OutputAnalyzer(pb.start());
+
+ boolean isNUMAEnabled
+ = Boolean.parseBoolean(output.firstMatch(NUMA_FLAG_PATTERN, 1));
+
+ if (isNUMAEnabled) {
+ output.shouldMatch("\\bUseNUMAInterleaving\\b.*?=.*?true");
+ System.out.println(output.getStdout());
+ } else {
+ System.out.println(output.firstMatch(NUMA_FLAG_PATTERN));
+ System.out.println(output.firstMatch(NUMA_FLAG_PATTERN, 1));
+ }
+ }
+
+ private static final String NUMA_FLAG_PATTERN = "\\bUseNUMA\\b.*?=.*?([a-z]+)";
+}
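
The test above greps the -XX:+PrintFlagsFinal output for the UseNUMA and UseNUMAInterleaving flag lines. A small stand-alone sketch of that matching step (the sample line's exact spacing and columns are assumptions; real flag output varies between releases):

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class FlagLineParseSketch {
        private static final Pattern FLAG = Pattern.compile("\\bUseNUMA\\b.*?=.*?([a-z]+)");

        public static void main(String[] args) {
            // A line shaped like -XX:+PrintFlagsFinal output; the layout is illustrative.
            String sample = "     bool UseNUMA                          = false          {product}";
            Matcher m = FLAG.matcher(sample);
            if (m.find()) {
                boolean enabled = Boolean.parseBoolean(m.group(1));
                System.out.println("UseNUMA enabled: " + enabled);
            }
        }
    }
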
--- a/hotspot/test/gc/g1/TestHumongousCodeCacheRoots.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/gc/g1/TestHumongousCodeCacheRoots.java Wed Jul 05 20:07:30 2017 +0200
@@ -135,7 +135,6 @@
"-XX:+UnlockDiagnosticVMOptions",
"-XX:InitiatingHeapOccupancyPercent=1", // strong code root marking
"-XX:+G1VerifyHeapRegionCodeRoots", "-XX:+VerifyAfterGC", // make sure that verification is run
- "-XX:NmethodSweepFraction=1", "-XX:NmethodSweepCheckInterval=1", // make the code cache sweep more predictable
};
runTest("-client", baseArguments);
runTest("-server", baseArguments);
--- a/hotspot/test/runtime/7194254/Test7194254.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/runtime/7194254/Test7194254.java Wed Jul 05 20:07:30 2017 +0200
@@ -27,6 +27,7 @@
* @summary Creates several threads with different java priorities and checks
* whether jstack reports correct priorities for them.
*
+ * @ignore 8060219
* @run main Test7194254
*/
--- a/hotspot/test/runtime/NMT/MallocSiteHashOverflow.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/runtime/NMT/MallocSiteHashOverflow.java Wed Jul 05 20:07:30 2017 +0200
@@ -24,41 +24,56 @@
/*
* @test
* @summary Test corner case that overflows malloc site hashtable bucket
+ * @requires sun.arch.data.model == "32"
* @key nmt jcmd stress
* @library /testlibrary /testlibrary/whitebox
- * @ignore - This test is disabled since it will stress NMT and timeout during normal testing
+ * @ignore 8062870
* @build MallocSiteHashOverflow
* @run main ClassFileInstaller sun.hotspot.WhiteBox
- * @run main/othervm/timeout=480 -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail MallocSiteHashOverflow
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail MallocSiteHashOverflow
*/
import com.oracle.java.testlibrary.*;
import sun.hotspot.WhiteBox;
public class MallocSiteHashOverflow {
- private static long K = 1024;
+
public static void main(String args[]) throws Exception {
- String vm_name = System.getProperty("java.vm.name");
+ // Size of entries based on malloc tracking header defined in mallocTracker.hpp
// For 32-bit systems, create 257 malloc sites with the same hash bucket to overflow a hash bucket
- // For 64-bit systems, create 64K + 1 malloc sites with the same hash bucket to overflow a hash bucket
long entries = 257;
- if (Platform.is64bit()) {
- entries = 64 * K + 1;
- }
OutputAnalyzer output;
WhiteBox wb = WhiteBox.getWhiteBox();
+ int MAX_HASH_SIZE = wb.NMTGetHashSize();
// Grab my own PID
String pid = Integer.toString(ProcessTools.getProcessId());
ProcessBuilder pb = new ProcessBuilder();
- wb.NMTOverflowHashBucket(entries);
-
- // Run 'jcmd <pid> VM.native_memory summary'
+ // Verify that current tracking level is "detail"
pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "statistics"});
output = new OutputAnalyzer(pb.start());
- output.shouldContain("Tracking level has been downgraded due to lack of resources");
+ output.shouldContain("Native Memory Tracking Statistics");
+
+ // Attempt to cause NMT to downgrade tracking level by allocating small amounts
+ // of memory with random pseudo call stack
+ int pc = 1;
+ for (int i = 0; i < entries; i++) {
+ long addr = wb.NMTMallocWithPseudoStack(1, pc);
+ if (addr == 0) {
+ throw new RuntimeException("NMTMallocWithPseudoStack: out of memory");
+ }
+ // We free memory here since it doesn't affect pseudo malloc alloc site hash table entries
+ wb.NMTFree(addr);
+ pc += MAX_HASH_SIZE;
+ if (i == entries - 1) {
+ // Verify that tracking has been downgraded
+ pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "statistics"});
+ output = new OutputAnalyzer(pb.start());
+ output.shouldContain("Tracking level has been downgraded due to lack of resources");
+ }
+ }
}
}
--- a/hotspot/test/runtime/NMT/UnsafeMallocLimit.java Thu Nov 13 10:22:24 2014 -0800
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,50 +0,0 @@
-/*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-/*
- * @test
- * @bug 8055289
- * @library /testlibrary
- * @build UnsafeMallocLimit
- * @run main/othervm -Xmx32m -XX:NativeMemoryTracking=summary UnsafeMallocLimit
- */
-
-import com.oracle.java.testlibrary.*;
-import sun.misc.Unsafe;
-
-public class UnsafeMallocLimit {
-
- public static void main(String args[]) throws Exception {
- if (Platform.is32bit()) {
- Unsafe unsafe = Utils.getUnsafe();
- try {
- unsafe.allocateMemory(1 << 30);
- throw new RuntimeException("Did not get expected OOME");
- } catch (OutOfMemoryError e) {
- // Expected exception
- }
- } else {
- System.out.println("Test only valid on 32-bit platforms");
- }
- }
-}
--- a/hotspot/test/runtime/NMT/UnsafeMallocLimit2.java Thu Nov 13 10:22:24 2014 -0800
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,54 +0,0 @@
-/*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-/*
- * @test
- * @bug 8058818
- * @library /testlibrary
- * @build UnsafeMallocLimit2
- * @run main/othervm -Xmx32m -XX:NativeMemoryTracking=off UnsafeMallocLimit2
- */
-
-import com.oracle.java.testlibrary.*;
-import sun.misc.Unsafe;
-
-public class UnsafeMallocLimit2 {
-
- public static void main(String args[]) throws Exception {
- if (Platform.is32bit()) {
- Unsafe unsafe = Utils.getUnsafe();
- try {
- // Allocate greater than MALLOC_MAX and likely won't fail to allocate,
- // so it hits the NMT code that asserted.
- // Test that this doesn't cause an assertion with NMT off.
- // The option above overrides if all the tests are run with NMT on.
- unsafe.allocateMemory(0x40000000);
- System.out.println("Allocation succeeded");
- } catch (OutOfMemoryError e) {
- System.out.println("Allocation failed");
- }
- } else {
- System.out.println("Test only valid on 32-bit platforms");
- }
- }
-}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/RedefineTests/RedefineAnnotations.java Wed Jul 05 20:07:30 2017 +0200
@@ -0,0 +1,410 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @library /testlibrary
+ * @summary Test that type annotations are retained after a retransform
+ * @run main RedefineAnnotations buildagent
+ * @run main/othervm -javaagent:redefineagent.jar RedefineAnnotations
+ */
+
+import static com.oracle.java.testlibrary.Asserts.assertTrue;
+import java.io.FileNotFoundException;
+import java.io.PrintWriter;
+import java.lang.NoSuchFieldException;
+import java.lang.NoSuchMethodException;
+import java.lang.RuntimeException;
+import java.lang.annotation.Annotation;
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+import java.lang.instrument.ClassFileTransformer;
+import java.lang.instrument.IllegalClassFormatException;
+import java.lang.instrument.Instrumentation;
+import java.lang.instrument.UnmodifiableClassException;
+import java.lang.reflect.AnnotatedArrayType;
+import java.lang.reflect.AnnotatedParameterizedType;
+import java.lang.reflect.AnnotatedType;
+import java.lang.reflect.AnnotatedWildcardType;
+import java.lang.reflect.Executable;
+import java.lang.reflect.TypeVariable;
+import java.security.ProtectionDomain;
+import java.util.Arrays;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import jdk.internal.org.objectweb.asm.ClassReader;
+import jdk.internal.org.objectweb.asm.ClassVisitor;
+import jdk.internal.org.objectweb.asm.ClassWriter;
+import jdk.internal.org.objectweb.asm.FieldVisitor;
+import static jdk.internal.org.objectweb.asm.Opcodes.ASM5;
+
+@Retention(RetentionPolicy.RUNTIME)
+@Target(ElementType.TYPE_USE)
+@interface TestAnn {
+ String site();
+}
+
+public class RedefineAnnotations {
+ static Instrumentation inst;
+ public static void premain(String agentArgs, Instrumentation inst) {
+ RedefineAnnotations.inst = inst;
+ }
+
+ static class Transformer implements ClassFileTransformer {
+
+ public byte[] asm(ClassLoader loader, String className,
+ Class<?> classBeingRedefined,
+ ProtectionDomain protectionDomain, byte[] classfileBuffer)
+ throws IllegalClassFormatException {
+
+ ClassWriter cw = new ClassWriter(0);
+ ClassVisitor cv = new ReAddDummyFieldsClassVisitor(ASM5, cw) { };
+ ClassReader cr = new ClassReader(classfileBuffer);
+ cr.accept(cv, 0);
+ return cw.toByteArray();
+ }
+
+ public class ReAddDummyFieldsClassVisitor extends ClassVisitor {
+
+ LinkedList<F> fields = new LinkedList<>();
+
+ public ReAddDummyFieldsClassVisitor(int api, ClassVisitor cv) {
+ super(api, cv);
+ }
+
+ @Override public FieldVisitor visitField(int access, String name,
+ String desc, String signature, Object value) {
+ if (name.startsWith("dummy")) {
+ // Remove dummy field
+ fields.addLast(new F(access, name, desc, signature, value));
+ return null;
+ }
+ return cv.visitField(access, name, desc, signature, value);
+ }
+
+ @Override public void visitEnd() {
+ F f;
+ while ((f = fields.pollFirst()) != null) {
+ // Re-add dummy fields
+ cv.visitField(f.access, f.name, f.desc, f.signature, f.value);
+ }
+ }
+
+ private class F {
+ private int access;
+ private String name;
+ private String desc;
+ private String signature;
+ private Object value;
+ F(int access, String name, String desc, String signature, Object value) {
+ this.access = access;
+ this.name = name;
+ this.desc = desc;
+ this.signature = signature;
+ this.value = value;
+ }
+ }
+ }
+
+ @Override public byte[] transform(ClassLoader loader, String className,
+ Class<?> classBeingRedefined,
+ ProtectionDomain protectionDomain, byte[] classfileBuffer)
+ throws IllegalClassFormatException {
+
+ if (className.contains("TypeAnnotatedTestClass")) {
+ try {
+ // Here we remove and re-add the dummy fields. This shuffles the constant pool
+ return asm(loader, className, classBeingRedefined, protectionDomain, classfileBuffer);
+ } catch (Throwable e) {
+ // The retransform native code that called this method does not propagate
+ // exceptions. Instead of getting an uninformative generic error, catch
+ // problems here and print them, then exit.
+ e.printStackTrace();
+ System.exit(1);
+ }
+ }
+ return null;
+ }
+ }
+
+ private static void buildAgent() {
+ try {
+ ClassFileInstaller.main("RedefineAnnotations");
+ } catch (Exception e) {
+ throw new RuntimeException("Could not write agent classfile", e);
+ }
+
+ try {
+ PrintWriter pw = new PrintWriter("MANIFEST.MF");
+ pw.println("Premain-Class: RedefineAnnotations");
+ pw.println("Agent-Class: RedefineAnnotations");
+ pw.println("Can-Retransform-Classes: true");
+ pw.close();
+ } catch (FileNotFoundException e) {
+ throw new RuntimeException("Could not write manifest file for the agent", e);
+ }
+
+ sun.tools.jar.Main jarTool = new sun.tools.jar.Main(System.out, System.err, "jar");
+ if (!jarTool.run(new String[] { "-cmf", "MANIFEST.MF", "redefineagent.jar", "RedefineAnnotations.class" })) {
+ throw new RuntimeException("Could not write the agent jar file");
+ }
+ }
+
+ public static void main(String argv[]) throws NoSuchFieldException, NoSuchMethodException {
+ if (argv.length == 1 && argv[0].equals("buildagent")) {
+ buildAgent();
+ return;
+ }
+
+ if (inst == null) {
+ throw new RuntimeException("Instrumentation object was null");
+ }
+
+ RedefineAnnotations test = new RedefineAnnotations();
+ test.testTransformAndVerify();
+ }
+
+ // Class type annotations
+ private Annotation classTypeParameterTA;
+ private Annotation extendsTA;
+ private Annotation implementsTA;
+
+ // Field type annotations
+ private Annotation fieldTA;
+ private Annotation innerTA;
+ private Annotation[] arrayTA = new Annotation[4];
+ private Annotation[] mapTA = new Annotation[5];
+
+ // Method type annotations
+ private Annotation returnTA, methodTypeParameterTA, formalParameterTA, throwsTA;
+
+ private void testTransformAndVerify()
+ throws NoSuchFieldException, NoSuchMethodException {
+
+ Class<TypeAnnotatedTestClass> c = TypeAnnotatedTestClass.class;
+ Class<?> myClass = c;
+
+ /*
+ * Verify that the expected annotations are where they should be before transform.
+ */
+ verifyClassTypeAnnotations(c);
+ verifyFieldTypeAnnotations(c);
+ verifyMethodTypeAnnotations(c);
+
+ try {
+ inst.addTransformer(new Transformer(), true);
+ inst.retransformClasses(myClass);
+ } catch (UnmodifiableClassException e) {
+ throw new RuntimeException(e);
+ }
+
+ /*
+ * Verify that the expected annotations are where they should be after transform.
+ * Also verify that before and after are equal.
+ */
+ verifyClassTypeAnnotations(c);
+ verifyFieldTypeAnnotations(c);
+ verifyMethodTypeAnnotations(c);
+ }
+
+ private void verifyClassTypeAnnotations(Class c) {
+ Annotation anno;
+
+ anno = c.getTypeParameters()[0].getAnnotations()[0];
+ verifyTestAnn(classTypeParameterTA, anno, "classTypeParameter");
+ classTypeParameterTA = anno;
+
+ anno = c.getAnnotatedSuperclass().getAnnotations()[0];
+ verifyTestAnn(extendsTA, anno, "extends");
+ extendsTA = anno;
+
+ anno = c.getAnnotatedInterfaces()[0].getAnnotations()[0];
+ verifyTestAnn(implementsTA, anno, "implements");
+ implementsTA = anno;
+ }
+
+ private void verifyFieldTypeAnnotations(Class c)
+ throws NoSuchFieldException, NoSuchMethodException {
+
+ verifyBasicFieldTypeAnnotations(c);
+ verifyInnerFieldTypeAnnotations(c);
+ verifyArrayFieldTypeAnnotations(c);
+ verifyMapFieldTypeAnnotations(c);
+ }
+
+ private void verifyBasicFieldTypeAnnotations(Class c)
+ throws NoSuchFieldException, NoSuchMethodException {
+
+ Annotation anno = c.getDeclaredField("typeAnnotatedBoolean").getAnnotatedType().getAnnotations()[0];
+ verifyTestAnn(fieldTA, anno, "field");
+ fieldTA = anno;
+ }
+
+ private void verifyInnerFieldTypeAnnotations(Class c)
+ throws NoSuchFieldException, NoSuchMethodException {
+
+ AnnotatedType at = c.getDeclaredField("typeAnnotatedInner").getAnnotatedType();
+ Annotation anno = at.getAnnotations()[0];
+ verifyTestAnn(innerTA, anno, "inner");
+ innerTA = anno;
+ }
+
+ private void verifyArrayFieldTypeAnnotations(Class c)
+ throws NoSuchFieldException, NoSuchMethodException {
+
+ Annotation anno;
+ AnnotatedType at;
+
+ at = c.getDeclaredField("typeAnnotatedArray").getAnnotatedType();
+ anno = at.getAnnotations()[0];
+ verifyTestAnn(arrayTA[0], anno, "array1");
+ arrayTA[0] = anno;
+
+ for (int i = 1; i <= 3; i++) {
+ at = ((AnnotatedArrayType) at).getAnnotatedGenericComponentType();
+ anno = at.getAnnotations()[0];
+ verifyTestAnn(arrayTA[i], anno, "array" + (i + 1));
+ arrayTA[i] = anno;
+ }
+ }
+
+ private void verifyMapFieldTypeAnnotations(Class c)
+ throws NoSuchFieldException, NoSuchMethodException {
+
+ Annotation anno;
+ AnnotatedType atBase;
+ AnnotatedType atParameter;
+ atBase = c.getDeclaredField("typeAnnotatedMap").getAnnotatedType();
+
+ anno = atBase.getAnnotations()[0];
+ verifyTestAnn(mapTA[0], anno, "map1");
+ mapTA[0] = anno;
+
+ atParameter =
+ ((AnnotatedParameterizedType) atBase).
+ getAnnotatedActualTypeArguments()[0];
+ anno = ((AnnotatedWildcardType) atParameter).getAnnotations()[0];
+ verifyTestAnn(mapTA[1], anno, "map2");
+ mapTA[1] = anno;
+
+ anno =
+ ((AnnotatedWildcardType) atParameter).
+ getAnnotatedUpperBounds()[0].getAnnotations()[0];
+ verifyTestAnn(mapTA[2], anno, "map3");
+ mapTA[2] = anno;
+
+ atParameter =
+ ((AnnotatedParameterizedType) atBase).
+ getAnnotatedActualTypeArguments()[1];
+ anno = ((AnnotatedParameterizedType) atParameter).getAnnotations()[0];
+ verifyTestAnn(mapTA[3], anno, "map4");
+ mapTA[3] = anno;
+
+ anno =
+ ((AnnotatedParameterizedType) atParameter).
+ getAnnotatedActualTypeArguments()[0].getAnnotations()[0];
+ verifyTestAnn(mapTA[4], anno, "map5");
+ mapTA[4] = anno;
+ }
+
+ private void verifyMethodTypeAnnotations(Class c)
+ throws NoSuchFieldException, NoSuchMethodException {
+ Annotation anno;
+ Executable typeAnnotatedMethod =
+ c.getDeclaredMethod("typeAnnotatedMethod", TypeAnnotatedTestClass.class);
+
+ anno = typeAnnotatedMethod.getAnnotatedReturnType().getAnnotations()[0];
+ verifyTestAnn(returnTA, anno, "return");
+ returnTA = anno;
+
+ anno = typeAnnotatedMethod.getTypeParameters()[0].getAnnotations()[0];
+ verifyTestAnn(methodTypeParameterTA, anno, "methodTypeParameter");
+ methodTypeParameterTA = anno;
+
+ anno = typeAnnotatedMethod.getAnnotatedParameterTypes()[0].getAnnotations()[0];
+ verifyTestAnn(formalParameterTA, anno, "formalParameter");
+ formalParameterTA = anno;
+
+ anno = typeAnnotatedMethod.getAnnotatedExceptionTypes()[0].getAnnotations()[0];
+ verifyTestAnn(throwsTA, anno, "throws");
+ throwsTA = anno;
+ }
+
+ private static void verifyTestAnn(Annotation verifyAgainst, Annotation anno, String expectedSite) {
+ verifyTestAnnSite(anno, expectedSite);
+
+ // When called before the transform, verifyAgainst will be null; when called
+ // after the transform, it will be the annotation from before the transform
+ if (verifyAgainst != null) {
+ assertTrue(anno.equals(verifyAgainst),
+ "Annotations do not match before and after." +
+ " Before: \"" + verifyAgainst + "\", After: \"" + anno + "\"");
+ }
+ }
+
+ private static void verifyTestAnnSite(Annotation testAnn, String expectedSite) {
+ String expectedAnn = "@TestAnn(site=" + expectedSite + ")";
+ assertTrue(testAnn.toString().equals(expectedAnn),
+ "Expected \"" + expectedAnn + "\", got \"" + testAnn + "\"");
+ }
+
+ public static class TypeAnnotatedTestClass <@TestAnn(site="classTypeParameter") S,T>
+ extends @TestAnn(site="extends") Thread
+ implements @TestAnn(site="implements") Runnable {
+
+ public @TestAnn(site="field") boolean typeAnnotatedBoolean;
+
+ public
+ RedefineAnnotations.
+ @TestAnn(site="inner") TypeAnnotatedTestClass
+ typeAnnotatedInner;
+
+ public
+ @TestAnn(site="array4") boolean
+ @TestAnn(site="array1") []
+ @TestAnn(site="array2") []
+ @TestAnn(site="array3") []
+ typeAnnotatedArray;
+
+ public @TestAnn(site="map1") Map
+ <@TestAnn(site="map2") ? extends @TestAnn(site="map3") String,
+ @TestAnn(site="map4") List<@TestAnn(site="map5") Object>> typeAnnotatedMap;
+
+ public int dummy1;
+ public int dummy2;
+ public int dummy3;
+
+ @TestAnn(site="return") <@TestAnn(site="methodTypeParameter") U,V> Class
+ typeAnnotatedMethod(@TestAnn(site="formalParameter") TypeAnnotatedTestClass arg)
+ throws @TestAnn(site="throws") ClassNotFoundException {
+
+ @TestAnn(site="local_variable_type") int foo = 0;
+ throw new ClassNotFoundException();
+ }
+
+ public void run() {}
+ }
+}
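
All of the verify* methods above read TYPE_USE annotations back through the java.lang.reflect.AnnotatedType API, before and after the retransform. A compact stand-alone illustration of that reflection step, detached from the agent machinery (the annotation and class names below are invented for the example):

    import java.lang.annotation.Annotation;
    import java.lang.annotation.ElementType;
    import java.lang.annotation.Retention;
    import java.lang.annotation.RetentionPolicy;
    import java.lang.annotation.Target;
    import java.lang.reflect.AnnotatedType;

    public class TypeUseReflectionSketch {
        @Retention(RetentionPolicy.RUNTIME)
        @Target(ElementType.TYPE_USE)
        @interface Where { String value(); }

        static class Base {}
        static class Sub extends @Where("extends") Base {}

        public static void main(String[] args) {
            AnnotatedType sup = Sub.class.getAnnotatedSuperclass();
            // Prints the TYPE_USE annotation attached to the "extends Base" use of the type.
            for (Annotation a : sup.getAnnotations()) {
                System.out.println(a);
            }
        }
    }
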
--- a/hotspot/test/runtime/SharedArchiveFile/LimitSharedSizes.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/runtime/SharedArchiveFile/LimitSharedSizes.java Wed Jul 05 20:07:30 2017 +0200
@@ -51,9 +51,12 @@
// Known issue, JDK-8038422 (assert() on Windows)
// new SharedSizeTestData("-XX:SharedMiscDataSize", "500k", "miscellaneous data"),
- // This will cause a VM crash; commenting out for now; see bug JDK-8038268
- // @ignore JDK-8038268
- // new SharedSizeTestData("-XX:SharedMiscCodeSize", "20k", "miscellaneous code"),
+ // Too small a misc code size should not cause a VM crash.
+ // It should result in the following error message:
+ // The shared miscellaneous code space is not large enough
+ // to preload requested classes. Use -XX:SharedMiscCodeSize=
+ // to increase the initial size of shared miscellaneous code space.
+ new SharedSizeTestData("-XX:SharedMiscCodeSize", "20k", "miscellaneous code"),
// these values are larger than default ones, but should
// be acceptable and not cause failure
--- a/hotspot/test/runtime/lambda-features/InvokespecialInterface.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/runtime/lambda-features/InvokespecialInterface.java Wed Jul 05 20:07:30 2017 +0200
@@ -33,11 +33,12 @@
import java.util.function.*;
import java.util.*;
+public class InvokespecialInterface {
interface I {
default void imethod() { System.out.println("I::imethod"); }
}
-class C implements I {
+static class C implements I {
public void foo() { I.super.imethod(); } // invokespecial InterfaceMethod
public void bar() { I i = this; i.imethod(); } // invokeinterface same
public void doSomeInvokedynamic() {
@@ -48,7 +49,6 @@
}
}
-public class InvokespecialInterface {
public static void main(java.lang.String[] unused) {
// need to create C and call I::foo()
C c = new C();
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/lambda-features/TestInterfaceInit.java Wed Jul 05 20:07:30 2017 +0200
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test
+ * @bug 8034275
+ * @summary [JDK 8u40] Test interface initialization: only for interfaces declaring default methods
+ * @run main TestInterfaceInit
+ */
+import java.util.List;
+import java.util.Arrays;
+import java.util.ArrayList;
+
+public class TestInterfaceInit {
+
+ static List<Class<?>> cInitOrder = new ArrayList<>();
+
+ // Declares a default method and initializes
+ interface I {
+ boolean v = TestInterfaceInit.out(I.class);
+ default void x() {}
+ }
+
+ // Declares a default method and initializes
+ interface J extends I {
+ boolean v = TestInterfaceInit.out(J.class);
+ default void x() {}
+ }
+ // No default method, does not initialize
+ interface JN extends J {
+ boolean v = TestInterfaceInit.out(JN.class);
+ }
+
+ // Declares a default method and initializes
+ interface K extends I {
+ boolean v = TestInterfaceInit.out(K.class);
+ default void x() {}
+ }
+
+ // No default method, does not initialize
+ interface KN extends K {
+ boolean v = TestInterfaceInit.out(KN.class);
+ }
+
+ interface L extends JN, KN {
+ boolean v = TestInterfaceInit.out(L.class);
+ default void x() {}
+ }
+
+ public static void main(String[] args) {
+ // Trigger initialization
+ boolean v = L.v;
+
+ List<Class<?>> expectedCInitOrder = Arrays.asList(I.class,J.class,K.class,L.class);
+ if (!cInitOrder.equals(expectedCInitOrder)) {
+ throw new RuntimeException(String.format("Class initialization array %s not equal to expected array %s", cInitOrder, expectedCInitOrder));
+ }
+ }
+
+ static boolean out(Class c) {
+ System.out.println("#: initializing " + c.getName());
+ cInitOrder.add(c);
+ return true;
+ }
+
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/lambda-features/TestInterfaceOrder.java Wed Jul 05 20:07:30 2017 +0200
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test
+ * @bug 8034275
+ * @summary [JDK 8u40] Test interface initialization order
+ * @run main TestInterfaceOrder
+ */
+
+import java.util.List;
+import java.util.Arrays;
+import java.util.ArrayList;
+
+public class TestInterfaceOrder {
+ static List<Class<?>> cInitOrder = new ArrayList<>();
+
+ public static void main(java.lang.String[] args) {
+ //Trigger initialization
+ C c = new C();
+
+ List<Class<?>> expectedCInitOrder = Arrays.asList(I.class, J.class, A.class, K.class, B.class, L.class, C.class);
+ if (!cInitOrder.equals(expectedCInitOrder)) {
+ throw new RuntimeException(String.format("Class initialization order %s not equal to expected order %s", cInitOrder, expectedCInitOrder));
+ }
+ }
+
+ interface I {
+ boolean v = TestInterfaceOrder.out(I.class);
+ default void i() {}
+ }
+
+ interface J extends I {
+ boolean v = TestInterfaceOrder.out(J.class);
+ default void j() {}
+ }
+
+ static class A implements J {
+ static boolean v = TestInterfaceOrder.out(A.class);
+ }
+
+ interface K extends I {
+ boolean v = TestInterfaceOrder.out(K.class);
+ default void k() {}
+ }
+
+ static class B extends A implements K {
+ static boolean v = TestInterfaceOrder.out(B.class);
+ }
+
+ interface L {
+ boolean v = TestInterfaceOrder.out(L.class);
+ default void l() {}
+ }
+
+ static class C extends B implements L {
+ static boolean v = TestInterfaceOrder.out(C.class);
+ }
+
+
+ static boolean out(Class c) {
+ System.out.println("#: initializing " + c.getName());
+ cInitOrder.add(c);
+ return true;
+ }
+
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/reflect/ArrayGetIntException.java Wed Jul 05 20:07:30 2017 +0200
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 6191224
+ * @summary (reflect) Misleading detail string in IllegalArgumentException thrown by Array.get<Type>
+ * @run main ArrayGetIntException
+ */
+import java.io.*;
+import java.lang.reflect.Array;
+
+public class ArrayGetIntException {
+ public static void main(String[] args) throws Exception {
+ Object[] objArray = {new Integer(Integer.MAX_VALUE)};
+
+ // this access is legal
+ try {
+ System.out.println(Array.get(objArray, 0));
+ System.out.println("Test #1 PASSES");
+ } catch(Exception e) {
+ failTest("Test #1 FAILS - legal access denied" + e.getMessage());
+ }
+
+ // this access is not legal, but needs to generate the proper exception message
+ try {
+ System.out.println(Array.getInt(objArray, 0));
+ failTest("Test #2 FAILS - no exception");
+ } catch(Exception e) {
+ System.out.println(e);
+ if (e.getMessage().equals("Argument is not an array of primitive type")) {
+ System.out.println("Test #2 PASSES");
+ } else {
+ failTest("Test #2 FAILS - incorrect message: " + e.getMessage());
+ }
+ }
+
+ // this access is not legal, but needs to generate the proper exception message
+ try {
+ System.out.println(Array.getInt(new Object(), 0));
+ failTest("Test #3 FAILS - no exception");
+ } catch(Exception e) {
+ System.out.println(e);
+ if (e.getMessage().equals("Argument is not an array")) {
+ System.out.println("Test #3 PASSES");
+ } else {
+ failTest("Test #3 FAILS - incorrect message: " + e.getMessage());
+ }
+ }
+ }
+
+ private static void failTest(String errStr) {
+ System.out.println(errStr);
+ throw new Error(errStr);
+ }
+}
--- a/hotspot/test/serviceability/dcmd/compiler/CompilerQueueTest.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/serviceability/dcmd/compiler/CompilerQueueTest.java Wed Jul 05 20:07:30 2017 +0200
@@ -27,6 +27,7 @@
* @library ..
* @build DcmdUtil CompilerQueueTest
* @run main CompilerQueueTest
+ * @run main/othervm -XX:-TieredCompilation CompilerQueueTest
* @run main/othervm -Xint CompilerQueueTest
* @summary Test of diagnostic command Compiler.queue
*/
@@ -87,7 +88,9 @@
}
private static void validateMethodLine(String str) throws Exception {
- String name = str.substring(19);
+ // Skip until package/class name begins. Trim to remove whitespace that
+ // may differ.
+ String name = str.substring(14).trim();
int sep = name.indexOf("::");
if (sep == -1) {
throw new Exception("Failed dcmd queue, didn't find separator :: in: " + name);
--- a/hotspot/test/serviceability/threads/TestFalseDeadLock.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/serviceability/threads/TestFalseDeadLock.java Wed Jul 05 20:07:30 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -21,14 +21,17 @@
* questions.
*/
+import com.oracle.java.testlibrary.Utils;
import java.lang.management.ManagementFactory;
import java.lang.management.ThreadMXBean;
import java.util.Random;
/*
* @test
+ * @ignore 8061157
* @bug 8016304
* @summary Make sure no deadlock is reported for this program which has no deadlocks.
+ * @library /testlibrary
* @run main/othervm TestFalseDeadLock
*/
@@ -65,7 +68,7 @@
public static class Test implements Runnable {
public void run() {
- Random r = new Random();
+ Random r = Utils.getRandomInstance();
while (running) {
try {
synchronized (this) {
--- a/hotspot/test/testlibrary/com/oracle/java/testlibrary/Utils.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/testlibrary/com/oracle/java/testlibrary/Utils.java Wed Jul 05 20:07:30 2017 +0200
@@ -24,21 +24,21 @@
package com.oracle.java.testlibrary;
import static com.oracle.java.testlibrary.Asserts.assertTrue;
-
import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
+import java.lang.reflect.Field;
import java.net.InetAddress;
import java.net.ServerSocket;
import java.net.UnknownHostException;
import java.util.ArrayList;
-import java.util.List;
import java.util.Arrays;
import java.util.Collections;
-import java.util.regex.Pattern;
+import java.util.List;
+import java.util.Random;
import java.util.regex.Matcher;
-import java.lang.reflect.Field;
+import java.util.regex.Pattern;
import sun.misc.Unsafe;
/**
@@ -64,6 +64,21 @@
private static Unsafe unsafe = null;
/**
+ * Defines property name for seed value.
+ */
+ public static final String SEED_PROPERTY_NAME = "com.oracle.java.testlibrary.random.seed";
+
+ /* (non-javadoc)
+ * Random generator with (or without) predefined seed. Depends on
+ * "com.oracle.java.testlibrary.random.seed" property value.
+ */
+ private static volatile Random RANDOM_GENERATOR;
+
+ /**
+ * Contains the seed value used for {@link java.util.Random} creation.
+ */
+ public static final long SEED = Long.getLong(SEED_PROPERTY_NAME, new Random().nextLong());
+ /**
* Returns the value of 'test.timeout.factor' system property
* converted to {@code double}.
*/
@@ -332,4 +347,24 @@
}
return new String(hexView);
}
+
+ /**
+ * Returns a {@link java.util.Random} generator initialized with a particular seed.
+ * The seed can be provided via the system property {@link Utils#SEED_PROPERTY_NAME}.
+ * In case no seed is provided, the method uses a random number.
+ * The seed used is printed to stdout.
+ * @return {@link java.util.Random} generator with a particular seed.
+ */
+ public static Random getRandomInstance() {
+ if (RANDOM_GENERATOR == null) {
+ synchronized (Utils.class) {
+ if (RANDOM_GENERATOR == null) {
+ RANDOM_GENERATOR = new Random(SEED);
+ System.out.printf("For random generator using seed: %d%n", SEED);
+ System.out.printf("To re-run test with same seed value please add \"-D%s=%d\" to command line.%n", SEED_PROPERTY_NAME, SEED);
+ }
+ }
+ }
+ return RANDOM_GENERATOR;
+ }
}
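
getRandomInstance() above centralizes seeded randomness for the test base: the seed comes from the com.oracle.java.testlibrary.random.seed property when set, is generated once otherwise, and is printed so a failing run can be replayed. A condensed stand-alone sketch of the same lazy, double-checked pattern (the property and class names are placeholders, not the testlibrary API):

    import java.util.Random;

    public final class SeededRandomSketch {
        public static final String SEED_PROPERTY = "example.random.seed"; // placeholder name
        public static final long SEED = Long.getLong(SEED_PROPERTY, new Random().nextLong());

        private static volatile Random generator;

        // Double-checked locking: the seed is logged exactly once, on first use.
        public static Random getRandomInstance() {
            if (generator == null) {
                synchronized (SeededRandomSketch.class) {
                    if (generator == null) {
                        generator = new Random(SEED);
                        System.out.printf("Using seed %d; re-run with -D%s=%d to reproduce%n",
                                          SEED, SEED_PROPERTY, SEED);
                    }
                }
            }
            return generator;
        }

        public static void main(String[] args) {
            System.out.println(getRandomInstance().nextInt(100));
        }
    }
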
--- a/hotspot/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java Wed Jul 05 20:07:30 2017 +0200
@@ -98,10 +98,10 @@
public native void NMTCommitMemory(long addr, long size);
public native void NMTUncommitMemory(long addr, long size);
public native void NMTReleaseMemory(long addr, long size);
- public native void NMTOverflowHashBucket(long num);
public native long NMTMallocWithPseudoStack(long size, int index);
public native boolean NMTIsDetailSupported();
public native boolean NMTChangeTrackingLevel();
+ public native int NMTGetHashSize();
// Compiler
public native void deoptimizeAll();
@@ -179,6 +179,8 @@
public native void printRegionInfo(int context);
// VM flags
+ public native boolean isConstantVMFlag(String name);
+ public native boolean isLockedVMFlag(String name);
public native void setBooleanVMFlag(String name, boolean value);
public native void setIntxVMFlag(String name, long value);
public native void setUintxVMFlag(String name, long value);
--- a/hotspot/test/testlibrary/whitebox/sun/hotspot/code/NMethod.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/testlibrary/whitebox/sun/hotspot/code/NMethod.java Wed Jul 05 20:07:30 2017 +0200
@@ -34,18 +34,21 @@
return obj == null ? null : new NMethod(obj);
}
private NMethod(Object[] obj) {
- assert obj.length == 2;
+ assert obj.length == 3;
comp_level = (Integer) obj[0];
insts = (byte[]) obj[1];
+ compile_id = (Integer) obj[2];
}
public byte[] insts;
public int comp_level;
+ public int compile_id;
@Override
public String toString() {
return "NMethod{" +
"insts=" + insts +
", comp_level=" + comp_level +
+ ", compile_id=" + compile_id +
'}';
}
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/testlibrary_tests/RandomGeneratorTest.java Wed Jul 05 20:07:30 2017 +0200
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @summary Verify correctness of the random generator from Utils.java
+ * @library /testlibrary
+ * @run driver RandomGeneratorTest SAME_SEED
+ * @run driver RandomGeneratorTest NO_SEED
+ * @run driver RandomGeneratorTest DIFFERENT_SEED
+ */
+
+import com.oracle.java.testlibrary.ProcessTools;
+import com.oracle.java.testlibrary.Utils;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+
+/**
+ * The test verifies the correct behavior of {@link com.oracle.java.testlibrary.Utils#getRandomInstance()}.
+ * The test works in three modes: same seed provided, no seed provided, and
+ * different seed provided. In the first case the test expects all random numbers
+ * to be repeated in every subsequent run. For the other two modes the test expects the
+ * randomly generated numbers to differ from the original run.
+ */
+public class RandomGeneratorTest {
+ private static final String SEED_VM_OPTION = "-D" + Utils.SEED_PROPERTY_NAME + "=";
+
+ public static void main( String[] args) throws Throwable {
+ if (args.length == 0) {
+ throw new Error("TESTBUG: No test mode provided.");
+ }
+ SeedOption seedOpt = SeedOption.valueOf(args[0]);
+ List<String> jvmArgs = new ArrayList<String>();
+ String optStr = seedOpt.getSeedOption();
+ if (optStr != null) {
+ jvmArgs.add(optStr);
+ }
+ jvmArgs.add(RandomRunner.class.getName());
+ String[] cmdLineArgs = jvmArgs.toArray(new String[jvmArgs.size()]);
+ String etalon = ProcessTools.executeTestJvm(cmdLineArgs).getOutput().trim();
+ seedOpt.verify(etalon, cmdLineArgs);
+ }
+
+ /**
+ * The utility enum helps to generate an appropriate string that should be passed
+ * to the command line depending on the testing mode. It is also responsible for result
+ * validation.
+ */
+ private enum SeedOption {
+ SAME_SEED {
+ @Override
+ public String getSeedOption() {
+ return SEED_VM_OPTION + Utils.SEED;
+ }
+
+ @Override
+ protected boolean isOutputExpected(String orig, String output) {
+ return output.equals(orig);
+ }
+ },
+ DIFFERENT_SEED {
+ @Override
+ public String getSeedOption() {
+ return SEED_VM_OPTION + Utils.getRandomInstance().nextLong();
+ }
+
+ @Override
+ public void verify(String orig, String[] cmdLine) {
+ cmdLine[0] = getSeedOption();
+ super.verify(orig, cmdLine);
+ }
+ },
+ NO_SEED {
+ @Override
+ public String getSeedOption() {
+ return null;
+ }
+ };
+
+ /**
+ * Generates a string to be added as a command line argument.
+ * It contains the "-D" prefix, the system property name, an '=' sign
+ * and the seed value.
+ * @return command line argument
+ */
+ public abstract String getSeedOption();
+
+ protected boolean isOutputExpected(String orig, String output) {
+ return !output.equals(orig);
+ }
+
+ /**
+ * Verifies that the original output meets expectations
+ * depending on the test mode. It compares the output of the second execution
+ * to the original one.
+ * @param orig original output
+ * @param cmdLine command line arguments
+ * @throws Throwable - throws an exception in case of test failure.
+ */
+ public void verify(String orig, String[] cmdLine) {
+ String lastLineOrig = getLastLine(orig);
+ String lastLine;
+ try {
+ lastLine = getLastLine(ProcessTools.executeTestJvm(cmdLine).getOutput().trim());
+ } catch (Throwable t) {
+ throw new Error("TESTBUG: Unexpected exception during JVM execution.", t);
+ }
+ if (!isOutputExpected(lastLineOrig, lastLine)) {
+ throw new AssertionError("Unexpected random number sequence for mode: " + this.name());
+ }
+ }
+
+ private static String getLastLine(String output) {
+ return output.substring(output.lastIndexOf(Utils.NEW_LINE)).trim();
+ }
+ }
+
+ /**
+ * The helper class generates several random numbers
+ * and prints them out.
+ */
+ public static class RandomRunner {
+ private static final int COUNT = 10;
+ public static void main(String[] args) {
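+            // Print COUNT random longs from the shared test random instance on a single line.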
+ StringBuilder sb = new StringBuilder();
+ Random rng = Utils.getRandomInstance();
+ for (int i = 0; i < COUNT; i++) {
+ sb.append(rng.nextLong()).append(' ');
+ }
+ System.out.println(sb.toString());
+ }
+ }
+}
--- a/hotspot/test/testlibrary_tests/whitebox/vm_flags/BooleanTest.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/testlibrary_tests/whitebox/vm_flags/BooleanTest.java Wed Jul 05 20:07:30 2017 +0200
@@ -43,6 +43,7 @@
private static final Boolean[] TESTS = {true, false, true, true, false};
private static final String TEST_NAME = "BooleanTest";
private static final String FLAG_NAME = "PrintCompilation";
+ private static final String FLAG_DEBUG_NAME = "SafepointALot";
private static final String METHOD = TEST_NAME + "::method";
private static final String METHOD1 = METHOD + "1";
private static final String METHOD2 = METHOD + "2";
@@ -54,6 +55,7 @@
VmFlagTest.WHITE_BOX::getBooleanVMFlag);
testFunctional(false);
testFunctional(true);
+ VmFlagTest.runTest(FLAG_DEBUG_NAME, VmFlagTest.WHITE_BOX::getBooleanVMFlag);
} else {
boolean value = Boolean.valueOf(args[0]);
method1();
--- a/hotspot/test/testlibrary_tests/whitebox/vm_flags/DoubleTest.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/testlibrary_tests/whitebox/vm_flags/DoubleTest.java Wed Jul 05 20:07:30 2017 +0200
@@ -34,7 +34,7 @@
*/
public class DoubleTest {
- private static final String FLAG_NAME = null;
+ private static final String FLAG_NAME = "CompileThresholdScaling";
private static final Double[] TESTS = {0d, -0d, -1d, 1d,
Double.MAX_VALUE, Double.MIN_VALUE, Double.NaN,
Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY};
--- a/hotspot/test/testlibrary_tests/whitebox/vm_flags/IntxTest.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/testlibrary_tests/whitebox/vm_flags/IntxTest.java Wed Jul 05 20:07:30 2017 +0200
@@ -35,6 +35,7 @@
public class IntxTest {
private static final String FLAG_NAME = "OnStackReplacePercentage";
+ private static final String FLAG_DEBUG_NAME = "InlineFrequencyCount";
private static final Long[] TESTS = {0L, 100L, -1L,
(long) Integer.MAX_VALUE, (long) Integer.MIN_VALUE};
@@ -42,6 +43,7 @@
VmFlagTest.runTest(FLAG_NAME, TESTS,
VmFlagTest.WHITE_BOX::setIntxVMFlag,
VmFlagTest.WHITE_BOX::getIntxVMFlag);
+ VmFlagTest.runTest(FLAG_DEBUG_NAME, VmFlagTest.WHITE_BOX::getIntxVMFlag);
}
}
--- a/hotspot/test/testlibrary_tests/whitebox/vm_flags/StringTest.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/testlibrary_tests/whitebox/vm_flags/StringTest.java Wed Jul 05 20:07:30 2017 +0200
@@ -35,12 +35,14 @@
public class StringTest {
private static final String FLAG_NAME = "CompileOnly";
+ private static final String FLAG_DEBUG_NAME = "SuppressErrorAt";
private static final String[] TESTS = {"StringTest::*", ""};
public static void main(String[] args) throws Exception {
VmFlagTest.runTest(FLAG_NAME, TESTS,
VmFlagTest.WHITE_BOX::setStringVMFlag,
VmFlagTest.WHITE_BOX::getStringVMFlag);
+ VmFlagTest.runTest(FLAG_DEBUG_NAME, VmFlagTest.WHITE_BOX::getStringVMFlag);
}
}
--- a/hotspot/test/testlibrary_tests/whitebox/vm_flags/UintxTest.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/testlibrary_tests/whitebox/vm_flags/UintxTest.java Wed Jul 05 20:07:30 2017 +0200
@@ -36,6 +36,7 @@
public class UintxTest {
private static final String FLAG_NAME = "VerifyGCStartAt";
+ private static final String FLAG_DEBUG_NAME = "CodeCacheMinimumUseSpace";
private static final Long[] TESTS = {0L, 100L, (long) Integer.MAX_VALUE,
(1L << 32L) - 1L, 1L << 32L};
private static final Long[] EXPECTED_64 = TESTS;
@@ -47,6 +48,7 @@
Platform.is64bit() ? EXPECTED_64 : EXPECTED_32,
VmFlagTest.WHITE_BOX::setUintxVMFlag,
VmFlagTest.WHITE_BOX::getUintxVMFlag);
+ VmFlagTest.runTest(FLAG_DEBUG_NAME, VmFlagTest.WHITE_BOX::getUintxVMFlag);
}
}
--- a/hotspot/test/testlibrary_tests/whitebox/vm_flags/VmFlagTest.java Thu Nov 13 10:22:24 2014 -0800
+++ b/hotspot/test/testlibrary_tests/whitebox/vm_flags/VmFlagTest.java Wed Jul 05 20:07:30 2017 +0200
@@ -37,16 +37,18 @@
private final BiConsumer<T, T> test;
private final BiConsumer<String, T> set;
private final Function<String, T> get;
+ private final boolean isPositive;
protected VmFlagTest(String flagName, BiConsumer<String, T> set,
Function<String, T> get, boolean isPositive) {
this.flagName = flagName;
this.set = set;
this.get = get;
+ this.isPositive = isPositive;
if (isPositive) {
- test = this::testPositive;
+ test = this::testWritePositive;
} else {
- test = this::testNegative;
+ test = this::testWriteNegative;
}
}
@@ -63,6 +65,10 @@
runTest(existentFlag, tests, tests, set, get);
}
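+    // Read-only variant: only verifies that the flag's value can be read consistently; no write is attempted.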
+ protected static <T> void runTest(String existentFlag, Function<String, T> get) {
+ runTest(existentFlag, null, null, null, get);
+ }
+
protected static <T> void runTest(String existentFlag, T[] tests,
T[] results, BiConsumer<String, T> set, Function<String, T> get) {
if (existentFlag != null) {
@@ -72,13 +78,23 @@
}
public final void test(T[] tests, T[] results) {
- Asserts.assertEQ(tests.length, results.length, "[TESTBUG] tests.length != results.length");
- for (int i = 0, n = tests.length ; i < n; ++i) {
- test.accept(tests[i], results[i]);
+ if (isPositive) {
+ testRead();
+ }
+ if (tests != null) {
+ Asserts.assertEQ(tests.length, results.length, "[TESTBUG] tests.length != results.length");
+ for (int i = 0, n = tests.length ; i < n; ++i) {
+ test.accept(tests[i], results[i]);
+ }
}
}
protected String getVMOptionAsString() {
+ if (WHITE_BOX.isConstantVMFlag(flagName) || WHITE_BOX.isLockedVMFlag(flagName)) {
+            // JMM cannot access constant flags (debug flags in product builds) or locked flags,
+            // so use the WhiteBox methods to read such flags' values.
+ return asString(getValue());
+ }
HotSpotDiagnosticMXBean diagnostic
= ManagementFactoryHelper.getDiagnosticMXBean();
VMOption tmp;
@@ -90,18 +106,24 @@
return tmp == null ? null : tmp.getValue();
}
- private void testPositive(T value, T expected) {
- String oldValue = getVMOptionAsString();
- Asserts.assertEQ(oldValue, asString(getValue()));
- Asserts.assertEQ(oldValue, asString(WHITE_BOX.getVMFlag(flagName)));
- setNewValue(value);
- String newValue = getVMOptionAsString();
- Asserts.assertEQ(newValue, asString(expected));
- Asserts.assertEQ(newValue, asString(getValue()));
- Asserts.assertEQ(newValue, asString(WHITE_BOX.getVMFlag(flagName)));
+ private String testRead() {
+ String value = getVMOptionAsString();
+ Asserts.assertNotNull(value);
+ Asserts.assertEQ(value, asString(getValue()));
+ Asserts.assertEQ(value, asString(WHITE_BOX.getVMFlag(flagName)));
+ return value;
}
- private void testNegative(T value, T expected) {
+ private void testWritePositive(T value, T expected) {
+ setNewValue(value);
+ String newValue = testRead();
+ Asserts.assertEQ(newValue, asString(expected));
+ }
+
+ private void testWriteNegative(T value, T expected) {
+ // Should always return false for non-existing flags
+ Asserts.assertFalse(WHITE_BOX.isConstantVMFlag(flagName));
+ Asserts.assertFalse(WHITE_BOX.isLockedVMFlag(flagName));
String oldValue = getVMOptionAsString();
Asserts.assertEQ(oldValue, asString(getValue()));
Asserts.assertEQ(oldValue, asString(WHITE_BOX.getVMFlag(flagName)));
@@ -114,4 +136,3 @@
return value == null ? null : "" + value;
}
}
-
--- a/make/CompileJavaModules.gmk Thu Nov 13 10:22:24 2014 -0800
+++ b/make/CompileJavaModules.gmk Wed Jul 05 20:07:30 2017 +0200
@@ -245,7 +245,6 @@
################################################################################
# Exclude building of IIOP transport for RMI Connector
-java.management_EXCLUDES := com/sun/jmx/remote/protocol/iiop
ifeq ($(RMICONNECTOR_IIOP), false)
java.management_EXCLUDES += com/sun/jmx/remote/protocol/iiop
@@ -264,11 +263,11 @@
java.corba_COPY := .prp
java.corba_CLEAN := .properties
-java.corba_EXCLUDES := \
+java.corba_EXCLUDES += \
com/sun/corba/se/PortableActivationIDL \
com/sun/tools/corba/se/logutil \
#
-java.corba_EXCLUDE_FILES := \
+java.corba_EXCLUDE_FILES += \
com/sun/corba/se/impl/presentation/rmi/JNDIStateFactoryImpl.java \
com/sun/corba/se/spi/presentation/rmi/StubWrapper.java \
com/sun/org/omg/CORBA/IDLTypeOperations.java \
@@ -350,12 +349,12 @@
$(JDK_TOPDIR)/src/jdk.sctp/unix/classes/sun/nio/ch/sctp/Shutdown.java
ifeq ($(OPENJDK_TARGET_OS), macosx)
- jdk.sctp_EXCLUDE_FILES := $(SCTP_IMPL_CLASSES)
+ jdk.sctp_EXCLUDE_FILES += $(SCTP_IMPL_CLASSES)
endif
ifeq ($(OPENJDK_TARGET_OS),aix)
# These files are duplicated in AIX_SRC_DIRS
- jdk.sctp_EXCLUDE_FILES := $(SCTP_IMPL_CLASSES)
+ jdk.sctp_EXCLUDE_FILES += $(SCTP_IMPL_CLASSES)
endif
################################################################################
@@ -373,7 +372,7 @@
################################################################################
-jdk.jdi_EXCLUDES := \
+jdk.jdi_EXCLUDES += \
com/sun/tools/example/debug/bdi \
com/sun/tools/example/debug/event \
com/sun/tools/example/debug/gui \
@@ -414,7 +413,7 @@
jdk.localedata_COPY := _dict _th
# Exclude BreakIterator classes that are just used in compile process to generate
# data files and shouldn't go in the product
-jdk.localedata_EXCLUDE_FILES := sun/text/resources/th/BreakIteratorRules_th.java
+jdk.localedata_EXCLUDE_FILES += sun/text/resources/th/BreakIteratorRules_th.java
################################################################################
# Setup the compilation of each module