--- a/.hgtags-top-repo Thu Jan 29 16:16:35 2015 -0800
+++ b/.hgtags-top-repo Wed Jul 05 20:16:58 2017 +0200
@@ -290,3 +290,4 @@
3dd628fde2086218d548841022ee8436b6b88185 jdk9-b45
12f1e276447bcc81516e85367d53e4f08897049d jdk9-b46
b6cca3e6175a69f39e5799b7349ddb0176630291 jdk9-b47
+0064e246d83f6f9fc245c19b6d05041ecaf4b6d4 jdk9-b48
--- a/common/autoconf/basics.m4 Thu Jan 29 16:16:35 2015 -0800
+++ b/common/autoconf/basics.m4 Wed Jul 05 20:16:58 2017 +0200
@@ -987,3 +987,26 @@
IS_RECONFIGURE=no
fi
])
+
+# Check for support for specific options in bash
+AC_DEFUN_ONCE([BASIC_CHECK_BASH_OPTIONS],
+[
+ # Test if bash supports pipefail.
+ AC_MSG_CHECKING([if bash supports pipefail])
+ if ${BASH} -c 'set -o pipefail'; then
+ BASH_ARGS="$BASH_ARGS -o pipefail"
+ AC_MSG_RESULT([yes])
+ else
+ AC_MSG_RESULT([no])
+ fi
+
+ AC_MSG_CHECKING([if bash supports errexit (-e)])
+ if ${BASH} -e -c 'true'; then
+ BASH_ARGS="$BASH_ARGS -e"
+ AC_MSG_RESULT([yes])
+ else
+ AC_MSG_RESULT([no])
+ fi
+
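+  # For illustration: on a bash that supports both options this ends up as
+  # BASH_ARGS="-o pipefail -e", which spec.gmk.in passes on to make via SHELL.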
+ AC_SUBST(BASH_ARGS)
+])
--- a/common/autoconf/bootcycle-spec.gmk.in Thu Jan 29 16:16:35 2015 -0800
+++ b/common/autoconf/bootcycle-spec.gmk.in Wed Jul 05 20:16:58 2017 +0200
@@ -46,8 +46,12 @@
BOOT_JDK := $(JDK_IMAGE_DIR)
# The bootcycle build has a different output directory
-BUILD_OUTPUT:=@BUILD_OUTPUT@/bootcycle-build
-SJAVAC_SERVER_DIR:=$(subst @BUILD_OUTPUT@,$(BUILD_OUTPUT),$(SJAVAC_SERVER_DIR))
+OLD_BUILD_OUTPUT:=@BUILD_OUTPUT@
+BUILD_OUTPUT:=$(OLD_BUILD_OUTPUT)/bootcycle-build
+# The HOTSPOT_DIST dir is not defined in terms of BUILD_OUTPUT in spec.gmk, so rewrite
+# it here. Must not use a space in this patsubst to avoid a leading space in HOTSPOT_DIST.
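+# (Illustration: a space after the comma becomes part of the replacement pattern, so
+# $(patsubst /old%, /new%,/old/hotspot/dist) would expand to " /new/hotspot/dist".)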
+HOTSPOT_DIST:=$(patsubst $(OLD_BUILD_OUTPUT)%,$(BUILD_OUTPUT)%,$(HOTSPOT_DIST))
+SJAVAC_SERVER_DIR:=$(patsubst $(OLD_BUILD_OUTPUT)%, $(BUILD_OUTPUT)%, $(SJAVAC_SERVER_DIR))
JAVA_CMD:=$(BOOT_JDK)/bin/java
JAVAC_CMD:=$(BOOT_JDK)/bin/javac
--- a/common/autoconf/configure.ac Thu Jan 29 16:16:35 2015 -0800
+++ b/common/autoconf/configure.ac Wed Jul 05 20:16:58 2017 +0200
@@ -113,6 +113,7 @@
# Set up tools that require more complex handling, or that are not needed by the configure script.
BASIC_SETUP_COMPLEX_TOOLS
+BASIC_CHECK_BASH_OPTIONS
# Check if pkg-config is available.
PKG_PROG_PKG_CONFIG
--- a/common/autoconf/generated-configure.sh Thu Jan 29 16:16:35 2015 -0800
+++ b/common/autoconf/generated-configure.sh Wed Jul 05 20:16:58 2017 +0200
@@ -853,6 +853,7 @@
OS_VERSION_MINOR
OS_VERSION_MAJOR
PKG_CONFIG
+BASH_ARGS
CODESIGN
XATTR
DSYMUTIL
@@ -3522,6 +3523,9 @@
+# Check for support for specific options in bash
+
+
#
# Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -4329,7 +4333,7 @@
#CUSTOM_AUTOCONF_INCLUDE
# Do not change or remove the following line, it is needed for consistency checks:
-DATE_WHEN_GENERATED=1420811523
+DATE_WHEN_GENERATED=1421247827
###############################################################################
#
@@ -19609,6 +19613,32 @@
fi
+ # Test if bash supports pipefail.
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking if bash supports pipefail" >&5
+$as_echo_n "checking if bash supports pipefail... " >&6; }
+ if ${BASH} -c 'set -o pipefail'; then
+ BASH_ARGS="$BASH_ARGS -o pipefail"
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+ else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ fi
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking if bash supports errexit (-e)" >&5
+$as_echo_n "checking if bash supports errexit (-e)... " >&6; }
+ if ${BASH} -e -c 'true'; then
+ BASH_ARGS="$BASH_ARGS -e"
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+ else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ fi
+
+
+
+
# Check if pkg-config is available.
@@ -27408,8 +27438,8 @@
# The trailing space for everyone except PATH is no typo, but is needed due
# to trailing \ in the Windows paths. These will be stripped later.
$ECHO "$WINPATH_BASH -c 'echo VS_PATH="'\"$PATH\" > set-vs-env.sh' >> $EXTRACT_VC_ENV_BAT_FILE
- $ECHO "$WINPATH_BASH -c 'echo VS_INCLUDE="'\"$INCLUDE\;$include \" >> set-vs-env.sh' >> $EXTRACT_VC_ENV_BAT_FILE
- $ECHO "$WINPATH_BASH -c 'echo VS_LIB="'\"$LIB\;$lib \" >> set-vs-env.sh' >> $EXTRACT_VC_ENV_BAT_FILE
+ $ECHO "$WINPATH_BASH -c 'echo VS_INCLUDE="'\"$INCLUDE \" >> set-vs-env.sh' >> $EXTRACT_VC_ENV_BAT_FILE
+ $ECHO "$WINPATH_BASH -c 'echo VS_LIB="'\"$LIB \" >> set-vs-env.sh' >> $EXTRACT_VC_ENV_BAT_FILE
$ECHO "$WINPATH_BASH -c 'echo VCINSTALLDIR="'\"$VCINSTALLDIR \" >> set-vs-env.sh' >> $EXTRACT_VC_ENV_BAT_FILE
$ECHO "$WINPATH_BASH -c 'echo WindowsSdkDir="'\"$WindowsSdkDir \" >> set-vs-env.sh' >> $EXTRACT_VC_ENV_BAT_FILE
$ECHO "$WINPATH_BASH -c 'echo WINDOWSSDKDIR="'\"$WINDOWSSDKDIR \" >> set-vs-env.sh' >> $EXTRACT_VC_ENV_BAT_FILE
--- a/common/autoconf/spec.gmk.in Thu Jan 29 16:16:35 2015 -0800
+++ b/common/autoconf/spec.gmk.in Wed Jul 05 20:16:58 2017 +0200
@@ -78,6 +78,11 @@
OUTPUT_SYNC_SUPPORTED:=@OUTPUT_SYNC_SUPPORTED@
OUTPUT_SYNC:=@OUTPUT_SYNC@
+# Override the shell with bash
+BASH:=@BASH@
+BASH_ARGS:=@BASH_ARGS@
+SHELL:=$(BASH) $(BASH_ARGS)
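+# For example, with "-e -o pipefail" in BASH_ARGS a recipe line such as
+#   $(SOME_GENERATOR) | $(SOME_FILTER) > $@    (placeholder commands)
+# now fails the build as soon as the first command fails, instead of silently
+# producing truncated output.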
+
# The "human readable" name of this configuration
CONF_NAME:=@CONF_NAME@
@@ -243,7 +248,7 @@
HOTSPOT_OUTPUTDIR=$(BUILD_OUTPUT)/hotspot
JDK_OUTPUTDIR=$(BUILD_OUTPUT)/jdk
IMAGES_OUTPUTDIR=$(BUILD_OUTPUT)/images
-TESTMAKE_OUTPUTDIR=$(BUILD_OUTPUT)/testmake
+TESTMAKE_OUTPUTDIR=$(BUILD_OUTPUT)/test-make
MAKESUPPORT_OUTPUTDIR=$(BUILD_OUTPUT)/make-support
HOTSPOT_DIST=@HOTSPOT_DIST@
@@ -495,7 +500,6 @@
# Tools adhering to a minimal and common standard of posix compliance.
AWK:=@AWK@
BASENAME:=@BASENAME@
-BASH:=@BASH@
CAT:=@CAT@
CCACHE:=@CCACHE@
# CD is going away, but remains to cater for legacy makefiles.
--- a/hotspot/.hgtags Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/.hgtags Wed Jul 05 20:16:58 2017 +0200
@@ -450,3 +450,4 @@
5dc8184af1e2bb30b0103113d1f1a58a21a80c37 jdk9-b45
a184ee1d717297bd35b7c3e35393e137921a3ed2 jdk9-b46
3b241fb72b8925b75941d612db762a6d5da66d02 jdk9-b47
+cc775a4a24c7f5d9e624b4205e9fbd48a17331f6 jdk9-b48
--- a/hotspot/make/aix/Makefile Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/make/aix/Makefile Wed Jul 05 20:16:58 2017 +0200
@@ -246,8 +246,7 @@
XSLT_CHECK = $(REMOTE) $(RUN.JAVAP) javax.xml.transform.TransformerFactory
# If not found then fail fast.
check_j2se_version:
- $(QUIETLY) $(XSLT_CHECK) > /dev/null 2>&1; \
- if [ $$? -ne 0 ]; then \
+ $(QUIETLY) if ! $(XSLT_CHECK) > /dev/null 2>&1; then \
$(REMOTE) $(RUN.JAVA) -version; \
echo "*** An XSLT processor (J2SE 1.4.x or newer) is required" \
"to bootstrap this build" 1>&2; \
--- a/hotspot/make/aix/makefiles/xlc.make Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/make/aix/makefiles/xlc.make Wed Jul 05 20:16:58 2017 +0200
@@ -74,6 +74,12 @@
# no xlc counterpart for -fcheck-new
# CFLAGS += -fcheck-new
+# We need to define this on the command line if we want to use the
+# predefined format specifiers from "inttypes.h". Otherwise system headers
+# can indirectly include inttypes.h before we define __STDC_FORMAT_MACROS
+# in globalDefinitions.hpp.
+CFLAGS += -D__STDC_FORMAT_MACROS
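+# (Without this, PRIu64 and friends may stay undefined in C++ code, because the
+# guarded definitions are skipped on the first, indirect include of inttypes.h.)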
+
ARCHFLAG = -q64
CFLAGS += $(ARCHFLAG)
--- a/hotspot/make/bsd/Makefile Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/make/bsd/Makefile Wed Jul 05 20:16:58 2017 +0200
@@ -240,8 +240,7 @@
XSLT_CHECK = $(REMOTE) $(RUN.JAVAP) javax.xml.transform.TransformerFactory
# If not found then fail fast.
check_j2se_version:
- $(QUIETLY) $(XSLT_CHECK) > /dev/null 2>&1; \
- if [ $$? -ne 0 ]; then \
+ $(QUIETLY) if ! $(XSLT_CHECK) > /dev/null 2>&1; then \
$(REMOTE) $(RUN.JAVA) -version; \
echo "*** An XSLT processor (J2SE 1.4.x or newer) is required" \
"to bootstrap this build" 1>&2; \
--- a/hotspot/make/bsd/makefiles/dtrace.make Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/make/bsd/makefiles/dtrace.make Wed Jul 05 20:16:58 2017 +0200
@@ -179,23 +179,23 @@
# $@.tmp is created first to avoid an empty $(JVMOFFS).h if an error occurs.
$(JVMOFFS).h: $(GENOFFS)
$(QUIETLY) DYLD_LIBRARY_PATH=.:$(DYLD_LIBRARY_PATH) ./$(GENOFFS) -header > $@.tmp; touch $@; \
- if [ `diff $@.tmp $@ > /dev/null 2>&1; echo $$?` -ne 0 ] ; \
- then rm -f $@; mv $@.tmp $@; \
- else rm -f $@.tmp; \
+ if diff $@.tmp $@ > /dev/null 2>&1 ; \
+ then rm -f $@.tmp; \
+ else rm -f $@; mv $@.tmp $@; \
fi
$(JVMOFFS)Index.h: $(GENOFFS)
$(QUIETLY) DYLD_LIBRARY_PATH=.:$(DYLD_LIBRARY_PATH) ./$(GENOFFS) -index > $@.tmp; touch $@; \
- if [ `diff $@.tmp $@ > /dev/null 2>&1; echo $$?` -ne 0 ] ; \
- then rm -f $@; mv $@.tmp $@; \
- else rm -f $@.tmp; \
+ if diff $@.tmp $@ > /dev/null 2>&1 ; \
+ then rm -f $@.tmp; \
+ else rm -f $@; mv $@.tmp $@; \
fi
$(JVMOFFS).cpp: $(GENOFFS) $(JVMOFFS).h $(JVMOFFS)Index.h
$(QUIETLY) DYLD_LIBRARY_PATH=.:$(DYLD_LIBRARY_PATH) ./$(GENOFFS) -table > $@.tmp; touch $@; \
- if [ `diff $@.tmp $@ > /dev/null 2>&1; echo $$?` -ne 0 ] ; \
- then rm -f $@; mv $@.tmp $@; \
- else rm -f $@.tmp; \
+ if diff $@.tmp $@ > /dev/null 2>&1; \
+ then rm -f $@.tmp; \
+ else rm -f $@; mv $@.tmp $@; \
fi
$(JVMOFFS.o): $(JVMOFFS).h $(JVMOFFS).cpp
--- a/hotspot/make/bsd/makefiles/universal.gmk Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/make/bsd/makefiles/universal.gmk Wed Jul 05 20:16:58 2017 +0200
@@ -59,7 +59,7 @@
# Package built libraries in a universal binary
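+# find exits with status 1 when one of the listed architecture directories does not
+# exist; the "|| test $$? = 1" guards below tolerate that so the recipes keep working
+# under the stricter bash options (-e) from BASH_ARGS, while other failures still fail.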
$(UNIVERSAL_LIPO_LIST):
- BUILT_LIPO_FILES="`find $(EXPORT_JRE_LIB_DIR)/{i386,amd64}/$(subst $(EXPORT_JRE_LIB_DIR)/,,$@) 2>/dev/null`"; \
+ BUILT_LIPO_FILES="`find $(EXPORT_JRE_LIB_DIR)/{i386,amd64}/$(subst $(EXPORT_JRE_LIB_DIR)/,,$@) 2>/dev/null`" || test $$? = "1"; \
if [ -n "$${BUILT_LIPO_FILES}" ]; then \
$(MKDIR) -p $(shell dirname $@); \
lipo -create -output $@ $${BUILT_LIPO_FILES}; \
@@ -70,7 +70,7 @@
# - copies directories; including empty dirs
# - copies files, symlinks, other non-directory files
$(UNIVERSAL_COPY_LIST):
- BUILT_COPY_FILES="`find $(EXPORT_JRE_LIB_DIR)/{i386,amd64}/$(subst $(EXPORT_JRE_LIB_DIR)/,,$@) -prune 2>/dev/null`"; \
+ BUILT_COPY_FILES="`find $(EXPORT_JRE_LIB_DIR)/{i386,amd64}/$(subst $(EXPORT_JRE_LIB_DIR)/,,$@) -prune 2>/dev/null`" || test $$? = "1"; \
if [ -n "$${BUILT_COPY_FILES}" ]; then \
for i in $${BUILT_COPY_FILES}; do \
$(MKDIR) -p $(shell dirname $@); \
--- a/hotspot/make/linux/Makefile Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/make/linux/Makefile Wed Jul 05 20:16:58 2017 +0200
@@ -246,8 +246,7 @@
XSLT_CHECK = $(REMOTE) $(RUN.JAVAP) javax.xml.transform.TransformerFactory
# If not found then fail fast.
check_j2se_version:
- $(QUIETLY) $(XSLT_CHECK) > /dev/null 2>&1; \
- if [ $$? -ne 0 ]; then \
+ $(QUIETLY) if ! $(XSLT_CHECK) > /dev/null 2>&1; then \
$(REMOTE) $(RUN.JAVA) -version; \
echo "*** An XSLT processor (J2SE 1.4.x or newer) is required" \
"to bootstrap this build" 1>&2; \
--- a/hotspot/make/linux/makefiles/vm.make Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/make/linux/makefiles/vm.make Wed Jul 05 20:16:58 2017 +0200
@@ -334,10 +334,8 @@
rm -f $@.1; ln -s $@ $@.1; \
if [ \"$(CROSS_COMPILE_ARCH)\" = \"\" ] ; then \
if [ -x /usr/sbin/selinuxenabled ] ; then \
- /usr/sbin/selinuxenabled; \
- if [ $$? = 0 ] ; then \
- /usr/bin/chcon -t textrel_shlib_t $@; \
- if [ $$? != 0 ]; then \
+ if /usr/sbin/selinuxenabled; then \
+ if ! /usr/bin/chcon -t textrel_shlib_t $@; then \
echo "ERROR: Cannot chcon $@"; \
fi \
fi \
--- a/hotspot/make/sa.files Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/make/sa.files Wed Jul 05 20:16:58 2017 +0200
@@ -39,6 +39,7 @@
$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/sparc/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/c1/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/ci/*.java \
+$(AGENT_SRC_DIR)/sun/jvm/hotspot/classfile/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/code/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/compiler/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/*.java \
@@ -49,8 +50,10 @@
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/cdbg/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/cdbg/basic/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/dummy/*.java \
+$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/ia64/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/linux/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/linux/amd64/*.java \
+$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/linux/ia64/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/linux/ppc64/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/linux/x86/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/linux/sparc/*.java \
@@ -71,6 +74,7 @@
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/win32/coff/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/windbg/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/windbg/amd64/*.java \
+$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/windbg/ia64/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/windbg/x86/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/windows/x86/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/windows/amd64/*.java \
@@ -101,6 +105,8 @@
$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/solaris_x86/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/sparc/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/x86/*.java \
+$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/win32_amd64/*.java \
+$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/win32_x86/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/ppc64/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/tools/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/tools/jcore/*.java \
--- a/hotspot/make/solaris/Makefile Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/make/solaris/Makefile Wed Jul 05 20:16:58 2017 +0200
@@ -190,8 +190,7 @@
XSLT_CHECK = $(RUN.JAVAP) javax.xml.transform.TransformerFactory
# If not found then fail fast.
check_j2se_version:
- $(QUIETLY) $(XSLT_CHECK) > /dev/null 2>&1; \
- if [ $$? -ne 0 ]; then \
+ $(QUIETLY) if ! $(XSLT_CHECK) > /dev/null 2>&1; then \
$(RUN.JAVA) -version; \
echo "*** An XSLT processor (J2SE 1.4.x or newer) is required" \
"to bootstrap this build" 1>&2; \
--- a/hotspot/make/solaris/makefiles/dtrace.make Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/make/solaris/makefiles/dtrace.make Wed Jul 05 20:16:58 2017 +0200
@@ -171,11 +171,11 @@
./lib$(GENOFFS).so
CONDITIONALLY_UPDATE_JVMOFFS_TARGET = \
- cmp -s $@ $@.tmp; \
- case $$? in \
- 0) rm -f $@.tmp;; \
- *) rm -f $@ && mv $@.tmp $@ && echo Updated $@;; \
- esac
+ if cmp -s $@ $@.tmp; then \
+ rm -f $@.tmp; \
+ else \
+ rm -f $@ && mv $@.tmp $@ && echo Updated $@; \
+ fi
# $@.tmp is created first to avoid an empty $(JVMOFFS).h if an error occurs.
$(JVMOFFS).h: $(GENOFFS)
--- a/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.hpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.hpp Wed Jul 05 20:16:58 2017 +0200
@@ -567,16 +567,21 @@
inline void load_with_trap_null_check(Register d, int si16, Register s1);
// Load heap oop and decompress. Loaded oop may not be null.
- inline void load_heap_oop_not_null(Register d, RegisterOrConstant offs, Register s1 = noreg);
+ // Specify tmp to save one cycle.
+ inline void load_heap_oop_not_null(Register d, RegisterOrConstant offs, Register s1 = noreg,
+ Register tmp = noreg);
+ // Compress and store heap oop. The oop to be stored may not be null.
+ // Specify tmp register if d should not be changed.
inline void store_heap_oop_not_null(Register d, RegisterOrConstant offs, Register s1,
- /*specify if d must stay uncompressed*/ Register tmp = noreg);
+ Register tmp = noreg);
// Null allowed.
inline void load_heap_oop(Register d, RegisterOrConstant offs, Register s1 = noreg);
// Encode/decode heap oop. Oop may not be null, else en/decoding goes wrong.
+ // src == d allowed.
inline Register encode_heap_oop_not_null(Register d, Register src = noreg);
- inline void decode_heap_oop_not_null(Register d);
+ inline Register decode_heap_oop_not_null(Register d, Register src = noreg);
// Null allowed.
inline void decode_heap_oop(Register d);
--- a/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.inline.hpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.inline.hpp Wed Jul 05 20:16:58 2017 +0200
@@ -311,11 +311,14 @@
ld(d, si16, s1);
}
-inline void MacroAssembler::load_heap_oop_not_null(Register d, RegisterOrConstant offs, Register s1) {
+inline void MacroAssembler::load_heap_oop_not_null(Register d, RegisterOrConstant offs, Register s1, Register tmp) {
if (UseCompressedOops) {
- lwz(d, offs, s1);
+ // In disjoint mode decoding can save a cycle if src != dst.
+ Register narrowOop = (tmp != noreg && Universe::narrow_oop_base_disjoint()) ? tmp : d;
+ lwz(narrowOop, offs, s1);
// Attention: no null check here!
- decode_heap_oop_not_null(d);
+ Register res = decode_heap_oop_not_null(d, narrowOop);
+ assert(res == d, "caller will not consume loaded value");
} else {
ld(d, offs, s1);
}
@@ -340,26 +343,36 @@
}
inline Register MacroAssembler::encode_heap_oop_not_null(Register d, Register src) {
- Register current = (src!=noreg) ? src : d; // Compressed oop is in d if no src provided.
- if (Universe::narrow_oop_base() != NULL) {
+ Register current = (src != noreg) ? src : d; // Oop to be compressed is in d if no src provided.
+ if (Universe::narrow_oop_base_overlaps()) {
sub(d, current, R30);
current = d;
}
if (Universe::narrow_oop_shift() != 0) {
- srdi(d, current, LogMinObjAlignmentInBytes);
+ rldicl(d, current, 64-Universe::narrow_oop_shift(), 32); // Clears the upper bits.
current = d;
}
return current; // Encoded oop is in this register.
}
-inline void MacroAssembler::decode_heap_oop_not_null(Register d) {
+inline Register MacroAssembler::decode_heap_oop_not_null(Register d, Register src) {
+ if (Universe::narrow_oop_base_disjoint() && src != noreg && src != d &&
+ Universe::narrow_oop_shift() != 0) {
+ mr(d, R30);
+ rldimi(d, src, Universe::narrow_oop_shift(), 32-Universe::narrow_oop_shift());
+ return d;
+ }
+
+ Register current = (src != noreg) ? src : d; // Compressed oop is in d if no src provided.
if (Universe::narrow_oop_shift() != 0) {
- assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
- sldi(d, d, LogMinObjAlignmentInBytes);
+ sldi(d, current, Universe::narrow_oop_shift());
+ current = d;
}
if (Universe::narrow_oop_base() != NULL) {
- add(d, d, R30);
+ add(d, current, R30);
+ current = d;
}
+ return current; // Decoded oop is in this register.
}
inline void MacroAssembler::decode_heap_oop(Register d) {
@@ -368,13 +381,7 @@
cmpwi(CCR0, d, 0);
beq(CCR0, isNull);
}
- if (Universe::narrow_oop_shift() != 0) {
- assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
- sldi(d, d, LogMinObjAlignmentInBytes);
- }
- if (Universe::narrow_oop_base() != NULL) {
- add(d, d, R30);
- }
+ decode_heap_oop_not_null(d);
bind(isNull);
}
--- a/hotspot/src/cpu/ppc/vm/methodHandles_ppc.cpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/cpu/ppc/vm/methodHandles_ppc.cpp Wed Jul 05 20:16:58 2017 +0200
@@ -172,15 +172,15 @@
// Load the invoker, as MH -> MH.form -> LF.vmentry
__ verify_oop(recv);
- __ load_heap_oop_not_null(method_temp, NONZERO(java_lang_invoke_MethodHandle::form_offset_in_bytes()), recv);
+ __ load_heap_oop_not_null(method_temp, NONZERO(java_lang_invoke_MethodHandle::form_offset_in_bytes()), recv, temp2);
__ verify_oop(method_temp);
- __ load_heap_oop_not_null(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes()), method_temp);
+ __ load_heap_oop_not_null(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes()), method_temp, temp2);
__ verify_oop(method_temp);
- // the following assumes that a Method* is normally compressed in the vmtarget field:
+ // The following assumes that a Method* is normally compressed in the vmtarget field:
__ ld(method_temp, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()), method_temp);
if (VerifyMethodHandles && !for_compiler_entry) {
- // make sure recv is already on stack
+ // Make sure recv is already on stack.
__ ld(temp2, in_bytes(Method::const_offset()), method_temp);
__ load_sized_value(temp2, in_bytes(ConstMethod::size_of_parameters_offset()), temp2,
sizeof(u2), /*is_signed*/ false);
@@ -259,8 +259,9 @@
}
if (TraceMethodHandles) {
- if (tmp_mh != noreg)
+ if (tmp_mh != noreg) {
__ mr(R23_method_handle, tmp_mh); // make stub happy
+ }
trace_method_handle_interpreter_entry(_masm, iid);
}
@@ -332,7 +333,7 @@
if (VerifyMethodHandles && iid != vmIntrinsics::_linkToInterface) {
Label L_ok;
Register temp2_defc = temp2;
- __ load_heap_oop_not_null(temp2_defc, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes()), member_reg);
+ __ load_heap_oop_not_null(temp2_defc, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes()), member_reg, temp3);
load_klass_from_Class(_masm, temp2_defc, temp3, temp4);
__ verify_klass_ptr(temp2_defc);
__ check_klass_subtype(temp1_recv_klass, temp2_defc, temp3, temp4, L_ok);
@@ -407,7 +408,7 @@
}
Register temp2_intf = temp2;
- __ load_heap_oop_not_null(temp2_intf, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes()), member_reg);
+ __ load_heap_oop_not_null(temp2_intf, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes()), member_reg, temp3);
load_klass_from_Class(_masm, temp2_intf, temp3, temp4);
__ verify_klass_ptr(temp2_intf);
@@ -464,7 +465,7 @@
strstr(adaptername, "linkTo") == NULL); // static linkers don't have MH
const char* mh_reg_name = has_mh ? "R23_method_handle" : "G23";
tty->print_cr("MH %s %s="INTPTR_FORMAT " sp=" INTPTR_FORMAT,
- adaptername, mh_reg_name, (intptr_t) mh, (intptr_t) entry_sp);
+ adaptername, mh_reg_name, (intptr_t) mh, entry_sp);
if (Verbose) {
tty->print_cr("Registers:");
@@ -535,23 +536,22 @@
BLOCK_COMMENT("trace_method_handle {");
- int nbytes_save = 10 * 8; // 10 volatile gprs
- __ save_LR_CR(R0);
- __ mr(R0, R1_SP); // saved_sp
- assert(Assembler::is_simm(-nbytes_save, 16), "Overwriting R0");
- // Push_frame_reg_args only uses R0 if nbytes_save is wider than 16 bit.
- __ push_frame_reg_args(nbytes_save, R0);
- __ save_volatile_gprs(R1_SP, frame::abi_reg_args_size); // Except R0.
+ const Register tmp = R11; // Will be preserved.
+ const int nbytes_save = 11*8; // volatile gprs except R0
+ __ save_volatile_gprs(R1_SP, -nbytes_save); // except R0
+ __ save_LR_CR(tmp); // save in old frame
- __ load_const(R3_ARG1, (address)adaptername);
+ __ mr(R5_ARG3, R1_SP); // saved_sp
+ __ push_frame_reg_args(nbytes_save, tmp);
+
+ __ load_const_optimized(R3_ARG1, (address)adaptername, tmp);
__ mr(R4_ARG2, R23_method_handle);
- __ mr(R5_ARG3, R0); // saved_sp
__ mr(R6_ARG4, R1_SP);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub));
- __ restore_volatile_gprs(R1_SP, 112); // Except R0.
__ pop_frame();
- __ restore_LR_CR(R0);
+ __ restore_LR_CR(tmp);
+ __ restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
BLOCK_COMMENT("} trace_method_handle");
}
--- a/hotspot/src/cpu/ppc/vm/ppc.ad Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/cpu/ppc/vm/ppc.ad Wed Jul 05 20:16:58 2017 +0200
@@ -1,6 +1,6 @@
//
-// Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
-// Copyright 2012, 2014 SAP AG. All rights reserved.
+// Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
+// Copyright 2012, 2015 SAP AG. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
@@ -2698,7 +2698,7 @@
const_toc_addr = __ address_constant((address)a.value(), RelocationHolder::none);
__ relocate(a.rspec());
} else if (constant_reloc == relocInfo::metadata_type) {
- AddressLiteral a = __ allocate_metadata_address((Metadata *)val);
+ AddressLiteral a = __ constant_metadata_address((Metadata *)val);
const_toc_addr = __ address_constant((address)a.value(), RelocationHolder::none);
__ relocate(a.rspec());
} else {
@@ -2727,7 +2727,7 @@
const_toc_addr = __ address_constant((address)a.value(), RelocationHolder::none);
__ relocate(a.rspec());
} else if (constant_reloc == relocInfo::metadata_type) {
- AddressLiteral a = __ allocate_metadata_address((Metadata *)val);
+ AddressLiteral a = __ constant_metadata_address((Metadata *)val);
const_toc_addr = __ address_constant((address)a.value(), RelocationHolder::none);
__ relocate(a.rspec());
} else { // non-oop pointers, e.g. card mark base, heap top
@@ -6029,6 +6029,20 @@
ins_pipe(pipe_class_default);
%}
+// Optimize DecodeN for disjoint base.
+// Load base of compressed oops into a register
+instruct loadBase(iRegLdst dst) %{
+ effect(DEF dst);
+
+ format %{ "MR $dst, r30_heapbase" %}
+ size(4);
+ ins_encode %{
+ // TODO: PPC port $archOpcode(ppc64Opcode_or);
+ __ mr($dst$$Register, R30);
+ %}
+ ins_pipe(pipe_class_default);
+%}
+
// Loading ConN must be postalloc expanded so that edges between
// the nodes are safe. They may not interfere with a safepoint.
// GL TODO: This needs three instructions: better put this into the constant pool.
@@ -6724,13 +6738,12 @@
ins_pipe(pipe_class_default);
%}
-// base != 0
-// 32G aligned narrow oop base.
-instruct encodeP_32GAligned(iRegNdst dst, iRegPsrc src) %{
+// Disjoint narrow oop base.
+instruct encodeP_Disjoint(iRegNdst dst, iRegPsrc src) %{
match(Set dst (EncodeP src));
- predicate(false /* TODO: PPC port Universe::narrow_oop_base_disjoint()*/);
-
- format %{ "EXTRDI $dst, $src, #32, #3 \t// encode with 32G aligned base" %}
+ predicate(Universe::narrow_oop_base_disjoint());
+
+ format %{ "EXTRDI $dst, $src, #32, #3 \t// encode with disjoint base" %}
size(4);
ins_encode %{
// TODO: PPC port $archOpcode(ppc64Opcode_rldicl);
@@ -6745,7 +6758,7 @@
effect(TEMP crx);
predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull &&
Universe::narrow_oop_shift() != 0 &&
- true /* TODO: PPC port Universe::narrow_oop_base_overlaps()*/);
+ Universe::narrow_oop_base_overlaps());
format %{ "EncodeP $dst, $crx, $src \t// postalloc expanded" %}
postalloc_expand( postalloc_expand_encode_oop(dst, src, crx));
@@ -6756,7 +6769,7 @@
match(Set dst (EncodeP src));
predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull &&
Universe::narrow_oop_shift() != 0 &&
- true /* TODO: PPC port Universe::narrow_oop_base_overlaps()*/);
+ Universe::narrow_oop_base_overlaps());
format %{ "EncodeP $dst, $src\t// $src != Null, postalloc expanded" %}
postalloc_expand( postalloc_expand_encode_oop_not_null(dst, src) );
@@ -6876,6 +6889,7 @@
n->bottom_type()->is_oopptr()->ptr() != TypePtr::Constant) &&
Universe::narrow_oop_shift() != 0 &&
Universe::narrow_oop_base() != 0);
+ ins_cost(4 * DEFAULT_COST); // Should be more expensive than decodeN_Disjoint_isel_Ex.
effect(TEMP crx);
format %{ "DecodeN $dst, $src \t// Kills $crx, postalloc expanded" %}
@@ -6897,6 +6911,106 @@
ins_pipe(pipe_class_default);
%}
+// Optimize DecodeN for disjoint base.
+// Shift the narrow oop and OR it into the register that already contains the heap base.
+// Base == dst must hold, and is assured by construction in postalloc_expand.
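+// In effect dst = base | (src << shift); this works because a disjoint base has
+// zeros in every bit position the shifted narrow oop can occupy.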
+instruct decodeN_mergeDisjoint(iRegPdst dst, iRegNsrc src, iRegLsrc base) %{
+ match(Set dst (DecodeN src));
+ effect(TEMP base);
+ predicate(false);
+
+ format %{ "RLDIMI $dst, $src, shift, 32-shift \t// DecodeN (disjoint base)" %}
+ size(4);
+ ins_encode %{
+ // TODO: PPC port $archOpcode(ppc64Opcode_rldimi);
+ __ rldimi($dst$$Register, $src$$Register, Universe::narrow_oop_shift(), 32-Universe::narrow_oop_shift());
+ %}
+ ins_pipe(pipe_class_default);
+%}
+
+// Optimize DecodeN for disjoint base.
+// This node requires only one cycle on the critical path.
+// We must postalloc_expand as we cannot express use_def effects where
+// the used register is L and the def'ed register is P.
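+// (The MR from loadBase does not depend on src, so only the RLDIMI lies on the
+// path from the narrow oop to the decoded oop.)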
+instruct decodeN_Disjoint_notNull_Ex(iRegPdst dst, iRegNsrc src) %{
+ match(Set dst (DecodeN src));
+ effect(TEMP_DEF dst);
+ predicate((n->bottom_type()->is_oopptr()->ptr() == TypePtr::NotNull ||
+ n->bottom_type()->is_oopptr()->ptr() == TypePtr::Constant) &&
+ Universe::narrow_oop_base_disjoint());
+ ins_cost(DEFAULT_COST);
+
+ format %{ "MOV $dst, R30 \t\n"
+ "RLDIMI $dst, $src, shift, 32-shift \t// decode with disjoint base" %}
+ postalloc_expand %{
+ loadBaseNode *n1 = new loadBaseNode();
+ n1->add_req(NULL);
+ n1->_opnds[0] = op_dst;
+
+ decodeN_mergeDisjointNode *n2 = new decodeN_mergeDisjointNode();
+ n2->add_req(n_region, n_src, n1);
+ n2->_opnds[0] = op_dst;
+ n2->_opnds[1] = op_src;
+ n2->_opnds[2] = op_dst;
+ n2->_bottom_type = _bottom_type;
+
+ ra_->set_pair(n1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
+ ra_->set_pair(n2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
+
+ nodes->push(n1);
+ nodes->push(n2);
+ %}
+%}
+
+instruct decodeN_Disjoint_isel_Ex(iRegPdst dst, iRegNsrc src, flagsReg crx) %{
+ match(Set dst (DecodeN src));
+ effect(TEMP_DEF dst, TEMP crx);
+ predicate((n->bottom_type()->is_oopptr()->ptr() != TypePtr::NotNull &&
+ n->bottom_type()->is_oopptr()->ptr() != TypePtr::Constant) &&
+ Universe::narrow_oop_base_disjoint() && VM_Version::has_isel());
+ ins_cost(3 * DEFAULT_COST);
+
+ format %{ "DecodeN $dst, $src \t// decode with disjoint base using isel" %}
+ postalloc_expand %{
+ loadBaseNode *n1 = new loadBaseNode();
+ n1->add_req(NULL);
+ n1->_opnds[0] = op_dst;
+
+ cmpN_reg_imm0Node *n_compare = new cmpN_reg_imm0Node();
+ n_compare->add_req(n_region, n_src);
+ n_compare->_opnds[0] = op_crx;
+ n_compare->_opnds[1] = op_src;
+ n_compare->_opnds[2] = new immN_0Oper(TypeNarrowOop::NULL_PTR);
+
+ decodeN_mergeDisjointNode *n2 = new decodeN_mergeDisjointNode();
+ n2->add_req(n_region, n_src, n1);
+ n2->_opnds[0] = op_dst;
+ n2->_opnds[1] = op_src;
+ n2->_opnds[2] = op_dst;
+ n2->_bottom_type = _bottom_type;
+
+ cond_set_0_ptrNode *n_cond_set = new cond_set_0_ptrNode();
+ n_cond_set->add_req(n_region, n_compare, n2);
+ n_cond_set->_opnds[0] = op_dst;
+ n_cond_set->_opnds[1] = op_crx;
+ n_cond_set->_opnds[2] = op_dst;
+ n_cond_set->_bottom_type = _bottom_type;
+
+ assert(ra_->is_oop(this) == true, "A decodeN node must produce an oop!");
+ ra_->set_oop(n_cond_set, true);
+
+ ra_->set_pair(n1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
+ ra_->set_pair(n_compare->_idx, ra_->get_reg_second(n_crx), ra_->get_reg_first(n_crx));
+ ra_->set_pair(n2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
+ ra_->set_pair(n_cond_set->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
+
+ nodes->push(n1);
+ nodes->push(n_compare);
+ nodes->push(n2);
+ nodes->push(n_cond_set);
+ %}
+%}
+
// src != 0, shift != 0, base != 0
instruct decodeN_notNull_addBase_Ex(iRegPdst dst, iRegNsrc src) %{
match(Set dst (DecodeN src));
@@ -6904,6 +7018,7 @@
n->bottom_type()->is_oopptr()->ptr() == TypePtr::Constant) &&
Universe::narrow_oop_shift() != 0 &&
Universe::narrow_oop_base() != 0);
+ ins_cost(2 * DEFAULT_COST);
format %{ "DecodeN $dst, $src \t// $src != NULL, postalloc expanded" %}
postalloc_expand( postalloc_expand_decode_oop_not_null(dst, src));
@@ -6973,13 +7088,12 @@
ins_pipe(pipe_class_default);
%}
-// base != 0
-// 32G aligned narrow oop base.
-instruct encodePKlass_32GAligned(iRegNdst dst, iRegPsrc src) %{
+// Disjoint narrow oop base.
+instruct encodePKlass_Disjoint(iRegNdst dst, iRegPsrc src) %{
match(Set dst (EncodePKlass src));
predicate(false /* TODO: PPC port Universe::narrow_klass_base_disjoint()*/);
- format %{ "EXTRDI $dst, $src, #32, #3 \t// encode with 32G aligned base" %}
+ format %{ "EXTRDI $dst, $src, #32, #3 \t// encode with disjoint base" %}
size(4);
ins_encode %{
// TODO: PPC port $archOpcode(ppc64Opcode_rldicl);
@@ -7486,7 +7600,7 @@
ins_encode %{
// TODO: PPC port $archOpcode(ppc64Opcode_compound);
__ cmpxchgd($crx$$CondRegister, R0, $oldVal$$Register, $newVal$$Register, $mem_ptr$$Register,
- MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
+ MacroAssembler::MemBarAcq, MacroAssembler::cmpxchgx_hint_atomic_update(),
noreg, NULL, true);
%}
ins_pipe(pipe_class_default);
@@ -10476,7 +10590,7 @@
match(Set crx (CmpN src1 src2));
size(4);
- ins_cost(DEFAULT_COST);
+ ins_cost(2);
format %{ "CMPLW $crx, $src1, $src2 \t// compressed ptr" %}
ins_encode %{
// TODO: PPC port $archOpcode(ppc64Opcode_cmpl);
@@ -10488,7 +10602,7 @@
instruct cmpN_reg_imm0(flagsReg crx, iRegNsrc src1, immN_0 src2) %{
match(Set crx (CmpN src1 src2));
// Make this more expensive than zeroCheckN_iReg_imm0.
- ins_cost(DEFAULT_COST);
+ ins_cost(2);
format %{ "CMPLWI $crx, $src1, $src2 \t// compressed ptr" %}
size(4);
@@ -10508,6 +10622,7 @@
_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne &&
_leaf->as_If()->_prob >= PROB_LIKELY_MAG(4) &&
Matcher::branches_to_uncommon_trap(_leaf));
+ ins_cost(1); // Should not be cheaper than zeroCheckN.
ins_is_TrapBasedCheckNode(true);
@@ -10889,7 +11004,7 @@
instruct partialSubtypeCheck(iRegPdst result, iRegP_N2P subklass, iRegP_N2P superklass,
iRegPdst tmp_klass, iRegPdst tmp_arrayptr) %{
match(Set result (PartialSubtypeCheck subklass superklass));
- effect(TEMP result, TEMP tmp_klass, TEMP tmp_arrayptr);
+ effect(TEMP_DEF result, TEMP tmp_klass, TEMP tmp_arrayptr);
ins_cost(DEFAULT_COST*10);
format %{ "PartialSubtypeCheck $result = ($subklass instanceOf $superklass) tmp: $tmp_klass, $tmp_arrayptr" %}
@@ -11000,7 +11115,7 @@
predicate(SpecialStringIndexOf); // type check implicit by parameter type, See Matcher::match_rule_supported
match(Set result (StrIndexOf (Binary haystack haycnt) (Binary (AddP needleImm offsetImm) needlecntImm)));
- effect(TEMP result, TEMP tmp1, TEMP tmp2, KILL cr0, KILL cr1);
+ effect(TEMP_DEF result, TEMP tmp1, TEMP tmp2, KILL cr0, KILL cr1);
ins_cost(150);
format %{ "String IndexOf CSCL1 $haystack[0..$haycnt], $needleImm+$offsetImm[0..$needlecntImm]"
@@ -11037,7 +11152,7 @@
iRegIdst tmp1, iRegIdst tmp2,
flagsRegCR0 cr0, flagsRegCR1 cr1) %{
match(Set result (StrIndexOf (Binary haystack haycnt) (Binary needle needlecntImm)));
- effect(USE_KILL needle, /* TDEF needle, */ TEMP result,
+ effect(USE_KILL needle, /* TDEF needle, */ TEMP_DEF result,
TEMP tmp1, TEMP tmp2);
// Required for EA: check if it is still a type_array.
predicate(SpecialStringIndexOf && n->in(3)->in(1)->bottom_type()->is_aryptr()->const_oop() &&
@@ -11084,7 +11199,7 @@
iRegIdst tmp1, iRegIdst tmp2, iRegIdst tmp3, iRegIdst tmp4, iRegIdst tmp5,
flagsRegCR0 cr0, flagsRegCR1 cr1, flagsRegCR6 cr6) %{
match(Set result (StrIndexOf (Binary haystack haycnt) (Binary needle needlecntImm)));
- effect(USE_KILL haycnt, /* better: TDEF haycnt, */ TEMP result,
+ effect(USE_KILL haycnt, /* better: TDEF haycnt, */ TEMP_DEF result,
TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, KILL cr0, KILL cr1, KILL cr6);
// Required for EA: check if it is still a type_array.
predicate(SpecialStringIndexOf && n->in(3)->in(1)->bottom_type()->is_aryptr()->const_oop() &&
@@ -11118,7 +11233,7 @@
flagsRegCR0 cr0, flagsRegCR1 cr1, flagsRegCR6 cr6) %{
match(Set result (StrIndexOf (Binary haystack haycnt) (Binary needle needlecnt)));
effect(USE_KILL haycnt, USE_KILL needlecnt, /*better: TDEF haycnt, TDEF needlecnt,*/
- TEMP result,
+ TEMP_DEF result,
TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr0, KILL cr1, KILL cr6);
predicate(SpecialStringIndexOf); // See Matcher::match_rule_supported.
ins_cost(300);
@@ -11142,7 +11257,7 @@
iRegPdst tmp1, iRegPdst tmp2,
flagsRegCR0 cr0, flagsRegCR6 cr6, regCTR ctr) %{
match(Set result (StrEquals (Binary str1 str2) cntImm));
- effect(TEMP result, TEMP tmp1, TEMP tmp2,
+ effect(TEMP_DEF result, TEMP tmp1, TEMP tmp2,
KILL cr0, KILL cr6, KILL ctr);
predicate(SpecialStringEquals); // See Matcher::match_rule_supported.
ins_cost(250);
@@ -11165,7 +11280,7 @@
iRegPdst tmp1, iRegPdst tmp2, iRegPdst tmp3, iRegPdst tmp4, iRegPdst tmp5,
flagsRegCR0 cr0, flagsRegCR1 cr1, flagsRegCR6 cr6, regCTR ctr) %{
match(Set result (StrEquals (Binary str1 str2) cnt));
- effect(TEMP result, TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5,
+ effect(TEMP_DEF result, TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5,
KILL cr0, KILL cr1, KILL cr6, KILL ctr);
predicate(SpecialStringEquals); // See Matcher::match_rule_supported.
ins_cost(300);
@@ -11188,7 +11303,7 @@
instruct string_compare(rarg1RegP str1, rarg2RegP str2, rarg3RegI cnt1, rarg4RegI cnt2, iRegIdst result,
iRegPdst tmp, flagsRegCR0 cr0, regCTR ctr) %{
match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
- effect(USE_KILL cnt1, USE_KILL cnt2, USE_KILL str1, USE_KILL str2, TEMP result, TEMP tmp, KILL cr0, KILL ctr);
+ effect(USE_KILL cnt1, USE_KILL cnt2, USE_KILL str1, USE_KILL str2, TEMP_DEF result, TEMP tmp, KILL cr0, KILL ctr);
ins_cost(300);
ins_alignment(8); // 'compute_padding()' gets called, up to this number-1 nops will get inserted.
--- a/hotspot/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp Wed Jul 05 20:16:58 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -483,15 +483,6 @@
}
-jbyte* G1PostBarrierStub::_byte_map_base = NULL;
-
-jbyte* G1PostBarrierStub::byte_map_base_slow() {
- BarrierSet* bs = Universe::heap()->barrier_set();
- assert(bs->is_a(BarrierSet::G1SATBCTLogging),
- "Must be if we're using this.");
- return ((G1SATBCardTableModRefBS*)bs)->byte_map_base;
-}
-
void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
--- a/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp Wed Jul 05 20:16:58 2017 +0200
@@ -1374,6 +1374,7 @@
}
void InterpreterMacroAssembler::test_invocation_counter_for_mdp(Register invocation_count,
+ Register method_counters,
Register Rtmp,
Label &profile_continue) {
assert(ProfileInterpreter, "must be profiling interpreter");
@@ -1386,9 +1387,8 @@
br_notnull_short(ImethodDataPtr, Assembler::pn, done);
// Test to see if we should create a method data oop
- AddressLiteral profile_limit((address) &InvocationCounter::InterpreterProfileLimit);
- sethi(profile_limit, Rtmp);
- ld(Rtmp, profile_limit.low10(), Rtmp);
+ Address profile_limit(method_counters, MethodCounters::interpreter_profile_limit_offset());
+ ld(profile_limit, Rtmp);
cmp(invocation_count, Rtmp);
// Use long branches because call_VM() code and following code generated by
// test_backedge_count_for_osr() is large in debug VM.
@@ -2375,6 +2375,7 @@
#ifndef CC_INTERP
void InterpreterMacroAssembler::test_backedge_count_for_osr( Register backedge_count,
+ Register method_counters,
Register branch_bcp,
Register Rtmp ) {
Label did_not_overflow;
@@ -2382,8 +2383,8 @@
assert_different_registers(backedge_count, Rtmp, branch_bcp);
assert(UseOnStackReplacement,"Must UseOnStackReplacement to test_backedge_count_for_osr");
- AddressLiteral limit(&InvocationCounter::InterpreterBackwardBranchLimit);
- load_contents(limit, Rtmp);
+ Address limit(method_counters, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset()));
+ ld(limit, Rtmp);
cmp_and_br_short(backedge_count, Rtmp, Assembler::lessUnsigned, Assembler::pt, did_not_overflow);
// When ProfileInterpreter is on, the backedge_count comes from the
@@ -2500,17 +2501,13 @@
// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
- int increment, int mask,
+ int increment, Address mask_addr,
Register scratch1, Register scratch2,
Condition cond, Label *where) {
ld(counter_addr, scratch1);
add(scratch1, increment, scratch1);
- if (is_simm13(mask)) {
- andcc(scratch1, mask, G0);
- } else {
- set(mask, scratch2);
- andcc(scratch1, scratch2, G0);
- }
+ ld(mask_addr, scratch2);
+ andcc(scratch1, scratch2, G0);
br(cond, false, Assembler::pn, *where);
delayed()->st(scratch1, counter_addr);
}
--- a/hotspot/src/cpu/sparc/vm/interp_masm_sparc.hpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/cpu/sparc/vm/interp_masm_sparc.hpp Wed Jul 05 20:16:58 2017 +0200
@@ -267,7 +267,7 @@
void increment_invocation_counter( Register Rcounters, Register Rtmp, Register Rtmp2 );
void increment_backedge_counter( Register Rcounters, Register Rtmp, Register Rtmp2 );
#ifndef CC_INTERP
- void test_backedge_count_for_osr( Register backedge_count, Register branch_bcp, Register Rtmp );
+ void test_backedge_count_for_osr(Register backedge_count, Register method_counters, Register branch_bcp, Register Rtmp );
#endif /* CC_INTERP */
// Object locking
@@ -280,7 +280,7 @@
void set_method_data_pointer_for_bcp();
void test_method_data_pointer(Label& zero_continue);
void verify_method_data_pointer();
- void test_invocation_counter_for_mdp(Register invocation_count, Register Rtmp, Label &profile_continue);
+ void test_invocation_counter_for_mdp(Register invocation_count, Register method_counters, Register Rtmp, Label &profile_continue);
void set_mdp_data_at(int constant, Register value);
void increment_mdp_data_at(Address counter, Register bumped_count,
@@ -291,7 +291,7 @@
Register bumped_count, Register scratch2,
bool decrement = false);
void increment_mask_and_jump(Address counter_addr,
- int increment, int mask,
+ int increment, Address mask_addr,
Register scratch1, Register scratch2,
Condition cond, Label *where);
void set_mdp_flag_at(int flag_constant, Register scratch);
--- a/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp Wed Jul 05 20:16:58 2017 +0200
@@ -282,12 +282,11 @@
void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
// Note: In tiered we increment either counters in MethodCounters* or in
// MDO depending if we're profiling or not.
- const Register Rcounters = G3_scratch;
+ const Register G3_method_counters = G3_scratch;
Label done;
if (TieredCompilation) {
const int increment = InvocationCounter::count_increment;
- const int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
Label no_mdo;
if (ProfileInterpreter) {
// If no method data exists, go to profile_continue.
@@ -297,6 +296,7 @@
Address mdo_invocation_counter(G4_scratch,
in_bytes(MethodData::invocation_counter_offset()) +
in_bytes(InvocationCounter::counter_offset()));
+ Address mask(G4_scratch, in_bytes(MethodData::invoke_mask_offset()));
__ increment_mask_and_jump(mdo_invocation_counter, increment, mask,
G3_scratch, Lscratch,
Assembler::zero, overflow);
@@ -305,20 +305,21 @@
// Increment counter in MethodCounters*
__ bind(no_mdo);
- Address invocation_counter(Rcounters,
+ Address invocation_counter(G3_method_counters,
in_bytes(MethodCounters::invocation_counter_offset()) +
in_bytes(InvocationCounter::counter_offset()));
- __ get_method_counters(Lmethod, Rcounters, done);
+ __ get_method_counters(Lmethod, G3_method_counters, done);
+ Address mask(G3_method_counters, in_bytes(MethodCounters::invoke_mask_offset()));
__ increment_mask_and_jump(invocation_counter, increment, mask,
G4_scratch, Lscratch,
Assembler::zero, overflow);
__ bind(done);
- } else {
+ } else { // not TieredCompilation
// Update standard invocation counters
- __ get_method_counters(Lmethod, Rcounters, done);
- __ increment_invocation_counter(Rcounters, O0, G4_scratch);
+ __ get_method_counters(Lmethod, G3_method_counters, done);
+ __ increment_invocation_counter(G3_method_counters, O0, G4_scratch);
if (ProfileInterpreter) {
- Address interpreter_invocation_counter(Rcounters,
+ Address interpreter_invocation_counter(G3_method_counters,
in_bytes(MethodCounters::interpreter_invocation_counter_offset()));
__ ld(interpreter_invocation_counter, G4_scratch);
__ inc(G4_scratch);
@@ -327,16 +328,16 @@
if (ProfileInterpreter && profile_method != NULL) {
// Test to see if we should create a method data oop
- AddressLiteral profile_limit((address)&InvocationCounter::InterpreterProfileLimit);
- __ load_contents(profile_limit, G3_scratch);
- __ cmp_and_br_short(O0, G3_scratch, Assembler::lessUnsigned, Assembler::pn, *profile_method_continue);
+ Address profile_limit(G3_method_counters, in_bytes(MethodCounters::interpreter_profile_limit_offset()));
+ __ ld(profile_limit, G1_scratch);
+ __ cmp_and_br_short(O0, G1_scratch, Assembler::lessUnsigned, Assembler::pn, *profile_method_continue);
// if no method data exists, go to profile_method
__ test_method_data_pointer(*profile_method);
}
- AddressLiteral invocation_limit((address)&InvocationCounter::InterpreterInvocationLimit);
- __ load_contents(invocation_limit, G3_scratch);
+ Address invocation_limit(G3_method_counters, in_bytes(MethodCounters::interpreter_invocation_limit_offset()));
+ __ ld(invocation_limit, G3_scratch);
__ cmp(O0, G3_scratch);
__ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow); // Far distance
__ delayed()->nop();
--- a/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp Wed Jul 05 20:16:58 2017 +0200
@@ -1599,13 +1599,12 @@
// Bump bytecode pointer by displacement (take the branch)
__ delayed()->add( O1_disp, Lbcp, Lbcp ); // add to bc addr
- const Register Rcounters = G3_scratch;
- __ get_method_counters(Lmethod, Rcounters, Lforward);
+ const Register G3_method_counters = G3_scratch;
+ __ get_method_counters(Lmethod, G3_method_counters, Lforward);
if (TieredCompilation) {
Label Lno_mdo, Loverflow;
int increment = InvocationCounter::count_increment;
- int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
if (ProfileInterpreter) {
// If no method data exists, go to profile_continue.
__ ld_ptr(Lmethod, Method::method_data_offset(), G4_scratch);
@@ -1614,6 +1613,7 @@
// Increment backedge counter in the MDO
Address mdo_backedge_counter(G4_scratch, in_bytes(MethodData::backedge_counter_offset()) +
in_bytes(InvocationCounter::counter_offset()));
+ Address mask(G4_scratch, in_bytes(MethodData::backedge_mask_offset()));
__ increment_mask_and_jump(mdo_backedge_counter, increment, mask, G3_scratch, O0,
Assembler::notZero, &Lforward);
__ ba_short(Loverflow);
@@ -1621,9 +1621,10 @@
// If there's no MDO, increment counter in MethodCounters*
__ bind(Lno_mdo);
- Address backedge_counter(Rcounters,
+ Address backedge_counter(G3_method_counters,
in_bytes(MethodCounters::backedge_counter_offset()) +
in_bytes(InvocationCounter::counter_offset()));
+ Address mask(G3_method_counters, in_bytes(MethodCounters::backedge_mask_offset()));
__ increment_mask_and_jump(backedge_counter, increment, mask, G4_scratch, O0,
Assembler::notZero, &Lforward);
__ bind(Loverflow);
@@ -1663,18 +1664,19 @@
__ jmp(O2, G0);
__ delayed()->nop();
- } else {
+ } else { // not TieredCompilation
// Update Backedge branch separately from invocations
const Register G4_invoke_ctr = G4;
- __ increment_backedge_counter(Rcounters, G4_invoke_ctr, G1_scratch);
+ __ increment_backedge_counter(G3_method_counters, G4_invoke_ctr, G1_scratch);
if (ProfileInterpreter) {
- __ test_invocation_counter_for_mdp(G4_invoke_ctr, G3_scratch, Lforward);
+ __ test_invocation_counter_for_mdp(G4_invoke_ctr, G3_method_counters, G1_scratch, Lforward);
if (UseOnStackReplacement) {
- __ test_backedge_count_for_osr(O2_bumped_count, l_cur_bcp, G3_scratch);
+
+ __ test_backedge_count_for_osr(O2_bumped_count, G3_method_counters, l_cur_bcp, G1_scratch);
}
} else {
if (UseOnStackReplacement) {
- __ test_backedge_count_for_osr(G4_invoke_ctr, l_cur_bcp, G3_scratch);
+ __ test_backedge_count_for_osr(G4_invoke_ctr, G3_method_counters, l_cur_bcp, G1_scratch);
}
}
}
--- a/hotspot/src/cpu/x86/vm/c1_CodeStubs_x86.cpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/cpu/x86/vm/c1_CodeStubs_x86.cpp Wed Jul 05 20:16:58 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -541,15 +541,6 @@
}
-jbyte* G1PostBarrierStub::_byte_map_base = NULL;
-
-jbyte* G1PostBarrierStub::byte_map_base_slow() {
- BarrierSet* bs = Universe::heap()->barrier_set();
- assert(bs->is_a(BarrierSet::G1SATBCTLogging),
- "Must be if we're using this.");
- return ((G1SATBCardTableModRefBS*)bs)->byte_map_base;
-}
-
void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
assert(addr()->is_register(), "Precondition.");
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86_32.cpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_32.cpp Wed Jul 05 20:16:58 2017 +0200
@@ -1360,7 +1360,7 @@
// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
- int increment, int mask,
+ int increment, Address mask,
Register scratch, bool preloaded,
Condition cond, Label* where) {
if (!preloaded) {
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86_32.hpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_32.hpp Wed Jul 05 20:16:58 2017 +0200
@@ -182,7 +182,7 @@
void increment_mdp_data_at(Register mdp_in, Register reg, int constant,
bool decrement = false);
void increment_mask_and_jump(Address counter_addr,
- int increment, int mask,
+ int increment, Address mask,
Register scratch, bool preloaded,
Condition cond, Label* where);
void set_mdp_flag_at(Register mdp_in, int flag_constant);
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp Wed Jul 05 20:16:58 2017 +0200
@@ -1426,7 +1426,7 @@
// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
- int increment, int mask,
+ int increment, Address mask,
Register scratch, bool preloaded,
Condition cond, Label* where) {
if (!preloaded) {
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86_64.hpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_64.hpp Wed Jul 05 20:16:58 2017 +0200
@@ -191,7 +191,7 @@
void increment_mdp_data_at(Register mdp_in, Register reg, int constant,
bool decrement = false);
void increment_mask_and_jump(Address counter_addr,
- int increment, int mask,
+ int increment, Address mask,
Register scratch, bool preloaded,
Condition cond, Label* where);
void set_mdp_flag_at(Register mdp_in, int flag_constant);
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp Wed Jul 05 20:16:58 2017 +0200
@@ -346,7 +346,6 @@
// depending if we're profiling or not.
if (TieredCompilation) {
int increment = InvocationCounter::count_increment;
- int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
Label no_mdo;
if (ProfileInterpreter) {
// Are we profiling?
@@ -356,6 +355,7 @@
// Increment counter in the MDO
const Address mdo_invocation_counter(rax, in_bytes(MethodData::invocation_counter_offset()) +
in_bytes(InvocationCounter::counter_offset()));
+ const Address mask(rax, in_bytes(MethodData::invoke_mask_offset()));
__ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow);
__ jmp(done);
}
@@ -366,11 +366,12 @@
InvocationCounter::counter_offset());
__ get_method_counters(rbx, rax, done);
+ const Address mask(rax, in_bytes(MethodCounters::invoke_mask_offset()));
__ increment_mask_and_jump(invocation_counter, increment, mask,
rcx, false, Assembler::zero, overflow);
__ bind(done);
- } else {
- const Address backedge_counter (rax,
+ } else { // not TieredCompilation
+ const Address backedge_counter(rax,
MethodCounters::backedge_counter_offset() +
InvocationCounter::counter_offset());
const Address invocation_counter(rax,
@@ -400,16 +401,16 @@
if (ProfileInterpreter && profile_method != NULL) {
// Test to see if we should create a method data oop
- __ cmp32(rcx,
- ExternalAddress((address)&InvocationCounter::InterpreterProfileLimit));
+ __ movptr(rax, Address(rbx, Method::method_counters_offset()));
+ __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
__ jcc(Assembler::less, *profile_method_continue);
// if no method data exists, go to profile_method
__ test_method_data_pointer(rax, *profile_method);
}
- __ cmp32(rcx,
- ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit));
+ __ movptr(rax, Address(rbx, Method::method_counters_offset()));
+ __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_invocation_limit_offset())));
__ jcc(Assembler::aboveEqual, *overflow);
__ bind(done);
}
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp Wed Jul 05 20:16:58 2017 +0200
@@ -299,7 +299,6 @@
// Note: In tiered we increment either counters in Method* or in MDO depending if we're profiling or not.
if (TieredCompilation) {
int increment = InvocationCounter::count_increment;
- int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
Label no_mdo;
if (ProfileInterpreter) {
// Are we profiling?
@@ -309,6 +308,7 @@
// Increment counter in the MDO
const Address mdo_invocation_counter(rax, in_bytes(MethodData::invocation_counter_offset()) +
in_bytes(InvocationCounter::counter_offset()));
+ const Address mask(rax, in_bytes(MethodData::invoke_mask_offset()));
__ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow);
__ jmp(done);
}
@@ -318,10 +318,11 @@
MethodCounters::invocation_counter_offset() +
InvocationCounter::counter_offset());
__ get_method_counters(rbx, rax, done);
+ const Address mask(rax, in_bytes(MethodCounters::invoke_mask_offset()));
__ increment_mask_and_jump(invocation_counter, increment, mask, rcx,
false, Assembler::zero, overflow);
__ bind(done);
- } else {
+ } else { // not TieredCompilation
const Address backedge_counter(rax,
MethodCounters::backedge_counter_offset() +
InvocationCounter::counter_offset());
@@ -350,14 +351,16 @@
if (ProfileInterpreter && profile_method != NULL) {
// Test to see if we should create a method data oop
- __ cmp32(rcx, ExternalAddress((address)&InvocationCounter::InterpreterProfileLimit));
+ __ movptr(rax, Address(rbx, Method::method_counters_offset()));
+ __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
__ jcc(Assembler::less, *profile_method_continue);
// if no method data exists, go to profile_method
__ test_method_data_pointer(rax, *profile_method);
}
- __ cmp32(rcx, ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit));
+ __ movptr(rax, Address(rbx, Method::method_counters_offset()));
+ __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_invocation_limit_offset())));
__ jcc(Assembler::aboveEqual, *overflow);
__ bind(done);
}
--- a/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp Wed Jul 05 20:16:58 2017 +0200
@@ -1621,7 +1621,6 @@
if (TieredCompilation) {
Label no_mdo;
int increment = InvocationCounter::count_increment;
- int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
if (ProfileInterpreter) {
// Are we profiling?
__ movptr(rbx, Address(rcx, in_bytes(Method::method_data_offset())));
@@ -1630,6 +1629,7 @@
// Increment the MDO backedge counter
const Address mdo_backedge_counter(rbx, in_bytes(MethodData::backedge_counter_offset()) +
in_bytes(InvocationCounter::counter_offset()));
+ const Address mask(rbx, in_bytes(MethodData::backedge_mask_offset()));
__ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
rax, false, Assembler::zero, &backedge_counter_overflow);
__ jmp(dispatch);
@@ -1637,9 +1637,10 @@
__ bind(no_mdo);
// Increment backedge counter in MethodCounters*
__ movptr(rcx, Address(rcx, Method::method_counters_offset()));
+ const Address mask(rcx, in_bytes(MethodCounters::backedge_mask_offset()));
__ increment_mask_and_jump(Address(rcx, be_offset), increment, mask,
rax, false, Assembler::zero, &backedge_counter_overflow);
- } else {
+ } else { // not TieredCompilation
// increment counter
__ movptr(rcx, Address(rcx, Method::method_counters_offset()));
__ movl(rax, Address(rcx, be_offset)); // load backedge counter
@@ -1653,8 +1654,7 @@
if (ProfileInterpreter) {
// Test to see if we should create a method data oop
- __ cmp32(rax,
- ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit));
+ __ cmp32(rax, Address(rcx, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
__ jcc(Assembler::less, dispatch);
// if no method data exists, go to profile method
@@ -1662,8 +1662,7 @@
if (UseOnStackReplacement) {
// check for overflow against rbx, which is the MDO taken count
- __ cmp32(rbx,
- ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
+ __ cmp32(rbx, Address(rcx, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset())));
__ jcc(Assembler::below, dispatch);
// When ProfileInterpreter is on, the backedge_count comes from the
@@ -1678,8 +1677,7 @@
} else {
if (UseOnStackReplacement) {
// check for overflow against rax, which is the sum of the counters
- __ cmp32(rax,
- ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
+ __ cmp32(rax, Address(rcx, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset())));
__ jcc(Assembler::aboveEqual, backedge_counter_overflow);
}
--- a/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp Wed Jul 05 20:16:58 2017 +0200
@@ -1642,7 +1642,6 @@
if (TieredCompilation) {
Label no_mdo;
int increment = InvocationCounter::count_increment;
- int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
if (ProfileInterpreter) {
// Are we profiling?
__ movptr(rbx, Address(rcx, in_bytes(Method::method_data_offset())));
@@ -1651,6 +1650,7 @@
// Increment the MDO backedge counter
const Address mdo_backedge_counter(rbx, in_bytes(MethodData::backedge_counter_offset()) +
in_bytes(InvocationCounter::counter_offset()));
+ const Address mask(rbx, in_bytes(MethodData::backedge_mask_offset()));
__ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
rax, false, Assembler::zero, &backedge_counter_overflow);
__ jmp(dispatch);
@@ -1658,9 +1658,10 @@
__ bind(no_mdo);
// Increment backedge counter in MethodCounters*
__ movptr(rcx, Address(rcx, Method::method_counters_offset()));
+ const Address mask(rcx, in_bytes(MethodCounters::backedge_mask_offset()));
__ increment_mask_and_jump(Address(rcx, be_offset), increment, mask,
rax, false, Assembler::zero, &backedge_counter_overflow);
- } else {
+ } else { // not TieredCompilation
// increment counter
__ movptr(rcx, Address(rcx, Method::method_counters_offset()));
__ movl(rax, Address(rcx, be_offset)); // load backedge counter
@@ -1674,8 +1675,7 @@
if (ProfileInterpreter) {
// Test to see if we should create a method data oop
- __ cmp32(rax,
- ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit));
+ __ cmp32(rax, Address(rcx, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
__ jcc(Assembler::less, dispatch);
// if no method data exists, go to profile method
@@ -1683,8 +1683,7 @@
if (UseOnStackReplacement) {
// check for overflow against ebx which is the MDO taken count
- __ cmp32(rbx,
- ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
+ __ cmp32(rbx, Address(rcx, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset())));
__ jcc(Assembler::below, dispatch);
// When ProfileInterpreter is on, the backedge_count comes
@@ -1702,8 +1701,7 @@
if (UseOnStackReplacement) {
// check for overflow against eax, which is the sum of the
// counters
- __ cmp32(rax,
- ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
+ __ cmp32(rax, Address(rcx, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset())));
__ jcc(Assembler::aboveEqual, backedge_counter_overflow);
}
--- a/hotspot/src/os/aix/vm/perfMemory_aix.cpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/os/aix/vm/perfMemory_aix.cpp Wed Jul 05 20:16:58 2017 +0200
@@ -31,6 +31,7 @@
#include "os_aix.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/perfMemory.hpp"
+#include "services/memTracker.hpp"
#include "utilities/exceptions.hpp"
// put OS-includes here
@@ -196,12 +197,37 @@
return pid;
}
+// Check if the given statbuf is considered a secure directory for
+// the backing store files. Returns true if the directory is considered
+// a secure location. Returns false if the statbuf is a symbolic link or
+// if an error occurred.
+static bool is_statbuf_secure(struct stat *statp) {
+ if (S_ISLNK(statp->st_mode) || !S_ISDIR(statp->st_mode)) {
+ // The path represents a link or some non-directory file type,
+ // which is not what we expected. Declare it insecure.
+ //
+ return false;
+ }
+ // We have an existing directory, check if the permissions are safe.
+ if ((statp->st_mode & (S_IWGRP|S_IWOTH)) != 0) {
+ // The directory is open for writing and could be subjected
+ // to a symlink or a hard link attack. Declare it insecure.
+ return false;
+ }
+ // See if the uid of the directory matches the effective uid of the process.
+ //
+ if (statp->st_uid != geteuid()) {
+ // The directory was not created by this user, declare it insecure.
+ return false;
+ }
+ return true;
+}
-// check if the given path is considered a secure directory for
+
+// Check if the given path is considered a secure directory for
// the backing store files. Returns true if the directory exists
// and is considered a secure location. Returns false if the path
// is a symbolic link or if an error occurred.
-//
static bool is_directory_secure(const char* path) {
struct stat statbuf;
int result = 0;
@@ -211,38 +237,276 @@
return false;
}
- // the path exists, now check it's mode
- if (S_ISLNK(statbuf.st_mode) || !S_ISDIR(statbuf.st_mode)) {
- // the path represents a link or some non-directory file type,
- // which is not what we expected. declare it insecure.
- //
+ // The path exists, see if it is secure.
+ return is_statbuf_secure(&statbuf);
+}
+
+// (Taken over from Solaris to support the O_NOFOLLOW case on AIX.)
+// Check if the given directory file descriptor is considered a secure
+// directory for the backing store files. Returns true if the directory
+// exists and is considered a secure location. Returns false if the path
+// is a symbolic link or if an error occurred.
+static bool is_dirfd_secure(int dir_fd) {
+ struct stat statbuf;
+ int result = 0;
+
+ RESTARTABLE(::fstat(dir_fd, &statbuf), result);
+ if (result == OS_ERR) {
+ return false;
+ }
+
+ // The path exists, now check its mode.
+ return is_statbuf_secure(&statbuf);
+}
+
+
+// Check to make sure fd1 and fd2 are referencing the same file system object.
+static bool is_same_fsobject(int fd1, int fd2) {
+ struct stat statbuf1;
+ struct stat statbuf2;
+ int result = 0;
+
+ RESTARTABLE(::fstat(fd1, &statbuf1), result);
+ if (result == OS_ERR) {
+ return false;
+ }
+ RESTARTABLE(::fstat(fd2, &statbuf2), result);
+ if (result == OS_ERR) {
+ return false;
+ }
+
+ if ((statbuf1.st_ino == statbuf2.st_ino) &&
+ (statbuf1.st_dev == statbuf2.st_dev)) {
+ return true;
+ } else {
return false;
}
- else {
- // we have an existing directory, check if the permissions are safe.
- //
- if ((statbuf.st_mode & (S_IWGRP|S_IWOTH)) != 0) {
- // the directory is open for writing and could be subjected
- // to a symlnk attack. declare it insecure.
- //
- return false;
+}
+
+// Helper functions for open without O_NOFOLLOW which is not present on AIX 5.3/6.1.
+// We use the jdk6 implementation here.
+#ifndef O_NOFOLLOW
+// The O_NOFOLLOW oflag doesn't exist before Solaris 5.10. Simulate its behaviour
+// here the same way jdk 5/6 HotSpot did.
+static int open_o_nofollow_impl(const char* path, int oflag, mode_t mode, bool use_mode) {
+ struct stat orig_st;
+ struct stat new_st;
+ bool create;
+ int error;
+ int fd;
+
+ create = false;
+
+ if (lstat(path, &orig_st) != 0) {
+ if (errno == ENOENT && (oflag & O_CREAT) != 0) {
+      // File doesn't exist, but we want to create it; add the O_EXCL flag
+      // to make sure no one creates it (or a symlink) before us.
+      // This works as we expect with symlinks; from the POSIX man page:
+ // 'If O_EXCL and O_CREAT are set, and path names a symbolic
+ // link, open() shall fail and set errno to [EEXIST]'.
+ oflag |= O_EXCL;
+ create = true;
+ } else {
+ // File doesn't exist, and we are not creating it.
+ return OS_ERR;
+ }
+ } else {
+ // Lstat success, check if existing file is a link.
+ if ((orig_st.st_mode & S_IFMT) == S_IFLNK) {
+ // File is a symlink.
+ errno = ELOOP;
+ return OS_ERR;
+ }
+ }
+
+ if (use_mode == true) {
+ fd = open(path, oflag, mode);
+ } else {
+ fd = open(path, oflag);
+ }
+
+ if (fd == OS_ERR) {
+ return fd;
+ }
+
+  // Can't do the before/after inode check if we created the file.
+ if (create == false) {
+ if (fstat(fd, &new_st) != 0) {
+ // Keep errno from fstat, in case close also fails.
+ error = errno;
+ ::close(fd);
+ errno = error;
+ return OS_ERR;
+ }
+
+ if (orig_st.st_dev != new_st.st_dev || orig_st.st_ino != new_st.st_ino) {
+ // File was tampered with during race window.
+ ::close(fd);
+ errno = EEXIST;
+ if (PrintMiscellaneous && Verbose) {
+ warning("possible file tampering attempt detected when opening %s", path);
+ }
+ return OS_ERR;
}
}
+
+ return fd;
+}
+
+static int open_o_nofollow(const char* path, int oflag, mode_t mode) {
+ return open_o_nofollow_impl(path, oflag, mode, true);
+}
+
+static int open_o_nofollow(const char* path, int oflag) {
+ return open_o_nofollow_impl(path, oflag, 0, false);
+}
+#endif
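A caller that wants symlink-refusing opens on every AIX level branches on whether O_NOFOLLOW exists at all, which is exactly what the later hunks in this file do. A minimal sketch of that pattern (illustrative; open_nofollow_portable is not a name used by the patch):

#include <fcntl.h>

static int open_nofollow_portable(const char* path, int oflag) {
#ifdef O_NOFOLLOW
  return ::open(path, oflag | O_NOFOLLOW);   // the kernel refuses to follow a symlink
#else
  return open_o_nofollow(path, oflag);       // jdk6-style emulation defined above
#endif
}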
+
+// Open the directory of the given path and validate it.
+// Return a DIR * of the open directory.
+static DIR *open_directory_secure(const char* dirname) {
+  // Open the directory with open() so that it can be verified to be secure
+  // with is_dirfd_secure(), then opendir() it and check that both handles
+  // refer to the same file system object. Unlike calling opendir() followed
+  // by is_directory_secure(), this does not leave a window in which the
+  // directory could be attacked.
+ int result;
+ DIR *dirp = NULL;
+
+  // O_NOFOLLOW may not be defined at build time (and is not documented for
+  // open on older AIX releases), so provide a workaround in that case.
+#ifdef O_NOFOLLOW
+ RESTARTABLE(::open(dirname, O_RDONLY|O_NOFOLLOW), result);
+#else
+ // workaround (jdk6 coding)
+ RESTARTABLE(::open_o_nofollow(dirname, O_RDONLY), result);
+#endif
+
+ if (result == OS_ERR) {
+    // Directory doesn't exist or is a symlink, so there is nothing to clean up.
+ if (PrintMiscellaneous && Verbose) {
+ if (errno == ELOOP) {
+ warning("directory %s is a symlink and is not secure\n", dirname);
+ } else {
+ warning("could not open directory %s: %s\n", dirname, strerror(errno));
+ }
+ }
+ return dirp;
+ }
+ int fd = result;
+
+ // Determine if the open directory is secure.
+ if (!is_dirfd_secure(fd)) {
+ // The directory is not a secure directory.
+ os::close(fd);
+ return dirp;
+ }
+
+ // Open the directory.
+ dirp = ::opendir(dirname);
+ if (dirp == NULL) {
+ // The directory doesn't exist, close fd and return.
+ os::close(fd);
+ return dirp;
+ }
+
+ // Check to make sure fd and dirp are referencing the same file system object.
+ if (!is_same_fsobject(fd, dirp->dd_fd)) {
+ // The directory is not secure.
+ os::close(fd);
+ os::closedir(dirp);
+ dirp = NULL;
+ return dirp;
+ }
+
+  // Close the initial fd now that we know the directory is secure.
+ os::close(fd);
+
+ return dirp;
+}
+
+// NOTE: The code below uses fchdir(), open() and unlink() because
+// fdopendir(), openat() and unlinkat() are not supported on all
+// versions. Once the support for fdopendir(), openat() and unlinkat()
+// is available on all supported versions the code can be changed
+// to use these functions.
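To make the NOTE above concrete, here is what the cleanup could look like once fdopendir(), openat() and unlinkat() are available on all supported releases. This is an illustrative sketch assuming POSIX.1-2008 behaviour, not part of the patch:

#include <dirent.h>
#include <fcntl.h>
#include <unistd.h>

static void cleanup_with_at_functions(const char* dirname) {
  int dir_fd = ::open(dirname, O_RDONLY | O_NOFOLLOW);
  if (dir_fd == -1) return;
  DIR* dirp = ::fdopendir(dir_fd);               // takes ownership of dir_fd on success
  if (dirp == NULL) { ::close(dir_fd); return; }
  struct dirent* entry;
  while ((entry = ::readdir(dirp)) != NULL) {
    // ... decide whether entry->d_name is a stale backing file, then:
    ::unlinkat(::dirfd(dirp), entry->d_name, 0); // no fchdir()/relative-unlink dance needed
  }
  ::closedir(dirp);
}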
+
+// Open the directory of the given path, validate it and set the
+// current working directory to it.
+// Return a DIR * of the open directory and the saved cwd fd.
+//
+static DIR *open_directory_secure_cwd(const char* dirname, int *saved_cwd_fd) {
+
+ // Open the directory.
+ DIR* dirp = open_directory_secure(dirname);
+ if (dirp == NULL) {
+    // Directory doesn't exist or is insecure, so there is nothing to clean up.
+ return dirp;
+ }
+ int fd = dirp->dd_fd;
+
+ // Open a fd to the cwd and save it off.
+ int result;
+ RESTARTABLE(::open(".", O_RDONLY), result);
+ if (result == OS_ERR) {
+ *saved_cwd_fd = -1;
+ } else {
+ *saved_cwd_fd = result;
+ }
+
+ // Set the current directory to dirname by using the fd of the directory.
+ result = fchdir(fd);
+
+ return dirp;
+}
+
+// Close the directory and restore the current working directory.
+static void close_directory_secure_cwd(DIR* dirp, int saved_cwd_fd) {
+
+ int result;
+ // If we have a saved cwd change back to it and close the fd.
+ if (saved_cwd_fd != -1) {
+ result = fchdir(saved_cwd_fd);
+ ::close(saved_cwd_fd);
+ }
+
+ // Close the directory.
+ os::closedir(dirp);
+}
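The two helpers above are always used as a bracket, which is what cleanup_sharedmem_resources() and create_sharedmem_resources() below do. Schematically (mirroring the patch, not new functionality):

  int saved_cwd_fd;
  DIR* dirp = open_directory_secure_cwd(dirname, &saved_cwd_fd);
  if (dirp != NULL) {
    // ... work with paths relative to the verified directory,
    //     e.g. unlink(entry->d_name) or open(filename, ...) ...
    close_directory_secure_cwd(dirp, saved_cwd_fd);   // restores the previous cwd
  }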
+
+// Check if the given file descriptor is considered secure.
+static bool is_file_secure(int fd, const char *filename) {
+
+ int result;
+ struct stat statbuf;
+
+ // Determine if the file is secure.
+ RESTARTABLE(::fstat(fd, &statbuf), result);
+ if (result == OS_ERR) {
+ if (PrintMiscellaneous && Verbose) {
+ warning("fstat failed on %s: %s\n", filename, strerror(errno));
+ }
+ return false;
+ }
+ if (statbuf.st_nlink > 1) {
+ // A file with multiple links is not expected.
+ if (PrintMiscellaneous && Verbose) {
+ warning("file %s has multiple links\n", filename);
+ }
+ return false;
+ }
return true;
}
-
-// return the user name for the given user id
+// Return the user name for the given user id.
//
-// the caller is expected to free the allocated memory.
-//
+// The caller is expected to free the allocated memory.
static char* get_user_name(uid_t uid) {
struct passwd pwent;
- // determine the max pwbuf size from sysconf, and hardcode
+ // Determine the max pwbuf size from sysconf, and hardcode
// a default if this not available through sysconf.
- //
long bufsize = sysconf(_SC_GETPW_R_SIZE_MAX);
if (bufsize == -1)
bufsize = 1024;
@@ -344,7 +608,8 @@
strcat(usrdir_name, "/");
strcat(usrdir_name, dentry->d_name);
- DIR* subdirp = os::opendir(usrdir_name);
+ // Open the user directory.
+ DIR* subdirp = open_directory_secure(usrdir_name);
if (subdirp == NULL) {
FREE_C_HEAP_ARRAY(char, usrdir_name);
@@ -464,28 +729,7 @@
}
}
-
-// remove file
-//
-// this method removes the file with the given file name in the
-// named directory.
-//
-static void remove_file(const char* dirname, const char* filename) {
-
- size_t nbytes = strlen(dirname) + strlen(filename) + 2;
- char* path = NEW_C_HEAP_ARRAY(char, nbytes, mtInternal);
-
- strcpy(path, dirname);
- strcat(path, "/");
- strcat(path, filename);
-
- remove_file(path);
-
- FREE_C_HEAP_ARRAY(char, path);
-}
-
-
-// cleanup stale shared memory resources
+// Cleanup stale shared memory resources
//
// This method attempts to remove all stale shared memory files in
// the named user temporary directory. It scans the named directory
@@ -493,33 +737,26 @@
// process id is extracted from the file name and a test is run to
// determine if the process is alive. If the process is not alive,
// any stale file resources are removed.
-//
static void cleanup_sharedmem_resources(const char* dirname) {
- // open the user temp directory
- DIR* dirp = os::opendir(dirname);
-
+ int saved_cwd_fd;
+ // Open the directory.
+ DIR* dirp = open_directory_secure_cwd(dirname, &saved_cwd_fd);
if (dirp == NULL) {
- // directory doesn't exist, so there is nothing to cleanup
+    // Directory doesn't exist or is insecure, so there is nothing to clean up.
return;
}
- if (!is_directory_secure(dirname)) {
- // the directory is not a secure directory
- os::closedir(dirp);
- return;
- }
-
- // for each entry in the directory that matches the expected file
+ // For each entry in the directory that matches the expected file
// name pattern, determine if the file resources are stale and if
// so, remove the file resources. Note, instrumented HotSpot processes
// for this user may start and/or terminate during this search and
// remove or create new files in this directory. The behavior of this
// loop under these conditions is dependent upon the implementation of
// opendir/readdir.
- //
struct dirent* entry;
char* dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(dirname), mtInternal);
+
errno = 0;
while ((entry = os::readdir(dirp, (struct dirent *)dbuf)) != NULL) {
@@ -529,56 +766,55 @@
if (strcmp(entry->d_name, ".") != 0 && strcmp(entry->d_name, "..") != 0) {
- // attempt to remove all unexpected files, except "." and ".."
- remove_file(dirname, entry->d_name);
+ // Attempt to remove all unexpected files, except "." and "..".
+ unlink(entry->d_name);
}
errno = 0;
continue;
}
- // we now have a file name that converts to a valid integer
+ // We now have a file name that converts to a valid integer
// that could represent a process id . if this process id
// matches the current process id or the process is not running,
// then remove the stale file resources.
//
- // process liveness is detected by sending signal number 0 to
+ // Process liveness is detected by sending signal number 0 to
// the process id (see kill(2)). if kill determines that the
// process does not exist, then the file resources are removed.
// if kill determines that that we don't have permission to
// signal the process, then the file resources are assumed to
// be stale and are removed because the resources for such a
// process should be in a different user specific directory.
- //
if ((pid == os::current_process_id()) ||
(kill(pid, 0) == OS_ERR && (errno == ESRCH || errno == EPERM))) {
- remove_file(dirname, entry->d_name);
+ unlink(entry->d_name);
}
errno = 0;
}
- os::closedir(dirp);
- FREE_C_HEAP_ARRAY(char, dbuf);
+
+ // Close the directory and reset the current working directory.
+ close_directory_secure_cwd(dirp, saved_cwd_fd);
+
+ FREE_C_HEAP_ARRAY(char, dbuf, mtInternal);
}
-// make the user specific temporary directory. Returns true if
+// Make the user specific temporary directory. Returns true if
// the directory exists and is secure upon return. Returns false
// if the directory exists but is either a symlink, is otherwise
// insecure, or if an error occurred.
-//
static bool make_user_tmp_dir(const char* dirname) {
- // create the directory with 0755 permissions. note that the directory
+  // Create the directory with 0755 permissions. Note that the directory
// will be owned by euid::egid, which may not be the same as uid::gid.
- //
if (mkdir(dirname, S_IRWXU|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH) == OS_ERR) {
if (errno == EEXIST) {
// The directory already exists and was probably created by another
// JVM instance. However, this could also be the result of a
// deliberate symlink. Verify that the existing directory is safe.
- //
if (!is_directory_secure(dirname)) {
- // directory is not secure
+ // Directory is not secure.
if (PrintMiscellaneous && Verbose) {
warning("%s directory is insecure\n", dirname);
}
@@ -614,19 +850,63 @@
return -1;
}
+ int saved_cwd_fd;
+ // Open the directory and set the current working directory to it.
+ DIR* dirp = open_directory_secure_cwd(dirname, &saved_cwd_fd);
+ if (dirp == NULL) {
+    // Directory doesn't exist or is insecure, so the shared memory
+    // file cannot be created.
+ return -1;
+ }
+
+ // Open the filename in the current directory.
+ // Cannot use O_TRUNC here; truncation of an existing file has to happen
+ // after the is_file_secure() check below.
int result;
- RESTARTABLE(::open(filename, O_RDWR|O_CREAT|O_TRUNC, S_IREAD|S_IWRITE), result);
+  // O_NOFOLLOW may not be defined at build time (and is not documented for
+  // open on older AIX releases), so provide a workaround in that case.
+#ifdef O_NOFOLLOW
+ RESTARTABLE(::open(filename, O_RDWR|O_CREAT|O_NOFOLLOW, S_IREAD|S_IWRITE), result);
+#else
+ // workaround function (jdk6 code)
+ RESTARTABLE(::open_o_nofollow(filename, O_RDWR|O_CREAT, S_IREAD|S_IWRITE), result);
+#endif
+
if (result == OS_ERR) {
if (PrintMiscellaneous && Verbose) {
- warning("could not create file %s: %s\n", filename, strerror(errno));
+ if (errno == ELOOP) {
+ warning("file %s is a symlink and is not secure\n", filename);
+ } else {
+ warning("could not create file %s: %s\n", filename, strerror(errno));
+ }
}
+ // Close the directory and reset the current working directory.
+ close_directory_secure_cwd(dirp, saved_cwd_fd);
+
return -1;
}
+ // Close the directory and reset the current working directory.
+ close_directory_secure_cwd(dirp, saved_cwd_fd);
// save the file descriptor
int fd = result;
+ // Check to see if the file is secure.
+ if (!is_file_secure(fd, filename)) {
+ ::close(fd);
+ return -1;
+ }
+
+ // Truncate the file to get rid of any existing data.
+ RESTARTABLE(::ftruncate(fd, (off_t)0), result);
+ if (result == OS_ERR) {
+ if (PrintMiscellaneous && Verbose) {
+ warning("could not truncate shared memory file: %s\n", strerror(errno));
+ }
+ ::close(fd);
+ return -1;
+ }
// set the file size
RESTARTABLE(::ftruncate(fd, (off_t)size), result);
if (result == OS_ERR) {
@@ -648,7 +928,14 @@
// open the file
int result;
+  // O_NOFOLLOW may not be defined at build time (and is not documented for
+  // open on older AIX releases), so provide a workaround in that case.
+#ifdef O_NOFOLLOW
RESTARTABLE(::open(filename, oflags), result);
+#else
+ RESTARTABLE(::open_o_nofollow(filename, oflags), result);
+#endif
+
if (result == OS_ERR) {
if (errno == ENOENT) {
THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(),
@@ -662,8 +949,15 @@
THROW_MSG_0(vmSymbols::java_io_IOException(), strerror(errno));
}
}
+ int fd = result;
- return result;
+ // Check to see if the file is secure.
+ if (!is_file_secure(fd, filename)) {
+ ::close(fd);
+ return -1;
+ }
+
+ return fd;
}
// create a named shared memory region. returns the address of the
@@ -695,13 +989,21 @@
char* dirname = get_user_tmp_dir(user_name);
char* filename = get_sharedmem_filename(dirname, vmid);
+ // Get the short filename.
+ char* short_filename = strrchr(filename, '/');
+ if (short_filename == NULL) {
+ short_filename = filename;
+ } else {
+ short_filename++;
+ }
+
// cleanup any stale shared memory files
cleanup_sharedmem_resources(dirname);
assert(((size > 0) && (size % os::vm_page_size() == 0)),
"unexpected PerfMemory region size");
- fd = create_sharedmem_resources(dirname, filename, size);
+ fd = create_sharedmem_resources(dirname, short_filename, size);
FREE_C_HEAP_ARRAY(char, user_name);
FREE_C_HEAP_ARRAY(char, dirname);
@@ -733,6 +1035,9 @@
// clear the shared memory region
(void)::memset((void*) mapAddress, 0, size);
+  // This mapping does not go through the os API, so the reservation has to be recorded from here.
+ MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC, mtInternal);
+
return mapAddress;
}
@@ -807,7 +1112,7 @@
char* mapAddress;
int result;
int fd;
- size_t size;
+ size_t size = 0;
const char* luser = NULL;
int mmap_prot;
@@ -819,12 +1124,18 @@
// constructs for the file and the shared memory mapping.
if (mode == PerfMemory::PERF_MODE_RO) {
mmap_prot = PROT_READ;
+
+    // O_NOFOLLOW may not be defined at build time (and is not documented for open on older AIX).
+#ifdef O_NOFOLLOW
+ file_flags = O_RDONLY | O_NOFOLLOW;
+#else
file_flags = O_RDONLY;
+#endif
}
else if (mode == PerfMemory::PERF_MODE_RW) {
#ifdef LATER
mmap_prot = PROT_READ | PROT_WRITE;
- file_flags = O_RDWR;
+ file_flags = O_RDWR | O_NOFOLLOW;
#else
THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
"Unsupported access mode");
@@ -853,9 +1164,9 @@
// store file, we don't follow them when attaching either.
//
if (!is_directory_secure(dirname)) {
- FREE_C_HEAP_ARRAY(char, dirname);
+ FREE_C_HEAP_ARRAY(char, dirname, mtInternal);
if (luser != user) {
- FREE_C_HEAP_ARRAY(char, luser);
+ FREE_C_HEAP_ARRAY(char, luser, mtInternal);
}
THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
"Process not found");
@@ -901,6 +1212,9 @@
"Could not map PerfMemory");
}
+  // This mapping does not go through the os API, so the reservation has to be recorded from here.
+ MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC, mtInternal);
+
*addr = mapAddress;
*sizep = size;
--- a/hotspot/src/os/windows/vm/os_windows.cpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/os/windows/vm/os_windows.cpp Wed Jul 05 20:16:58 2017 +0200
@@ -428,9 +428,9 @@
}
// Diagnostic code to investigate JDK-6573254
- int res = 50115; // non-java thread
+ int res = 30115; // non-java thread
if (thread->is_Java_thread()) {
- res = 40115; // java thread
+ res = 20115; // java thread
}
// Install a win32 structured exception handler around every thread created
@@ -3791,6 +3791,7 @@
static INIT_ONCE init_once_crit_sect = INIT_ONCE_STATIC_INIT;
static CRITICAL_SECTION crit_sect;
+ static volatile jint process_exiting = 0;
int i, j;
DWORD res;
HANDLE hproc, hthr;
@@ -3798,10 +3799,10 @@
// The first thread that reached this point, initializes the critical section.
if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) {
warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__);
- } else {
+ } else if (OrderAccess::load_acquire(&process_exiting) == 0) {
EnterCriticalSection(&crit_sect);
- if (what == EPT_THREAD) {
+ if (what == EPT_THREAD && OrderAccess::load_acquire(&process_exiting) == 0) {
// Remove from the array those handles of the threads that have completed exiting.
for (i = 0, j = 0; i < handle_count; ++i) {
res = WaitForSingleObject(handles[i], 0 /* don't wait */);
@@ -3856,7 +3857,7 @@
// The current exiting thread has stored its handle in the array, and now
// should leave the critical section before calling _endthreadex().
- } else { // what != EPT_THREAD
+ } else if (what != EPT_THREAD) {
if (handle_count > 0) {
// Before ending the process, make sure all the threads that had called
// _endthreadex() completed.
@@ -3882,24 +3883,28 @@
handle_count = 0;
}
- // End the process, not leaving critical section.
- // This makes sure no other thread executes exit-related code at the same
- // time, thus a race is avoided.
- if (what == EPT_PROCESS) {
- ::exit(exit_code);
- } else {
- _exit(exit_code);
- }
+ OrderAccess::release_store(&process_exiting, 1);
}
LeaveCriticalSection(&crit_sect);
}
+
+ if (what == EPT_THREAD) {
+ while (OrderAccess::load_acquire(&process_exiting) != 0) {
+ // Some other thread is about to call exit(), so we
+ // don't let the current thread proceed to _endthreadex()
+ SuspendThread(GetCurrentThread());
+      // Avoid a busy-wait loop if SuspendThread() failed.
+ Sleep(EXIT_TIMEOUT);
+ }
+ }
}
// We are here if either
// - there's no 'race at exit' bug on this OS release;
// - initialization of the critical section failed (unlikely);
- // - the current thread has stored its handle and left the critical section.
+ // - the current thread has stored its handle and left the critical section;
+ // - the process-exiting thread has raised the flag and left the critical section.
if (what == EPT_THREAD) {
_endthreadex((unsigned)exit_code);
} else if (what == EPT_PROCESS) {
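The restructured exit path above replaces "call exit() while holding the critical section" with a release-store of a process_exiting flag plus acquire-loads by threads that only want to end themselves. A standalone sketch of the same pattern in portable C++11 atomics (illustrative; not HotSpot or Win32 code):

#include <atomic>
#include <chrono>
#include <thread>

static std::atomic<int> process_exiting{0};

void exit_thread_or_process(bool whole_process, int exit_code) {
  if (whole_process) {
    // Publish the decision before leaving the critical region.
    process_exiting.store(1, std::memory_order_release);
    // ... wait for threads already past the point of no return, then exit(exit_code) ...
  } else {
    while (process_exiting.load(std::memory_order_acquire) != 0) {
      // Another thread is about to exit() the whole process; park instead of racing
      // into thread teardown (the patch uses SuspendThread() plus a Sleep() fallback).
      std::this_thread::sleep_for(std::chrono::milliseconds(10));
    }
    // ... _endthreadex(exit_code)-equivalent teardown ...
  }
}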
--- a/hotspot/src/share/vm/c1/c1_CodeStubs.hpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/c1/c1_CodeStubs.hpp Wed Jul 05 20:16:58 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -601,15 +601,6 @@
LIR_Opr _addr;
LIR_Opr _new_val;
- static jbyte* _byte_map_base;
- static jbyte* byte_map_base_slow();
- static jbyte* byte_map_base() {
- if (_byte_map_base == NULL) {
- _byte_map_base = byte_map_base_slow();
- }
- return _byte_map_base;
- }
-
public:
// addr (the address of the object head) and new_val must be registers.
G1PostBarrierStub(LIR_Opr addr, LIR_Opr new_val): _addr(addr), _new_val(new_val) { }
--- a/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp Wed Jul 05 20:16:58 2017 +0200
@@ -32,6 +32,7 @@
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "ci/ciObjArray.hpp"
+#include "runtime/arguments.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vm_version.hpp"
@@ -3351,7 +3352,12 @@
if (!x->inlinee()->is_accessor()) {
CodeEmitInfo* info = state_for(x, x->state(), true);
// Notify the runtime very infrequently only to take care of counter overflows
- increment_event_counter_impl(info, x->inlinee(), (1 << Tier23InlineeNotifyFreqLog) - 1, InvocationEntryBci, false, true);
+ int freq_log = Tier23InlineeNotifyFreqLog;
+ double scale;
+ if (_method->has_option_value("CompileThresholdScaling", scale)) {
+ freq_log = Arguments::scaled_freq_log(freq_log, scale);
+ }
+ increment_event_counter_impl(info, x->inlinee(), right_n_bits(freq_log), InvocationEntryBci, false, true);
}
}
@@ -3366,7 +3372,11 @@
ShouldNotReachHere();
}
// Increment the appropriate invocation/backedge counter and notify the runtime.
- increment_event_counter_impl(info, info->scope()->method(), (1 << freq_log) - 1, bci, backedge, true);
+ double scale;
+ if (_method->has_option_value("CompileThresholdScaling", scale)) {
+ freq_log = Arguments::scaled_freq_log(freq_log, scale);
+ }
+ increment_event_counter_impl(info, info->scope()->method(), right_n_bits(freq_log), bci, backedge, true);
}
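Both notification sites above now derive the notification mask from a possibly scaled frequency log, so a smaller scale means more frequent runtime notifications. A tiny worked example (illustrative values; right_n_bits_model stands in for the JVM's right_n_bits):

#include <cstdio>

static int right_n_bits_model(int n) { return (1 << n) - 1; }

int main() {
  int freq_log = 10;         // assumed notify-every-2^10-events setting
  int scaled_freq_log = 8;   // what a CompileThresholdScaling of ~0.25 would shrink it to
  std::printf("unscaled mask 0x%x -> notify every %d events\n",
              right_n_bits_model(freq_log), 1 << freq_log);
  std::printf("scaled mask   0x%x -> notify every %d events\n",
              right_n_bits_model(scaled_freq_log), 1 << scaled_freq_log);
  return 0;
}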
void LIRGenerator::decrement_age(CodeEmitInfo* info) {
--- a/hotspot/src/share/vm/classfile/compactHashtable.cpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/classfile/compactHashtable.cpp Wed Jul 05 20:16:58 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -221,6 +221,30 @@
return (const char*)end;
}
+template <class T, class N> void CompactHashtable<T, N>::symbols_do(SymbolClosure *cl) {
+ assert(!DumpSharedSpaces, "run-time only");
+ for (juint i = 0; i < _bucket_count; i ++) {
+ juint bucket_info = _buckets[i];
+ juint bucket_offset = BUCKET_OFFSET(bucket_info);
+ int bucket_type = BUCKET_TYPE(bucket_info);
+ juint* bucket = _buckets + bucket_offset;
+ juint* bucket_end = _buckets;
+
+ Symbol* sym;
+ if (bucket_type == COMPACT_BUCKET_TYPE) {
+ sym = (Symbol*)((void*)(_base_address + bucket[0]));
+ cl->do_symbol(&sym);
+ } else {
+ bucket_end += BUCKET_OFFSET(_buckets[i + 1]);
+ while (bucket < bucket_end) {
+ sym = (Symbol*)((void*)(_base_address + bucket[1]));
+ cl->do_symbol(&sym);
+ bucket += 2;
+ }
+ }
+ }
+}
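A short usage sketch (illustrative, assuming the SymbolClosure::do_symbol(Symbol**) interface used above): any closure handed to SymbolTable::symbols_do() — see the symbolTable.cpp hunk below — now also visits the symbols stored in the shared archive table.

  class CountSymbolsClosure : public SymbolClosure {
    int _count;
   public:
    CountSymbolsClosure() : _count(0) { }
    void do_symbol(Symbol** p) { _count++; }
    int count() const { return _count; }
  };
  //   CountSymbolsClosure counter;
  //   SymbolTable::symbols_do(&counter);   // includes _shared_table after this change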
+
// Explicitly instantiate these types
template class CompactHashtable<Symbol*, char>;
--- a/hotspot/src/share/vm/classfile/compactHashtable.hpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/classfile/compactHashtable.hpp Wed Jul 05 20:16:58 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -249,6 +249,9 @@
}
return NULL;
}
+
+ // iterate over symbols
+ void symbols_do(SymbolClosure *cl);
};
////////////////////////////////////////////////////////////////////////
--- a/hotspot/src/share/vm/classfile/symbolTable.cpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/classfile/symbolTable.cpp Wed Jul 05 20:16:58 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -82,6 +82,10 @@
// Call function for all symbols in the symbol table.
void SymbolTable::symbols_do(SymbolClosure *cl) {
+ // all symbols from shared table
+ _shared_table.symbols_do(cl);
+
+ // all symbols from the dynamic table
const int n = the_table()->table_size();
for (int i = 0; i < n; i++) {
for (HashtableEntry<Symbol*, mtSymbol>* p = the_table()->bucket(i);
--- a/hotspot/src/share/vm/classfile/verifier.cpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/classfile/verifier.cpp Wed Jul 05 20:16:58 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1546,10 +1546,15 @@
no_control_flow = true; break;
case Bytecodes::_getstatic :
case Bytecodes::_putstatic :
+ // pass TRUE, operand can be an array type for getstatic/putstatic.
+ verify_field_instructions(
+        &bcs, &current_frame, cp, true, CHECK_VERIFY(this));
+ no_control_flow = false; break;
case Bytecodes::_getfield :
case Bytecodes::_putfield :
+ // pass FALSE, operand can't be an array type for getfield/putfield.
verify_field_instructions(
-          &bcs, &current_frame, cp, CHECK_VERIFY(this));
+          &bcs, &current_frame, cp, false, CHECK_VERIFY(this));
no_control_flow = false; break;
case Bytecodes::_invokevirtual :
case Bytecodes::_invokespecial :
@@ -2107,6 +2112,7 @@
void ClassVerifier::verify_field_instructions(RawBytecodeStream* bcs,
StackMapFrame* current_frame,
constantPoolHandle cp,
+ bool allow_arrays,
TRAPS) {
u2 index = bcs->get_index_u2();
verify_cp_type(bcs->bci(), index, cp,
@@ -2126,8 +2132,8 @@
// Get referenced class type
VerificationType ref_class_type = cp_ref_index_to_type(
index, cp, CHECK_VERIFY(this));
- if (!ref_class_type.is_object()) {
- /* Unreachable? Class file parser verifies Fieldref contents */
+ if (!ref_class_type.is_object() &&
+ (!allow_arrays || !ref_class_type.is_array())) {
verify_error(ErrorContext::bad_type(bcs->bci(),
TypeOrigin::cp(index, ref_class_type)),
"Expecting reference to class in class %s at constant pool index %d",
--- a/hotspot/src/share/vm/classfile/verifier.hpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/classfile/verifier.hpp Wed Jul 05 20:16:58 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -297,7 +297,7 @@
void verify_field_instructions(
RawBytecodeStream* bcs, StackMapFrame* current_frame,
- constantPoolHandle cp, TRAPS);
+ constantPoolHandle cp, bool allow_arrays, TRAPS);
void verify_invoke_init(
RawBytecodeStream* bcs, u2 ref_index, VerificationType ref_class_type,
--- a/hotspot/src/share/vm/code/codeCache.cpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/code/codeCache.cpp Wed Jul 05 20:16:58 2017 +0200
@@ -233,8 +233,8 @@
ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
// Determine alignment
const size_t page_size = os::can_execute_large_page_memory() ?
- MIN2(os::page_size_for_region(InitialCodeCacheSize, 8),
- os::page_size_for_region(size, 8)) :
+ MIN2(os::page_size_for_region_aligned(InitialCodeCacheSize, 8),
+ os::page_size_for_region_aligned(size, 8)) :
os::vm_page_size();
const size_t granularity = os::vm_allocation_granularity();
const size_t r_align = MAX2(page_size, granularity);
--- a/hotspot/src/share/vm/compiler/compileBroker.cpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/compiler/compileBroker.cpp Wed Jul 05 20:16:58 2017 +0200
@@ -1470,7 +1470,9 @@
// The method may be explicitly excluded by the user.
bool quietly;
- if (CompilerOracle::should_exclude(method, quietly)) {
+ double scale;
+ if (CompilerOracle::should_exclude(method, quietly)
+ || (CompilerOracle::has_option_value(method, "CompileThresholdScaling", scale) && scale == 0)) {
if (!quietly) {
// This does not happen quietly...
ResourceMark rm;
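With this check, a per-method CompileThresholdScaling of zero behaves like an explicit exclude. For illustration only (the exact CompileCommand option syntax is an assumption to be checked against compilerOracle.cpp, not something this patch defines):

  //   -XX:CompileCommand=option,pkg/MyClass.hotMethod,double,CompileThresholdScaling,0.0
  // would keep pkg.MyClass::hotMethod interpreted, much like
  //   -XX:CompileCommand=exclude,pkg/MyClass.hotMethod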
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp Wed Jul 05 20:16:58 2017 +0200
@@ -162,8 +162,8 @@
"we should have already filtered out humongous regions");
assert(_end == orig_end(),
"we should have already filtered out humongous regions");
-
- _in_collection_set = false;
+ assert(!_in_collection_set,
+ err_msg("Should not clear heap region %u in the collection set", hrm_index()));
set_allocation_context(AllocationContext::system());
set_young_index_in_cset(-1);
--- a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp Wed Jul 05 20:16:58 2017 +0200
@@ -1194,8 +1194,10 @@
return real_forwardee(old);
}
- new_obj = _next_gen->par_promote(par_scan_state->thread_num(),
- old, m, sz);
+ if (!_promotion_failed) {
+ new_obj = _next_gen->par_promote(par_scan_state->thread_num(),
+ old, m, sz);
+ }
if (new_obj == NULL) {
// promotion failed, forward to self
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/generationSizer.cpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/generationSizer.cpp Wed Jul 05 20:16:58 2017 +0200
@@ -61,9 +61,9 @@
void GenerationSizer::initialize_size_info() {
trace_gen_sizes("ps heap raw");
- const size_t max_page_sz = os::page_size_for_region(_max_heap_byte_size, 8);
+ const size_t max_page_sz = os::page_size_for_region_aligned(_max_heap_byte_size, 8);
const size_t min_pages = 4; // 1 for eden + 1 for each survivor + 1 for old
- const size_t min_page_sz = os::page_size_for_region(_min_heap_byte_size, min_pages);
+ const size_t min_page_sz = os::page_size_for_region_aligned(_min_heap_byte_size, min_pages);
const size_t page_sz = MIN2(max_page_sz, min_page_sz);
// Can a page size be something else than a power of two?
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.cpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.cpp Wed Jul 05 20:16:58 2017 +0200
@@ -41,7 +41,7 @@
const size_t words = bits / BitsPerWord;
const size_t raw_bytes = words * sizeof(idx_t);
- const size_t page_sz = os::page_size_for_region(raw_bytes, 10);
+ const size_t page_sz = os::page_size_for_region_aligned(raw_bytes, 10);
const size_t granularity = os::vm_allocation_granularity();
_reserved_byte_size = align_size_up(raw_bytes, MAX2(page_sz, granularity));
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Wed Jul 05 20:16:58 2017 +0200
@@ -403,7 +403,7 @@
ParallelCompactData::create_vspace(size_t count, size_t element_size)
{
const size_t raw_bytes = count * element_size;
- const size_t page_sz = os::page_size_for_region(raw_bytes, 10);
+ const size_t page_sz = os::page_size_for_region_aligned(raw_bytes, 10);
const size_t granularity = os::vm_allocation_granularity();
_reserved_byte_size = align_size_up(raw_bytes, MAX2(page_sz, granularity));
--- a/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp Wed Jul 05 20:16:58 2017 +0200
@@ -53,7 +53,7 @@
/*
* USELABELS - If using GCC, then use labels for the opcode dispatching
* rather -then a switch statement. This improves performance because it
- * gives us the oportunity to have the instructions that calculate the
+ * gives us the opportunity to have the instructions that calculate the
* next opcode to jump to be intermixed with the rest of the instructions
* that implement the opcode (see UPDATE_PC_AND_TOS_AND_CONTINUE macro).
*/
--- a/hotspot/src/share/vm/interpreter/invocationCounter.hpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/interpreter/invocationCounter.hpp Wed Jul 05 20:16:58 2017 +0200
@@ -36,7 +36,7 @@
// Implementation notes: For space reasons, state & counter are both encoded in one word,
// The state is encoded using some of the least significant bits, the counter is using the
// more significant bits. The counter is incremented before a method is activated and an
-// action is triggered when when count() > limit().
+// action is triggered when count() > limit().
class InvocationCounter VALUE_OBJ_CLASS_SPEC {
friend class VMStructs;
@@ -48,7 +48,6 @@
number_of_state_bits = 2,
number_of_carry_bits = 1,
number_of_noncount_bits = number_of_state_bits + number_of_carry_bits,
- number_of_count_bits = BitsPerInt - number_of_noncount_bits,
state_limit = nth_bit(number_of_state_bits),
count_grain = nth_bit(number_of_state_bits + number_of_carry_bits),
carry_mask = right_n_bits(number_of_carry_bits) << number_of_state_bits,
@@ -68,6 +67,7 @@
count_increment = count_grain, // use this value to increment the 32bit _counter word
count_mask_value = count_mask, // use this value to mask the backedge counter
count_shift = number_of_noncount_bits,
+ number_of_count_bits = BitsPerInt - number_of_noncount_bits,
count_limit = nth_bit(number_of_count_bits - 1)
};
--- a/hotspot/src/share/vm/memory/heap.cpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/memory/heap.cpp Wed Jul 05 20:16:58 2017 +0200
@@ -104,8 +104,8 @@
size_t page_size = os::vm_page_size();
if (os::can_execute_large_page_memory()) {
const size_t min_pages = 8;
- page_size = MIN2(os::page_size_for_region(committed_size, min_pages),
- os::page_size_for_region(rs.size(), min_pages));
+ page_size = MIN2(os::page_size_for_region_aligned(committed_size, min_pages),
+ os::page_size_for_region_aligned(rs.size(), min_pages));
}
const size_t granularity = os::vm_allocation_granularity();
--- a/hotspot/src/share/vm/oops/method.cpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/oops/method.cpp Wed Jul 05 20:16:58 2017 +0200
@@ -412,15 +412,14 @@
}
methodHandle mh(m);
- ClassLoaderData* loader_data = mh->method_holder()->class_loader_data();
- MethodCounters* counters = MethodCounters::allocate(loader_data, THREAD);
+ MethodCounters* counters = MethodCounters::allocate(mh, THREAD);
if (HAS_PENDING_EXCEPTION) {
CompileBroker::log_metaspace_failure();
ClassLoaderDataGraph::set_metaspace_oom(true);
return NULL; // return the exception (which is cleared)
}
if (!mh->init_method_counters(counters)) {
- MetadataFactory::free_metadata(loader_data, counters);
+ MetadataFactory::free_metadata(mh->method_holder()->class_loader_data(), counters);
}
return mh->method_counters();
}
--- a/hotspot/src/share/vm/oops/methodCounters.cpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/oops/methodCounters.cpp Wed Jul 05 20:16:58 2017 +0200
@@ -23,10 +23,11 @@
*/
#include "precompiled.hpp"
#include "oops/methodCounters.hpp"
-#include "runtime/thread.inline.hpp"
+#include "runtime/handles.inline.hpp"
-MethodCounters* MethodCounters::allocate(ClassLoaderData* loader_data, TRAPS) {
- return new(loader_data, size(), false, MetaspaceObj::MethodCountersType, THREAD) MethodCounters();
+MethodCounters* MethodCounters::allocate(methodHandle mh, TRAPS) {
+ ClassLoaderData* loader_data = mh->method_holder()->class_loader_data();
+ return new(loader_data, size(), false, MetaspaceObj::MethodCountersType, THREAD) MethodCounters(mh);
}
void MethodCounters::clear_counters() {
--- a/hotspot/src/share/vm/oops/methodCounters.hpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/oops/methodCounters.hpp Wed Jul 05 20:16:58 2017 +0200
@@ -26,7 +26,9 @@
#define SHARE_VM_OOPS_METHODCOUNTERS_HPP
#include "oops/metadata.hpp"
+#include "compiler/compilerOracle.hpp"
#include "interpreter/invocationCounter.hpp"
+#include "runtime/arguments.hpp"
class MethodCounters: public MetaspaceObj {
friend class VMStructs;
@@ -45,7 +47,11 @@
// 3. (INT_MIN..0] - method is hot and will deopt and get
// recompiled without the counters
int _nmethod_age;
-
+ int _interpreter_invocation_limit; // per-method InterpreterInvocationLimit
+ int _interpreter_backward_branch_limit; // per-method InterpreterBackwardBranchLimit
+ int _interpreter_profile_limit; // per-method InterpreterProfileLimit
+ int _invoke_mask; // per-method Tier0InvokeNotifyFreqLog
+ int _backedge_mask; // per-method Tier0BackedgeNotifyFreqLog
#ifdef TIERED
float _rate; // Events (invocation and backedge counter increments) per millisecond
jlong _prev_time; // Previous time the rate was acquired
@@ -53,15 +59,15 @@
u1 _highest_osr_comp_level; // Same for OSR level
#endif
- MethodCounters() : _interpreter_invocation_count(0),
- _interpreter_throwout_count(0),
- _number_of_breakpoints(0),
- _nmethod_age(INT_MAX)
+ MethodCounters(methodHandle mh) : _interpreter_invocation_count(0),
+ _interpreter_throwout_count(0),
+ _number_of_breakpoints(0),
+ _nmethod_age(INT_MAX)
#ifdef TIERED
- , _rate(0),
- _prev_time(0),
- _highest_comp_level(0),
- _highest_osr_comp_level(0)
+ , _rate(0),
+ _prev_time(0),
+ _highest_comp_level(0),
+ _highest_osr_comp_level(0)
#endif
{
invocation_counter()->init();
@@ -70,10 +76,28 @@
if (StressCodeAging) {
set_nmethod_age(HotMethodDetectionLimit);
}
+
+ // Set per-method thresholds.
+ double scale = 1.0;
+ CompilerOracle::has_option_value(mh, "CompileThresholdScaling", scale);
+
+ int compile_threshold = Arguments::scaled_compile_threshold(CompileThreshold, scale);
+ _interpreter_invocation_limit = compile_threshold << InvocationCounter::count_shift;
+ if (ProfileInterpreter) {
+ // If interpreter profiling is enabled, the backward branch limit
+ // is compared against the method data counter rather than an invocation
+ // counter, therefore no shifting of bits is required.
+ _interpreter_backward_branch_limit = (compile_threshold * (OnStackReplacePercentage - InterpreterProfilePercentage)) / 100;
+ } else {
+ _interpreter_backward_branch_limit = ((compile_threshold * OnStackReplacePercentage) / 100) << InvocationCounter::count_shift;
+ }
+ _interpreter_profile_limit = ((compile_threshold * InterpreterProfilePercentage) / 100) << InvocationCounter::count_shift;
+ _invoke_mask = right_n_bits(Arguments::scaled_freq_log(Tier0InvokeNotifyFreqLog, scale)) << InvocationCounter::count_shift;
+ _backedge_mask = right_n_bits(Arguments::scaled_freq_log(Tier0BackedgeNotifyFreqLog, scale)) << InvocationCounter::count_shift;
}
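Since several thresholds are derived in the constructor above, a worked example may help. The flag values below are illustrative assumptions, not authoritative defaults; count_shift = 3 follows from the 2 state bits plus 1 carry bit shown in invocationCounter.hpp above.

#include <cstdio>

static int right_n_bits_model(int n) { return (1 << n) - 1; }

int main() {
  const int    count_shift = 3;                             // 2 state bits + 1 carry bit
  const double scale = 0.5;                                 // per-method CompileThresholdScaling
  const int    compile_threshold = (int)(10000 * scale);    // assumed CompileThreshold, scaled
  const int    interpreter_profile_percentage = 33;         // assumed flag value
  const int    on_stack_replace_percentage = 140;           // assumed flag value
  const int    tier0_invoke_notify_freq_log = 7;            // assumed flag value

  std::printf("invocation limit: %d\n", compile_threshold << count_shift);
  std::printf("profile limit:    %d\n",
              ((compile_threshold * interpreter_profile_percentage) / 100) << count_shift);
  std::printf("OSR limit (no profiling): %d\n",
              ((compile_threshold * on_stack_replace_percentage) / 100) << count_shift);
  std::printf("invoke mask:      0x%x\n",
              right_n_bits_model(tier0_invoke_notify_freq_log) << count_shift);
  return 0;
}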
public:
- static MethodCounters* allocate(ClassLoaderData* loader_data, TRAPS);
+ static MethodCounters* allocate(methodHandle mh, TRAPS);
void deallocate_contents(ClassLoaderData* loader_data) {}
DEBUG_ONLY(bool on_stack() { return false; }) // for template
@@ -161,5 +185,24 @@
return offset_of(MethodCounters, _interpreter_invocation_count);
}
+ static ByteSize interpreter_invocation_limit_offset() {
+ return byte_offset_of(MethodCounters, _interpreter_invocation_limit);
+ }
+
+ static ByteSize interpreter_backward_branch_limit_offset() {
+ return byte_offset_of(MethodCounters, _interpreter_backward_branch_limit);
+ }
+
+ static ByteSize interpreter_profile_limit_offset() {
+ return byte_offset_of(MethodCounters, _interpreter_profile_limit);
+ }
+
+ static ByteSize invoke_mask_offset() {
+ return byte_offset_of(MethodCounters, _invoke_mask);
+ }
+
+ static ByteSize backedge_mask_offset() {
+ return byte_offset_of(MethodCounters, _backedge_mask);
+ }
};
#endif //SHARE_VM_OOPS_METHODCOUNTERS_HPP
--- a/hotspot/src/share/vm/oops/methodData.cpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/oops/methodData.cpp Wed Jul 05 20:16:58 2017 +0200
@@ -31,6 +31,7 @@
#include "memory/heapInspection.hpp"
#include "oops/methodData.hpp"
#include "prims/jvmtiRedefineClasses.hpp"
+#include "runtime/arguments.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"
@@ -1131,6 +1132,13 @@
_backedge_counter.init();
_invocation_counter_start = 0;
_backedge_counter_start = 0;
+
+ // Set per-method invoke- and backedge mask.
+ double scale = 1.0;
+ CompilerOracle::has_option_value(_method, "CompileThresholdScaling", scale);
+ _invoke_mask = right_n_bits(Arguments::scaled_freq_log(Tier0InvokeNotifyFreqLog, scale)) << InvocationCounter::count_shift;
+ _backedge_mask = right_n_bits(Arguments::scaled_freq_log(Tier0BackedgeNotifyFreqLog, scale)) << InvocationCounter::count_shift;
+
_tenure_traps = 0;
_num_loops = 0;
_num_blocks = 0;
--- a/hotspot/src/share/vm/oops/methodData.hpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/oops/methodData.hpp Wed Jul 05 20:16:58 2017 +0200
@@ -2088,6 +2088,8 @@
int _invocation_counter_start;
int _backedge_counter_start;
uint _tenure_traps;
+ int _invoke_mask; // per-method Tier0InvokeNotifyFreqLog
+ int _backedge_mask; // per-method Tier0BackedgeNotifyFreqLog
#if INCLUDE_RTM_OPT
// State of RTM code generation during compilation of the method
@@ -2447,10 +2449,19 @@
static ByteSize invocation_counter_offset() {
return byte_offset_of(MethodData, _invocation_counter);
}
+
static ByteSize backedge_counter_offset() {
return byte_offset_of(MethodData, _backedge_counter);
}
+ static ByteSize invoke_mask_offset() {
+ return byte_offset_of(MethodData, _invoke_mask);
+ }
+
+ static ByteSize backedge_mask_offset() {
+ return byte_offset_of(MethodData, _backedge_mask);
+ }
+
static ByteSize parameters_type_data_di_offset() {
return byte_offset_of(MethodData, _parameters_type_data_di);
}
--- a/hotspot/src/share/vm/opto/chaitin.cpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/opto/chaitin.cpp Wed Jul 05 20:16:58 2017 +0200
@@ -582,6 +582,9 @@
// Peephole remove copies
post_allocate_copy_removal();
+ // Merge multidefs if multiple defs representing the same value are used in a single block.
+ merge_multidefs();
+
#ifdef ASSERT
// Veify the graph after RA.
verify(&live_arena);
--- a/hotspot/src/share/vm/opto/chaitin.hpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/opto/chaitin.hpp Wed Jul 05 20:16:58 2017 +0200
@@ -681,6 +681,32 @@
// Extend the node to LRG mapping
void add_reference( const Node *node, const Node *old_node);
+ // Record the first use of a def in the block for a register.
+ class RegDefUse {
+ Node* _def;
+ Node* _first_use;
+ public:
+ RegDefUse() : _def(NULL), _first_use(NULL) { }
+ Node* def() const { return _def; }
+ Node* first_use() const { return _first_use; }
+
+ void update(Node* def, Node* use) {
+ if (_def != def) {
+ _def = def;
+ _first_use = use;
+ }
+ }
+ void clear() {
+ _def = NULL;
+ _first_use = NULL;
+ }
+ };
+ typedef GrowableArray<RegDefUse> RegToDefUseMap;
+ int possibly_merge_multidef(Node *n, uint k, Block *block, RegToDefUseMap& reg2defuse);
+
+ // Merge nodes that are a part of a multidef lrg and produce the same value within a block.
+ void merge_multidefs();
+
private:
static int _final_loads, _final_stores, _final_copies, _final_memoves;
--- a/hotspot/src/share/vm/opto/doCall.cpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/opto/doCall.cpp Wed Jul 05 20:16:58 2017 +0200
@@ -94,7 +94,7 @@
if (log != NULL) {
int rid = (receiver_count >= 0)? log->identify(profile.receiver(0)): -1;
int r2id = (rid != -1 && profile.has_receiver(1))? log->identify(profile.receiver(1)):-1;
- log->begin_elem("call method='%d' count='%d' prof_factor='%g'",
+ log->begin_elem("call method='%d' count='%d' prof_factor='%f'",
log->identify(callee), site_count, prof_factor);
if (call_does_dispatch) log->print(" virtual='1'");
if (allow_inline) log->print(" inline='1'");
--- a/hotspot/src/share/vm/opto/escape.cpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/opto/escape.cpp Wed Jul 05 20:16:58 2017 +0200
@@ -2010,14 +2010,9 @@
bt = field->layout_type();
} else {
// Check for unsafe oop field access
- for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
- int opcode = n->fast_out(i)->Opcode();
- if (opcode == Op_StoreP || opcode == Op_LoadP ||
- opcode == Op_StoreN || opcode == Op_LoadN) {
- bt = T_OBJECT;
- (*unsafe) = true;
- break;
- }
+ if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN)) {
+ bt = T_OBJECT;
+ (*unsafe) = true;
}
}
} else if (adr_type->isa_aryptr()) {
@@ -2031,13 +2026,8 @@
}
} else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
// Allocation initialization, ThreadLocal field access, unsafe access
- for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
- int opcode = n->fast_out(i)->Opcode();
- if (opcode == Op_StoreP || opcode == Op_LoadP ||
- opcode == Op_StoreN || opcode == Op_LoadN) {
- bt = T_OBJECT;
- break;
- }
+ if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN)) {
+ bt = T_OBJECT;
}
}
}
@@ -3092,13 +3082,7 @@
continue;
} else if (n->Opcode() == Op_EncodeISOArray) {
// get the memory projection
- for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
- Node *use = n->fast_out(i);
- if (use->Opcode() == Op_SCMemProj) {
- n = use;
- break;
- }
- }
+ n = n->find_out_with(Op_SCMemProj);
assert(n->Opcode() == Op_SCMemProj, "memory projection required");
} else {
assert(n->is_Mem(), "memory node required.");
@@ -3122,13 +3106,7 @@
continue; // don't push users
} else if (n->is_LoadStore()) {
// get the memory projection
- for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
- Node *use = n->fast_out(i);
- if (use->Opcode() == Op_SCMemProj) {
- n = use;
- break;
- }
- }
+ n = n->find_out_with(Op_SCMemProj);
assert(n->Opcode() == Op_SCMemProj, "memory projection required");
}
}
--- a/hotspot/src/share/vm/opto/ifg.cpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/opto/ifg.cpp Wed Jul 05 20:16:58 2017 +0200
@@ -535,12 +535,8 @@
// The method add_input_to_liveout() keeps such nodes alive (put them on liveout list)
// when it sees SCMemProj node in a block. Unfortunately SCMemProj node could be placed
// in block in such order that KILL MachProj nodes are processed first.
- uint cnt = def->outcnt();
- for (uint i = 0; i < cnt; i++) {
- Node* proj = def->raw_out(i);
- if (proj->Opcode() == Op_SCMemProj) {
- return false;
- }
+ if (def->has_out_with(Op_SCMemProj)) {
+ return false;
}
}
b->remove_node(location);
--- a/hotspot/src/share/vm/opto/loopTransform.cpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/opto/loopTransform.cpp Wed Jul 05 20:16:58 2017 +0200
@@ -2057,10 +2057,9 @@
}
Node *main_cmp = main_bol->in(1);
if( main_cmp->outcnt() > 1 ) { // CmpNode shared?
- _igvn.hash_delete(main_bol);
main_cmp = main_cmp->clone();// Clone a private CmpNode
register_new_node( main_cmp, main_cle->in(0) );
- main_bol->set_req(1,main_cmp);
+ _igvn.replace_input_of(main_bol, 1, main_cmp);
}
// Hack the now-private loop bounds
_igvn.replace_input_of(main_cmp, 2, main_limit);
--- a/hotspot/src/share/vm/opto/machnode.hpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/opto/machnode.hpp Wed Jul 05 20:16:58 2017 +0200
@@ -616,6 +616,29 @@
#endif
};
+// MachMergeNode is similar to a PhiNode in the sense that it merges multiple values;
+// however, it doesn't have a control input and is more like a MergeMem.
+// It is inserted after register allocation is done to ensure that nodes use a single
+// definition of a multidef lrg in a block.
+class MachMergeNode : public MachIdealNode {
+public:
+ MachMergeNode(Node *n1) {
+ init_class_id(Class_MachMerge);
+ add_req(NULL);
+ add_req(n1);
+ }
+ virtual const RegMask &out_RegMask() const { return in(1)->out_RegMask(); }
+ virtual const RegMask &in_RegMask(uint idx) const { return in(1)->in_RegMask(idx); }
+ virtual const class Type *bottom_type() const { return in(1)->bottom_type(); }
+ virtual uint ideal_reg() const { return bottom_type()->ideal_reg(); }
+ virtual uint oper_input_base() const { return 1; }
+ virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const { }
+ virtual uint size(PhaseRegAlloc *ra_) const { return 0; }
+#ifndef PRODUCT
+ virtual const char *Name() const { return "MachMerge"; }
+#endif
+};
+
//------------------------------MachBranchNode--------------------------------
// Abstract machine branch Node
class MachBranchNode : public MachIdealNode {
--- a/hotspot/src/share/vm/opto/macro.cpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/opto/macro.cpp Wed Jul 05 20:16:58 2017 +0200
@@ -258,14 +258,7 @@
// Search for CastP2X->Xor->URShift->Cmp path which
// checks if the store done to a different from the value's region.
// And replace Cmp with #0 (false) to collapse G1 post barrier.
- Node* xorx = NULL;
- for (DUIterator_Fast imax, i = p2x->fast_outs(imax); i < imax; i++) {
- Node* u = p2x->fast_out(i);
- if (u->Opcode() == Op_XorX) {
- xorx = u;
- break;
- }
- }
+ Node* xorx = p2x->find_out_with(Op_XorX);
assert(xorx != NULL, "missing G1 post barrier");
Node* shift = xorx->unique_out();
Node* cmpx = shift->unique_out();
--- a/hotspot/src/share/vm/opto/memnode.cpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/opto/memnode.cpp Wed Jul 05 20:16:58 2017 +0200
@@ -2609,7 +2609,6 @@
return false; // if not a distinct instance, there may be aliases of the address
for (DUIterator_Fast imax, i = adr->fast_outs(imax); i < imax; i++) {
Node *use = adr->fast_out(i);
- int opc = use->Opcode();
if (use->is_Load() || use->is_LoadStore()) {
return false;
}
--- a/hotspot/src/share/vm/opto/node.cpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/opto/node.cpp Wed Jul 05 20:16:58 2017 +0200
@@ -881,6 +881,34 @@
return (Node*) this;
}
+// Find an out (use) of the current node with the given opcode. Returns NULL if none is found.
+Node* Node::find_out_with(int opcode) {
+ for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
+ Node* use = fast_out(i);
+ if (use->Opcode() == opcode) {
+ return use;
+ }
+ }
+ return NULL;
+}
+
+// Return true if the current node has an out that matches opcode.
+bool Node::has_out_with(int opcode) {
+ return (find_out_with(opcode) != NULL);
+}
+
+// Return true if the current node has an out that matches any of the opcodes.
+bool Node::has_out_with(int opcode1, int opcode2, int opcode3, int opcode4) {
+ for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
+ int opcode = fast_out(i)->Opcode();
+ if (opcode == opcode1 || opcode == opcode2 || opcode == opcode3 || opcode == opcode4) {
+ return true;
+ }
+ }
+ return false;
+}
+
+
//---------------------------uncast_helper-------------------------------------
Node* Node::uncast_helper(const Node* p) {
#ifdef ASSERT
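The new helpers above are a plain linear scan over a node's out (use) edges. A minimal standalone sketch of the same pattern, using a simplified node type instead of the real Node class:

#include <cstdio>
#include <vector>

// Simplified stand-in for a HotSpot Node: an opcode plus its out (use) edges.
struct SimpleNode {
    int opcode;
    std::vector<SimpleNode*> outs;
};

// Mirrors Node::find_out_with(): return the first use with the given opcode, or NULL.
SimpleNode* find_out_with(SimpleNode* n, int opcode) {
    for (SimpleNode* use : n->outs) {
        if (use->opcode == opcode) return use;
    }
    return nullptr;
}

bool has_out_with(SimpleNode* n, int opcode) {
    return find_out_with(n, opcode) != nullptr;
}

int main() {
    SimpleNode store{10, {}}, proj{42, {}};
    store.outs.push_back(&proj);
    std::printf("has opcode 42: %d\n", has_out_with(&store, 42));  // prints 1
    return 0;
}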
--- a/hotspot/src/share/vm/opto/node.hpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/opto/node.hpp Wed Jul 05 20:16:58 2017 +0200
@@ -98,6 +98,7 @@
class MachSafePointNode;
class MachSpillCopyNode;
class MachTempNode;
+class MachMergeNode;
class Matcher;
class MemBarNode;
class MemBarStoreStoreNode;
@@ -436,6 +437,13 @@
return (this->uncast() == n->uncast());
}
+ // Find an out (use) of the current node with the given opcode. Returns NULL if none is found.
+ Node* find_out_with(int opcode);
+ // Return true if the current node has an out that matches opcode.
+ bool has_out_with(int opcode);
+ // Return true if the current node has an out that matches any of the opcodes.
+ bool has_out_with(int opcode1, int opcode2, int opcode3, int opcode4);
+
private:
static Node* uncast_helper(const Node* n);
@@ -507,18 +515,25 @@
//----------------- Other Node Properties
- // Generate class id for some ideal nodes to avoid virtual query
- // methods is_<Node>().
- // Class id is the set of bits corresponded to the node class and all its
- // super classes so that queries for super classes are also valid.
- // Subclasses of the same super class have different assigned bit
- // (the third parameter in the macro DEFINE_CLASS_ID).
- // Classes with deeper hierarchy are declared first.
- // Classes with the same hierarchy depth are sorted by usage frequency.
+ // Generate class IDs for (some) ideal nodes so that it is possible to determine
+ // the type of a node using a non-virtual method call (the method is_<Node>() below).
+ //
+ // A class ID of an ideal node is a set of bits. In a class ID, a single bit determines
+ // the type of the node the ID represents; another subset of the ID's bits is reserved
+ // for the superclasses of the node represented by the ID.
+ //
+ // By design, if A is a subtype of B (A inherits from B), A.is_B() returns true and B.is_A()
+ // returns false. A.is_A() returns true.
//
- // The query method masks the bits to cut off bits of subclasses
- // and then compare the result with the class id
- // (see the macro DEFINE_CLASS_QUERY below).
+ // If two classes, A and B, have the same superclass, a different bit of A's class id
+ // is reserved for A's type than for B's type. That bit is specified by the third
+ // parameter in the macro DEFINE_CLASS_ID.
+ //
+ // By convention, classes with deeper hierarchy are declared first. Moreover,
+ // classes with the same hierarchy depth are sorted by usage frequency.
+ //
+ // The query method masks the bits to cut off bits of subclasses and then compares
+ // the result with the class id (see the macro DEFINE_CLASS_QUERY below).
//
// Class_MachCall=30, ClassMask_MachCall=31
// 12 8 4 0
@@ -592,6 +607,7 @@
DEFINE_CLASS_ID(MachTemp, Mach, 3)
DEFINE_CLASS_ID(MachConstantBase, Mach, 4)
DEFINE_CLASS_ID(MachConstant, Mach, 5)
+ DEFINE_CLASS_ID(MachMerge, Mach, 6)
DEFINE_CLASS_ID(Type, Node, 2)
DEFINE_CLASS_ID(Phi, Type, 0)
@@ -763,6 +779,7 @@
DEFINE_CLASS_QUERY(MachSafePoint)
DEFINE_CLASS_QUERY(MachSpillCopy)
DEFINE_CLASS_QUERY(MachTemp)
+ DEFINE_CLASS_QUERY(MachMerge)
DEFINE_CLASS_QUERY(Mem)
DEFINE_CLASS_QUERY(MemBar)
DEFINE_CLASS_QUERY(MemBarStoreStore)
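The bit-layout scheme described in the rewritten comment can be illustrated with a small standalone sketch. The constants below follow the arithmetic of DEFINE_CLASS_ID as it reads in node.hpp, but they are written out by hand for the example and should be treated as illustrative, not as the real values:

#include <cassert>
#include <cstdio>

// Hierarchy used here: Node -> Mach -> MachMerge.
const unsigned Class_Node      = 0;
const unsigned Bit_Mach        = 1 << 1;                    // DEFINE_CLASS_ID(Mach, Node, 1)
const unsigned Class_Mach      = Class_Node + Bit_Mach;     // == 2
const unsigned ClassMask_Mach  = (Bit_Mach << 1) - 1;       // == 3

const unsigned Bit_MachMerge       = Bit_Mach << (1 + 6);   // DEFINE_CLASS_ID(MachMerge, Mach, 6)
const unsigned Class_MachMerge     = Class_Mach + Bit_MachMerge;
const unsigned ClassMask_MachMerge = (Bit_MachMerge << 1) - 1;

// Analogue of DEFINE_CLASS_QUERY: mask off the bits used by subclasses, then compare.
bool is_Mach(unsigned class_id)      { return (class_id & ClassMask_Mach) == Class_Mach; }
bool is_MachMerge(unsigned class_id) { return (class_id & ClassMask_MachMerge) == Class_MachMerge; }

int main() {
    assert(is_Mach(Class_MachMerge));   // a subtype answers is_<super>() with true
    assert(!is_MachMerge(Class_Mach));  // a supertype answers is_<sub>() with false
    std::printf("Class_MachMerge=%u ClassMask_MachMerge=%u\n", Class_MachMerge, ClassMask_MachMerge);
    return 0;
}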
--- a/hotspot/src/share/vm/opto/parse1.cpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/opto/parse1.cpp Wed Jul 05 20:16:58 2017 +0200
@@ -441,7 +441,7 @@
CompileLog* log = C->log();
if (log != NULL) {
- log->begin_head("parse method='%d' uses='%g'",
+ log->begin_head("parse method='%d' uses='%f'",
log->identify(parse_method), expected_uses);
if (depth() == 1 && C->is_osr_compilation()) {
log->print(" osr_bci='%d'", C->entry_bci());
--- a/hotspot/src/share/vm/opto/parse2.cpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/opto/parse2.cpp Wed Jul 05 20:16:58 2017 +0200
@@ -832,7 +832,7 @@
sprintf(prob_str_buf, "%g", prob);
prob_str = prob_str_buf;
}
- C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d' cnt='%g' prob='%s'",
+ C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d' cnt='%f' prob='%s'",
iter().get_dest(), taken, not_taken, cnt, prob_str);
}
return prob;
--- a/hotspot/src/share/vm/opto/phase.cpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/opto/phase.cpp Wed Jul 05 20:16:58 2017 +0200
@@ -110,6 +110,7 @@
tty->print_cr (" Compute Liveness: %7.3f s", timers[_t_computeLive].seconds());
tty->print_cr (" Regalloc Split: %7.3f s", timers[_t_regAllocSplit].seconds());
tty->print_cr (" Postalloc Copy Rem: %7.3f s", timers[_t_postAllocCopyRemoval].seconds());
+ tty->print_cr (" Merge multidefs: %7.3f s", timers[_t_mergeMultidefs].seconds());
tty->print_cr (" Fixup Spills: %7.3f s", timers[_t_fixupSpills].seconds());
tty->print_cr (" Compact: %7.3f s", timers[_t_chaitinCompact].seconds());
tty->print_cr (" Coalesce 1: %7.3f s", timers[_t_chaitinCoalesce1].seconds());
@@ -126,6 +127,7 @@
timers[_t_computeLive].seconds() +
timers[_t_regAllocSplit].seconds() +
timers[_t_postAllocCopyRemoval].seconds() +
+ timers[_t_mergeMultidefs].seconds() +
timers[_t_fixupSpills].seconds() +
timers[_t_chaitinCompact].seconds() +
timers[_t_chaitinCoalesce1].seconds() +
--- a/hotspot/src/share/vm/opto/phase.hpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/opto/phase.hpp Wed Jul 05 20:16:58 2017 +0200
@@ -88,6 +88,7 @@
_t_computeLive,
_t_regAllocSplit,
_t_postAllocCopyRemoval,
+ _t_mergeMultidefs,
_t_fixupSpills,
_t_chaitinCompact,
_t_chaitinCoalesce1,
--- a/hotspot/src/share/vm/opto/postaloc.cpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/opto/postaloc.cpp Wed Jul 05 20:16:58 2017 +0200
@@ -263,20 +263,6 @@
// intermediate copies might be illegal, i.e., value is stored down to stack
// then reloaded BUT survives in a register the whole way.
Node *val = skip_copies(n->in(k));
-
- if (val == x && nk_idx != 0 &&
- regnd[nk_reg] != NULL && regnd[nk_reg] != x &&
- _lrg_map.live_range_id(x) == _lrg_map.live_range_id(regnd[nk_reg])) {
- // When rematerialzing nodes and stretching lifetimes, the
- // allocator will reuse the original def for multidef LRG instead
- // of the current reaching def because it can't know it's safe to
- // do so. After allocation completes if they are in the same LRG
- // then it should use the current reaching def instead.
- n->set_req(k, regnd[nk_reg]);
- blk_adjust += yank_if_dead(val, current_block, &value, ®nd);
- val = skip_copies(n->in(k));
- }
-
if (val == x) return blk_adjust; // No progress?
int n_regs = RegMask::num_registers(val->ideal_reg());
@@ -382,6 +368,94 @@
return false;
}
+// The algorithm works as follows:
+// We traverse the block top to bottom. possibly_merge_multidef() is invoked for every input edge k
+// of the instruction n. We check whether the input belongs to a multidef lrg. If it does, we record that
+// we've seen a definition (coming in as an input) in the reg2defuse array. The array maps registers to
+// their current reaching definitions (we track only multidefs though). With each definition we also
+// associate the first instruction we saw use it. If we then observe a def (an input) that is part of the
+// same lrg but differs from the previously seen def, we merge the two with a MachMerge node and redirect
+// all the uses seen so far to the merge. After that, we keep replacing subsequent defs of the same lrg
+// with the merge node as they are encountered, adding each such def to the merge's inputs.
+void PhaseChaitin::merge_multidefs() {
+ Compile::TracePhase tp("mergeMultidefs", &timers[_t_mergeMultidefs]);
+ ResourceMark rm;
+ // Keep track of the defs seen in registers and collect their uses in the block.
+ RegToDefUseMap reg2defuse(_max_reg, _max_reg, RegDefUse());
+ for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
+ Block* block = _cfg.get_block(i);
+ for (uint j = 1; j < block->number_of_nodes(); j++) {
+ Node* n = block->get_node(j);
+ if (n->is_Phi()) continue;
+ for (uint k = 1; k < n->req(); k++) {
+ j += possibly_merge_multidef(n, k, block, reg2defuse);
+ }
+ // Null out the value produced by the instruction itself, since we're only interested in defs
+ // implicitly defined by the uses. We are actually interested in tracking only redefinitions
+ // of the multidef lrgs in the same register. For that matter it's enough to track changes in
+ // the base register only and ignore other effects of multi-register lrgs and fat projections.
+ // It is also ok to ignore defs coming from singledefs. After an implicit overwrite by one of
+ // those our register is guaranteed to be used by another lrg and we won't attempt to merge it.
+ uint lrg = _lrg_map.live_range_id(n);
+ if (lrg > 0 && lrgs(lrg).is_multidef()) {
+ OptoReg::Name reg = lrgs(lrg).reg();
+ reg2defuse.at(reg).clear();
+ }
+ }
+ // Clear reg->def->use tracking for the next block
+ for (int j = 0; j < reg2defuse.length(); j++) {
+ reg2defuse.at(j).clear();
+ }
+ }
+}
+
+int PhaseChaitin::possibly_merge_multidef(Node *n, uint k, Block *block, RegToDefUseMap& reg2defuse) {
+ int blk_adjust = 0;
+
+ uint lrg = _lrg_map.live_range_id(n->in(k));
+ if (lrg > 0 && lrgs(lrg).is_multidef()) {
+ OptoReg::Name reg = lrgs(lrg).reg();
+
+ Node* def = reg2defuse.at(reg).def();
+ if (def != NULL && lrg == _lrg_map.live_range_id(def) && def != n->in(k)) {
+ // Same lrg but different node, we have to merge.
+ MachMergeNode* merge;
+ if (def->is_MachMerge()) { // is it already a merge?
+ merge = def->as_MachMerge();
+ } else {
+ merge = new MachMergeNode(def);
+
+ // Insert the merge node into the block before the first use.
+ uint use_index = block->find_node(reg2defuse.at(reg).first_use());
+ block->insert_node(merge, use_index++);
+
+ // Let the allocator know about the new node, use the same lrg
+ _lrg_map.extend(merge->_idx, lrg);
+ blk_adjust++;
+
+ // Fix up all the uses (there is at least one) that happened between the first
+ // use and the current one.
+ for (; use_index < block->number_of_nodes(); use_index++) {
+ Node* use = block->get_node(use_index);
+ if (use == n) {
+ break;
+ }
+ use->replace_edge(def, merge);
+ }
+ }
+ if (merge->find_edge(n->in(k)) == -1) {
+ merge->add_req(n->in(k));
+ }
+ n->set_req(k, merge);
+ }
+
+ // Remember the def reaching this register and the use we just saw.
+ reg2defuse.at(reg).update(n->in(k), n);
+ }
+
+ return blk_adjust;
+}
+
//------------------------------post_allocate_copy_removal---------------------
// Post-Allocation peephole copy removal. We do this in 1 pass over the
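To see how the reg2defuse bookkeeping drives the merge decision, here is a condensed standalone sketch. Plain C++ containers stand in for RegToDefUseMap and the HotSpot node types; it only demonstrates the detection logic, not the actual node surgery:

#include <cstdio>
#include <map>
#include <string>
#include <vector>

// Stand-in for RegDefUse: the current reaching def in a register and its first use.
struct DefUse {
    std::string def;
    std::string first_use;
};

int main() {
    std::map<int, DefUse> reg2defuse;                      // register -> (def, first use)
    struct Use { int reg; std::string def, insn; };        // (register, def feeding the use, instruction)
    std::vector<Use> block = {
        {5, "defA", "use1"},   // first sighting of the lrg in r5
        {5, "defA", "use2"},   // same def again: nothing to do
        {5, "defB", "use3"},   // different def of the same lrg in r5 -> merge point
    };

    for (const Use& u : block) {
        DefUse& du = reg2defuse[u.reg];
        if (!du.def.empty() && du.def != u.def) {
            // This is where possibly_merge_multidef() would insert (or extend) a MachMerge
            // right before du.first_use and rewire the uses seen so far.
            std::printf("merge %s with %s, inserted before %s (current use: %s)\n",
                        du.def.c_str(), u.def.c_str(), du.first_use.c_str(), u.insn.c_str());
        }
        if (du.def != u.def) {         // new reaching def: remember it and its first use
            du.def = u.def;
            du.first_use = u.insn;
        }
    }
    return 0;
}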
--- a/hotspot/src/share/vm/opto/stringopts.cpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/opto/stringopts.cpp Wed Jul 05 20:16:58 2017 +0200
@@ -1507,10 +1507,12 @@
}
case StringConcat::StringMode: {
const Type* type = kit.gvn().type(arg);
+ Node* count = NULL;
if (type == TypePtr::NULL_PTR) {
// replace the argument with the null checked version
arg = null_string;
sc->set_argument(argi, arg);
+ count = kit.load_String_length(kit.control(), arg);
} else if (!type->higher_equal(TypeInstPtr::NOTNULL)) {
// s = s != null ? s : "null";
// length = length + (s.count - s.offset);
@@ -1533,10 +1535,13 @@
// replace the argument with the null checked version
arg = phi;
sc->set_argument(argi, arg);
+ count = kit.load_String_length(kit.control(), arg);
+ } else {
+ // A corresponding null check will be connected during IGVN in MemNode::Ideal_common_DU_postCCP.
+ // kit.control() might be a different test that was hoisted above the actual null check;
+ // if the control input were non-NULL, Ideal_common_DU_postCCP would not look for the null check.
+ count = kit.load_String_length(NULL, arg);
}
-
- Node* count = kit.load_String_length(kit.control(), arg);
-
length = __ AddI(length, count);
string_sizes->init_req(argi, NULL);
break;
--- a/hotspot/src/share/vm/prims/jvmtiClassFileReconstituter.hpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/prims/jvmtiClassFileReconstituter.hpp Wed Jul 05 20:16:58 2017 +0200
@@ -68,11 +68,11 @@
~JvmtiConstantPoolReconstituter() {
if (_symmap != NULL) {
- os::free(_symmap);
+ delete _symmap;
_symmap = NULL;
}
if (_classmap != NULL) {
- os::free(_classmap);
+ delete _classmap;
_classmap = NULL;
}
}
--- a/hotspot/src/share/vm/prims/jvmtiTagMap.cpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/prims/jvmtiTagMap.cpp Wed Jul 05 20:16:58 2017 +0200
@@ -1046,10 +1046,16 @@
{
assert(str->klass() == SystemDictionary::String_klass(), "not a string");
+ typeArrayOop s_value = java_lang_String::value(str);
+
+ // JDK-6584008: the value field may be null if a String instance is
+ // partially constructed.
+ if (s_value == NULL) {
+ return 0;
+ }
// get the string value and length
// (string value may be offset from the base)
int s_len = java_lang_String::length(str);
- typeArrayOop s_value = java_lang_String::value(str);
int s_offset = java_lang_String::offset(str);
jchar* value;
if (s_len > 0) {
--- a/hotspot/src/share/vm/prims/perf.cpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/prims/perf.cpp Wed Jul 05 20:16:58 2017 +0200
@@ -100,6 +100,11 @@
PerfWrapper("Perf_Detach");
+ if (!UsePerfData) {
+ // With -XX:-UsePerfData, detach is just a NOP
+ return;
+ }
+
void* address = 0;
jlong capacity = 0;
--- a/hotspot/src/share/vm/prims/unsafe.cpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/prims/unsafe.cpp Wed Jul 05 20:16:58 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1104,43 +1104,6 @@
-UNSAFE_ENTRY(void, Unsafe_MonitorEnter(JNIEnv *env, jobject unsafe, jobject jobj))
- UnsafeWrapper("Unsafe_MonitorEnter");
- {
- if (jobj == NULL) {
- THROW(vmSymbols::java_lang_NullPointerException());
- }
- Handle obj(thread, JNIHandles::resolve_non_null(jobj));
- ObjectSynchronizer::jni_enter(obj, CHECK);
- }
-UNSAFE_END
-
-
-UNSAFE_ENTRY(jboolean, Unsafe_TryMonitorEnter(JNIEnv *env, jobject unsafe, jobject jobj))
- UnsafeWrapper("Unsafe_TryMonitorEnter");
- {
- if (jobj == NULL) {
- THROW_(vmSymbols::java_lang_NullPointerException(), JNI_FALSE);
- }
- Handle obj(thread, JNIHandles::resolve_non_null(jobj));
- bool res = ObjectSynchronizer::jni_try_enter(obj, CHECK_0);
- return (res ? JNI_TRUE : JNI_FALSE);
- }
-UNSAFE_END
-
-
-UNSAFE_ENTRY(void, Unsafe_MonitorExit(JNIEnv *env, jobject unsafe, jobject jobj))
- UnsafeWrapper("Unsafe_MonitorExit");
- {
- if (jobj == NULL) {
- THROW(vmSymbols::java_lang_NullPointerException());
- }
- Handle obj(THREAD, JNIHandles::resolve_non_null(jobj));
- ObjectSynchronizer::jni_exit(obj(), CHECK);
- }
-UNSAFE_END
-
-
UNSAFE_ENTRY(void, Unsafe_ThrowException(JNIEnv *env, jobject unsafe, jthrowable thr))
UnsafeWrapper("Unsafe_ThrowException");
{
@@ -1365,8 +1328,6 @@
{CC"defineClass", CC"("DC0_Args")"CLS, FN_PTR(Unsafe_DefineClass0)},
{CC"defineClass", CC"("DC_Args")"CLS, FN_PTR(Unsafe_DefineClass)},
{CC"allocateInstance", CC"("CLS")"OBJ, FN_PTR(Unsafe_AllocateInstance)},
- {CC"monitorEnter", CC"("OBJ")V", FN_PTR(Unsafe_MonitorEnter)},
- {CC"monitorExit", CC"("OBJ")V", FN_PTR(Unsafe_MonitorExit)},
{CC"throwException", CC"("THR")V", FN_PTR(Unsafe_ThrowException)}
};
@@ -1411,8 +1372,6 @@
{CC"defineClass", CC"("DC0_Args")"CLS, FN_PTR(Unsafe_DefineClass0)},
{CC"defineClass", CC"("DC_Args")"CLS, FN_PTR(Unsafe_DefineClass)},
{CC"allocateInstance", CC"("CLS")"OBJ, FN_PTR(Unsafe_AllocateInstance)},
- {CC"monitorEnter", CC"("OBJ")V", FN_PTR(Unsafe_MonitorEnter)},
- {CC"monitorExit", CC"("OBJ")V", FN_PTR(Unsafe_MonitorExit)},
{CC"throwException", CC"("THR")V", FN_PTR(Unsafe_ThrowException)}
};
@@ -1461,8 +1420,6 @@
{CC"defineClass", CC"("DC0_Args")"CLS, FN_PTR(Unsafe_DefineClass0)},
{CC"defineClass", CC"("DC_Args")"CLS, FN_PTR(Unsafe_DefineClass)},
{CC"allocateInstance", CC"("CLS")"OBJ, FN_PTR(Unsafe_AllocateInstance)},
- {CC"monitorEnter", CC"("OBJ")V", FN_PTR(Unsafe_MonitorEnter)},
- {CC"monitorExit", CC"("OBJ")V", FN_PTR(Unsafe_MonitorExit)},
{CC"throwException", CC"("THR")V", FN_PTR(Unsafe_ThrowException)},
{CC"compareAndSwapObject", CC"("OBJ"J"OBJ""OBJ")Z", FN_PTR(Unsafe_CompareAndSwapObject)},
{CC"compareAndSwapInt", CC"("OBJ"J""I""I"")Z", FN_PTR(Unsafe_CompareAndSwapInt)},
@@ -1515,9 +1472,6 @@
{CC"defineClass", CC"("DC0_Args")"CLS, FN_PTR(Unsafe_DefineClass0)},
{CC"defineClass", CC"("DC_Args")"CLS, FN_PTR(Unsafe_DefineClass)},
{CC"allocateInstance", CC"("CLS")"OBJ, FN_PTR(Unsafe_AllocateInstance)},
- {CC"monitorEnter", CC"("OBJ")V", FN_PTR(Unsafe_MonitorEnter)},
- {CC"monitorExit", CC"("OBJ")V", FN_PTR(Unsafe_MonitorExit)},
- {CC"tryMonitorEnter", CC"("OBJ")Z", FN_PTR(Unsafe_TryMonitorEnter)},
{CC"throwException", CC"("THR")V", FN_PTR(Unsafe_ThrowException)},
{CC"compareAndSwapObject", CC"("OBJ"J"OBJ""OBJ")Z", FN_PTR(Unsafe_CompareAndSwapObject)},
{CC"compareAndSwapInt", CC"("OBJ"J""I""I"")Z", FN_PTR(Unsafe_CompareAndSwapInt)},
@@ -1571,9 +1525,6 @@
{CC"defineClass", CC"("DC_Args")"CLS, FN_PTR(Unsafe_DefineClass)},
{CC"allocateInstance", CC"("CLS")"OBJ, FN_PTR(Unsafe_AllocateInstance)},
- {CC"monitorEnter", CC"("OBJ")V", FN_PTR(Unsafe_MonitorEnter)},
- {CC"monitorExit", CC"("OBJ")V", FN_PTR(Unsafe_MonitorExit)},
- {CC"tryMonitorEnter", CC"("OBJ")Z", FN_PTR(Unsafe_TryMonitorEnter)},
{CC"throwException", CC"("THR")V", FN_PTR(Unsafe_ThrowException)},
{CC"compareAndSwapObject", CC"("OBJ"J"OBJ""OBJ")Z", FN_PTR(Unsafe_CompareAndSwapObject)},
{CC"compareAndSwapInt", CC"("OBJ"J""I""I"")Z", FN_PTR(Unsafe_CompareAndSwapInt)},
--- a/hotspot/src/share/vm/runtime/advancedThresholdPolicy.cpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/runtime/advancedThresholdPolicy.cpp Wed Jul 05 20:16:58 2017 +0200
@@ -155,7 +155,7 @@
if (mdo != NULL) {
int i = mdo->invocation_count_delta();
int b = mdo->backedge_count_delta();
- return call_predicate_helper<CompLevel_full_profile>(i, b, 1);
+ return call_predicate_helper<CompLevel_full_profile>(i, b, 1, method);
}
return false;
}
@@ -229,32 +229,32 @@
// Tier?LoadFeedback is basically a coefficient that determines of
// how many methods per compiler thread can be in the queue before
// the threshold values double.
-bool AdvancedThresholdPolicy::loop_predicate(int i, int b, CompLevel cur_level) {
+bool AdvancedThresholdPolicy::loop_predicate(int i, int b, CompLevel cur_level, Method* method) {
switch(cur_level) {
case CompLevel_none:
case CompLevel_limited_profile: {
double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
- return loop_predicate_helper<CompLevel_none>(i, b, k);
+ return loop_predicate_helper<CompLevel_none>(i, b, k, method);
}
case CompLevel_full_profile: {
double k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
- return loop_predicate_helper<CompLevel_full_profile>(i, b, k);
+ return loop_predicate_helper<CompLevel_full_profile>(i, b, k, method);
}
default:
return true;
}
}
-bool AdvancedThresholdPolicy::call_predicate(int i, int b, CompLevel cur_level) {
+bool AdvancedThresholdPolicy::call_predicate(int i, int b, CompLevel cur_level, Method* method) {
switch(cur_level) {
case CompLevel_none:
case CompLevel_limited_profile: {
double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
- return call_predicate_helper<CompLevel_none>(i, b, k);
+ return call_predicate_helper<CompLevel_none>(i, b, k, method);
}
case CompLevel_full_profile: {
double k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
- return call_predicate_helper<CompLevel_full_profile>(i, b, k);
+ return call_predicate_helper<CompLevel_full_profile>(i, b, k, method);
}
default:
return true;
@@ -271,7 +271,7 @@
int i = method->invocation_count();
int b = method->backedge_count();
double k = Tier0ProfilingStartPercentage / 100.0;
- return call_predicate_helper<CompLevel_none>(i, b, k) || loop_predicate_helper<CompLevel_none>(i, b, k);
+ return call_predicate_helper<CompLevel_none>(i, b, k, method) || loop_predicate_helper<CompLevel_none>(i, b, k, method);
}
return false;
}
@@ -348,7 +348,7 @@
// If we were at full profile level, would we switch to full opt?
if (common(p, method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) {
next_level = CompLevel_full_optimization;
- } else if ((this->*p)(i, b, cur_level)) {
+ } else if ((this->*p)(i, b, cur_level, method)) {
// C1-generated fully profiled code is about 30% slower than the limited profile
// code that has only invocation and backedge counters. The observation is that
// if C2 queue is large enough we can spend too much time in the fully profiled code
@@ -374,7 +374,7 @@
if (mdo->would_profile()) {
if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
- (this->*p)(i, b, cur_level))) {
+ (this->*p)(i, b, cur_level, method))) {
next_level = CompLevel_full_profile;
}
} else {
@@ -390,7 +390,7 @@
if (mdo->would_profile()) {
int mdo_i = mdo->invocation_count_delta();
int mdo_b = mdo->backedge_count_delta();
- if ((this->*p)(mdo_i, mdo_b, cur_level)) {
+ if ((this->*p)(mdo_i, mdo_b, cur_level, method)) {
next_level = CompLevel_full_optimization;
}
} else {
--- a/hotspot/src/share/vm/runtime/advancedThresholdPolicy.hpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/runtime/advancedThresholdPolicy.hpp Wed Jul 05 20:16:58 2017 +0200
@@ -84,7 +84,7 @@
* invocation and backedge notifications. Basically every n-th invocation or backedge a mutator thread
* makes a call into the runtime.
*
- * - Tier?CompileThreshold, Tier?BackEdgeThreshold, Tier?MinInvocationThreshold control
+ * - Tier?InvocationThreshold, Tier?CompileThreshold, Tier?BackEdgeThreshold, Tier?MinInvocationThreshold control
* compilation thresholds.
* Level 2 thresholds are not used and are provided for option-compatibility and potential future use.
* Other thresholds work as follows:
@@ -100,7 +100,9 @@
* The same predicate is used to control the transition from level 3 to level 4 (C2). It should be
* noted though that the thresholds are relative. Moreover i and b for the 0->3 transition come
* from Method* and for 3->4 transition they come from MDO (since profiled invocations are
- * counted separately).
+ * counted separately). Finally, if a method does not contain anything worth profiling, a transition
+ * from level 3 to level 4 occurs without considering thresholds (e.g., with fewer invocations than
+ * what is specified by Tier4InvocationThreshold).
*
* OSR transitions are controlled simply with b > TierXBackEdgeThreshold * s predicates.
*
@@ -164,9 +166,9 @@
// Call and loop predicates determine whether a transition to a higher compilation
// level should be performed (pointers to predicate functions are passed to common().
// Predicates also take compiler load into account.
- typedef bool (AdvancedThresholdPolicy::*Predicate)(int i, int b, CompLevel cur_level);
- bool call_predicate(int i, int b, CompLevel cur_level);
- bool loop_predicate(int i, int b, CompLevel cur_level);
+ typedef bool (AdvancedThresholdPolicy::*Predicate)(int i, int b, CompLevel cur_level, Method* method);
+ bool call_predicate(int i, int b, CompLevel cur_level, Method* method);
+ bool loop_predicate(int i, int b, CompLevel cur_level, Method* method);
// Common transition function. Given a predicate determines if a method should transition to another level.
CompLevel common(Predicate p, Method* method, CompLevel cur_level, bool disable_feedback = false);
// Transition functions.
--- a/hotspot/src/share/vm/runtime/arguments.cpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/runtime/arguments.cpp Wed Jul 05 20:16:58 2017 +0200
@@ -1126,16 +1126,35 @@
}
#endif
-// Returns threshold scaled with CompileThresholdScaling
-intx Arguments::get_scaled_compile_threshold(intx threshold) {
- return (intx)(threshold * CompileThresholdScaling);
+intx Arguments::scaled_compile_threshold(intx threshold, double scale) {
+ if (scale == 1.0 || scale < 0.0) {
+ return threshold;
+ } else {
+ return (intx)(threshold * scale);
+ }
}
// Returns freq_log scaled with CompileThresholdScaling
-intx Arguments::get_scaled_freq_log(intx freq_log) {
- intx scaled_freq = get_scaled_compile_threshold((intx)1 << freq_log);
- if (scaled_freq == 0) {
- return 0;
+intx Arguments::scaled_freq_log(intx freq_log, double scale) {
+ // Check if scaling is necessary or a negative value was specified.
+ if (scale == 1.0 || scale < 0.0) {
+ return freq_log;
+ }
+
+ // Check value to avoid calculating log2 of 0.
+ if (scale == 0.0) {
+ return 1;
+ }
+
+ intx scaled_freq = scaled_compile_threshold((intx)1 << freq_log, scale);
+ // Determine the maximum notification frequency value currently supported.
+ // The largest mask value that the interpreter/C1 can handle is
+ // of length InvocationCounter::number_of_count_bits. Mask values are always
+ // one bit shorter than the value of the notification frequency. Set
+ // max_freq_bits accordingly.
+ intx max_freq_bits = InvocationCounter::number_of_count_bits + 1;
+ if (scaled_freq > nth_bit(max_freq_bits)) {
+ return max_freq_bits;
} else {
return log2_intptr(scaled_freq);
}
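As a worked example of the freq-log scaling, the sketch below reproduces the arithmetic with plain integers. The count-bit limit (29) is only an assumption for illustration; the real bound comes from InvocationCounter::number_of_count_bits:

#include <cstdio>
#include <cstdint>

// log2 helper with the same convention as log2_intptr: largest i with 2^i <= x.
static int log2_int(int64_t x) {
    int i = -1;
    for (uint64_t p = 1; p != 0 && p <= (uint64_t)x; p <<= 1) i++;
    return i;
}

// Sketch of Arguments::scaled_freq_log(): scale 2^freq_log, then take log2 again,
// clamping at the largest notification frequency the interpreter/C1 masks allow.
static int64_t scaled_freq_log(int64_t freq_log, double scale) {
    const int64_t number_of_count_bits = 29;        // assumed value, for illustration only
    const int64_t max_freq_bits = number_of_count_bits + 1;
    if (scale == 1.0 || scale < 0.0) return freq_log;
    if (scale == 0.0) return 1;
    int64_t scaled_freq = (int64_t)((1LL << freq_log) * scale);
    if (scaled_freq > (1LL << max_freq_bits)) return max_freq_bits;
    return log2_int(scaled_freq);
}

int main() {
    std::printf("%lld\n", (long long)scaled_freq_log(7, 2.0));   // 2^7 * 2.0 = 256 -> 8
    std::printf("%lld\n", (long long)scaled_freq_log(7, 0.5));   // 128 * 0.5 = 64  -> 6
    return 0;
}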
@@ -1180,31 +1199,36 @@
Tier3InvokeNotifyFreqLog = 0;
Tier4InvocationThreshold = 0;
}
+
+ if (CompileThresholdScaling < 0) {
+ vm_exit_during_initialization("Negative value specified for CompileThresholdScaling", NULL);
+ }
+
// Scale tiered compilation thresholds
if (!FLAG_IS_DEFAULT(CompileThresholdScaling)) {
- FLAG_SET_ERGO(intx, Tier0InvokeNotifyFreqLog, get_scaled_freq_log(Tier0InvokeNotifyFreqLog));
- FLAG_SET_ERGO(intx, Tier0BackedgeNotifyFreqLog, get_scaled_freq_log(Tier0BackedgeNotifyFreqLog));
-
- FLAG_SET_ERGO(intx, Tier3InvocationThreshold, get_scaled_compile_threshold(Tier3InvocationThreshold));
- FLAG_SET_ERGO(intx, Tier3MinInvocationThreshold, get_scaled_compile_threshold(Tier3MinInvocationThreshold));
- FLAG_SET_ERGO(intx, Tier3CompileThreshold, get_scaled_compile_threshold(Tier3CompileThreshold));
- FLAG_SET_ERGO(intx, Tier3BackEdgeThreshold, get_scaled_compile_threshold(Tier3BackEdgeThreshold));
+ FLAG_SET_ERGO(intx, Tier0InvokeNotifyFreqLog, scaled_freq_log(Tier0InvokeNotifyFreqLog));
+ FLAG_SET_ERGO(intx, Tier0BackedgeNotifyFreqLog, scaled_freq_log(Tier0BackedgeNotifyFreqLog));
+
+ FLAG_SET_ERGO(intx, Tier3InvocationThreshold, scaled_compile_threshold(Tier3InvocationThreshold));
+ FLAG_SET_ERGO(intx, Tier3MinInvocationThreshold, scaled_compile_threshold(Tier3MinInvocationThreshold));
+ FLAG_SET_ERGO(intx, Tier3CompileThreshold, scaled_compile_threshold(Tier3CompileThreshold));
+ FLAG_SET_ERGO(intx, Tier3BackEdgeThreshold, scaled_compile_threshold(Tier3BackEdgeThreshold));
// Tier2{Invocation,MinInvocation,Compile,Backedge}Threshold should be scaled here
// once these thresholds become supported.
- FLAG_SET_ERGO(intx, Tier2InvokeNotifyFreqLog, get_scaled_freq_log(Tier2InvokeNotifyFreqLog));
- FLAG_SET_ERGO(intx, Tier2BackedgeNotifyFreqLog, get_scaled_freq_log(Tier2BackedgeNotifyFreqLog));
-
- FLAG_SET_ERGO(intx, Tier3InvokeNotifyFreqLog, get_scaled_freq_log(Tier3InvokeNotifyFreqLog));
- FLAG_SET_ERGO(intx, Tier3BackedgeNotifyFreqLog, get_scaled_freq_log(Tier3BackedgeNotifyFreqLog));
-
- FLAG_SET_ERGO(intx, Tier23InlineeNotifyFreqLog, get_scaled_freq_log(Tier23InlineeNotifyFreqLog));
-
- FLAG_SET_ERGO(intx, Tier4InvocationThreshold, get_scaled_compile_threshold(Tier4InvocationThreshold));
- FLAG_SET_ERGO(intx, Tier4MinInvocationThreshold, get_scaled_compile_threshold(Tier4MinInvocationThreshold));
- FLAG_SET_ERGO(intx, Tier4CompileThreshold, get_scaled_compile_threshold(Tier4CompileThreshold));
- FLAG_SET_ERGO(intx, Tier4BackEdgeThreshold, get_scaled_compile_threshold(Tier4BackEdgeThreshold));
+ FLAG_SET_ERGO(intx, Tier2InvokeNotifyFreqLog, scaled_freq_log(Tier2InvokeNotifyFreqLog));
+ FLAG_SET_ERGO(intx, Tier2BackedgeNotifyFreqLog, scaled_freq_log(Tier2BackedgeNotifyFreqLog));
+
+ FLAG_SET_ERGO(intx, Tier3InvokeNotifyFreqLog, scaled_freq_log(Tier3InvokeNotifyFreqLog));
+ FLAG_SET_ERGO(intx, Tier3BackedgeNotifyFreqLog, scaled_freq_log(Tier3BackedgeNotifyFreqLog));
+
+ FLAG_SET_ERGO(intx, Tier23InlineeNotifyFreqLog, scaled_freq_log(Tier23InlineeNotifyFreqLog));
+
+ FLAG_SET_ERGO(intx, Tier4InvocationThreshold, scaled_compile_threshold(Tier4InvocationThreshold));
+ FLAG_SET_ERGO(intx, Tier4MinInvocationThreshold, scaled_compile_threshold(Tier4MinInvocationThreshold));
+ FLAG_SET_ERGO(intx, Tier4CompileThreshold, scaled_compile_threshold(Tier4CompileThreshold));
+ FLAG_SET_ERGO(intx, Tier4BackEdgeThreshold, scaled_compile_threshold(Tier4BackEdgeThreshold));
}
}
@@ -3456,7 +3480,7 @@
}
if ((TieredCompilation && CompileThresholdScaling == 0)
- || (!TieredCompilation && get_scaled_compile_threshold(CompileThreshold) == 0)) {
+ || (!TieredCompilation && scaled_compile_threshold(CompileThreshold) == 0)) {
set_mode_flags(_int);
}
@@ -3896,7 +3920,7 @@
}
// Scale CompileThreshold
if (!FLAG_IS_DEFAULT(CompileThresholdScaling)) {
- FLAG_SET_ERGO(intx, CompileThreshold, get_scaled_compile_threshold(CompileThreshold));
+ FLAG_SET_ERGO(intx, CompileThreshold, scaled_compile_threshold(CompileThreshold));
}
}
--- a/hotspot/src/share/vm/runtime/arguments.hpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/runtime/arguments.hpp Wed Jul 05 20:16:58 2017 +0200
@@ -328,9 +328,6 @@
static bool _ClipInlining;
static bool _CIDynamicCompilePriority;
- // Scale compile thresholds
- static intx get_scaled_compile_threshold(intx threshold);
- static intx get_scaled_freq_log(intx freq_log);
// Tiered
static void set_tiered_flags();
static int get_min_number_of_compiler_threads();
@@ -452,6 +449,18 @@
static char* SharedArchivePath;
public:
+ // Scale compile thresholds
+ // Returns threshold scaled with CompileThresholdScaling
+ static intx scaled_compile_threshold(intx threshold, double scale);
+ static intx scaled_compile_threshold(intx threshold) {
+ return scaled_compile_threshold(threshold, CompileThresholdScaling);
+ }
+ // Returns freq_log scaled with CompileThresholdScaling
+ static intx scaled_freq_log(intx freq_log, double scale);
+ static intx scaled_freq_log(intx freq_log) {
+ return scaled_freq_log(freq_log, CompileThresholdScaling);
+ }
+
// Parses the arguments, first phase
static jint parse(const JavaVMInitArgs* args);
// Apply ergonomics
--- a/hotspot/src/share/vm/runtime/globals.hpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/runtime/globals.hpp Wed Jul 05 20:16:58 2017 +0200
@@ -2477,7 +2477,7 @@
"Number of compiler threads to run") \
\
product(intx, CompilationPolicyChoice, 0, \
- "which compilation policy (0/1)") \
+ "which compilation policy (0-3)") \
\
develop(bool, UseStackBanging, true, \
"use stack banging for stack overflow checks (required for " \
@@ -3528,7 +3528,16 @@
\
product(double, CompileThresholdScaling, 1.0, \
"Factor to control when first compilation happens " \
- "(both with and without tiered compilation)") \
+ "(both with and without tiered compilation): " \
+ "values greater than 1.0 delay counter overflow, " \
+ "values between 0 and 1.0 rush counter overflow, " \
+ "value of 1.0 leave compilation thresholds unchanged " \
+ "value of 0.0 is equivalent to -Xint. " \
+ "" \
+ "Flag can be set as per-method option. " \
+ "If a value is specified for a method, compilation thresholds " \
+ "for that method are scaled by both the value of the global flag "\
+ "and the value of the per-method flag.") \
\
product(intx, Tier0InvokeNotifyFreqLog, 7, \
"Interpreter (tier 0) invocation notification frequency") \
--- a/hotspot/src/share/vm/runtime/os.cpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/runtime/os.cpp Wed Jul 05 20:16:58 2017 +0200
@@ -1401,15 +1401,17 @@
return (sp > (stack_limit + reserved_area));
}
-size_t os::page_size_for_region(size_t region_size, size_t min_pages) {
+size_t os::page_size_for_region(size_t region_size, size_t min_pages, bool must_be_aligned) {
assert(min_pages > 0, "sanity");
if (UseLargePages) {
const size_t max_page_size = region_size / min_pages;
for (size_t i = 0; _page_sizes[i] != 0; ++i) {
const size_t page_size = _page_sizes[i];
- if (page_size <= max_page_size && is_size_aligned(region_size, page_size)) {
- return page_size;
+ if (page_size <= max_page_size) {
+ if (!must_be_aligned || is_size_aligned(region_size, page_size)) {
+ return page_size;
+ }
}
}
}
@@ -1417,6 +1419,14 @@
return vm_page_size();
}
+size_t os::page_size_for_region_aligned(size_t region_size, size_t min_pages) {
+ return page_size_for_region(region_size, min_pages, true);
+}
+
+size_t os::page_size_for_region_unaligned(size_t region_size, size_t min_pages) {
+ return page_size_for_region(region_size, min_pages, false);
+}
+
#ifndef PRODUCT
void os::trace_page_sizes(const char* str, const size_t* page_sizes, int count)
{
@@ -1665,17 +1675,17 @@
static size_t large_page_size() {
const size_t large_page_size_example = 4 * M;
- return os::page_size_for_region(large_page_size_example, 1);
+ return os::page_size_for_region_aligned(large_page_size_example, 1);
}
- static void test_page_size_for_region() {
+ static void test_page_size_for_region_aligned() {
if (UseLargePages) {
const size_t small_page = small_page_size();
const size_t large_page = large_page_size();
if (large_page > small_page) {
size_t num_small_pages_in_large = large_page / small_page;
- size_t page = os::page_size_for_region(large_page, num_small_pages_in_large);
+ size_t page = os::page_size_for_region_aligned(large_page, num_small_pages_in_large);
assert_eq(page, small_page);
}
@@ -1688,21 +1698,53 @@
const size_t large_page = large_page_size();
if (large_page > small_page) {
const size_t unaligned_region = large_page + 17;
- size_t page = os::page_size_for_region(unaligned_region, 1);
+ size_t page = os::page_size_for_region_aligned(unaligned_region, 1);
assert_eq(page, small_page);
const size_t num_pages = 5;
const size_t aligned_region = large_page * num_pages;
- page = os::page_size_for_region(aligned_region, num_pages);
+ page = os::page_size_for_region_aligned(aligned_region, num_pages);
assert_eq(page, large_page);
}
}
}
+ static void test_page_size_for_region_unaligned() {
+ if (UseLargePages) {
+ // Given exact page size, should return that page size.
+ for (size_t i = 0; os::_page_sizes[i] != 0; i++) {
+ size_t expected = os::_page_sizes[i];
+ size_t actual = os::page_size_for_region_unaligned(expected, 1);
+ assert_eq(expected, actual);
+ }
+
+ // Given slightly larger size than a page size, return the page size.
+ for (size_t i = 0; os::_page_sizes[i] != 0; i++) {
+ size_t expected = os::_page_sizes[i];
+ size_t actual = os::page_size_for_region_unaligned(expected + 17, 1);
+ assert_eq(expected, actual);
+ }
+
+ // Given a slightly smaller size than a page size,
+ // return the next smaller page size.
+ if (os::_page_sizes[1] > os::_page_sizes[0]) {
+ size_t expected = os::_page_sizes[0];
+ size_t actual = os::page_size_for_region_unaligned(os::_page_sizes[1] - 17, 1);
+ assert_eq(actual, expected);
+ }
+
+ // Return small page size for values less than a small page.
+ size_t small_page = small_page_size();
+ size_t actual = os::page_size_for_region_unaligned(small_page - 17, 1);
+ assert_eq(small_page, actual);
+ }
+ }
+
public:
static void run_tests() {
- test_page_size_for_region();
+ test_page_size_for_region_aligned();
test_page_size_for_region_alignment();
+ test_page_size_for_region_unaligned();
}
};
--- a/hotspot/src/share/vm/runtime/os.hpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/runtime/os.hpp Wed Jul 05 20:16:58 2017 +0200
@@ -148,6 +148,7 @@
static void pd_free_memory(char *addr, size_t bytes, size_t alignment_hint);
static void pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint);
+ static size_t page_size_for_region(size_t region_size, size_t min_pages, bool must_be_aligned);
public:
static void init(void); // Called before command line parsing
@@ -267,8 +268,13 @@
// Returns the page size to use for a region of memory.
// region_size / min_pages will always be greater than or equal to the
- // returned value.
- static size_t page_size_for_region(size_t region_size, size_t min_pages);
+ // returned value. The returned value will divide region_size.
+ static size_t page_size_for_region_aligned(size_t region_size, size_t min_pages);
+
+ // Returns the page size to use for a region of memory.
+ // region_size / min_pages will always be greater than or equal to the
+ // returned value. The returned value might not divide region_size.
+ static size_t page_size_for_region_unaligned(size_t region_size, size_t min_pages);
// Return the largest page size that can be used
static size_t max_page_size() {
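The aligned/unaligned split only changes whether the chosen page size must divide the region exactly. A standalone sketch of the selection loop, with a hypothetical page-size table standing in for os::_page_sizes:

#include <cstdio>
#include <cstddef>

static const size_t K = 1024, M = 1024 * K;
// Hypothetical page sizes, largest first, 0-terminated like os::_page_sizes.
static const size_t page_sizes[] = { 1024 * M, 2 * M, 4 * K, 0 };

// Mirrors the core of os::page_size_for_region(): pick the largest page size that
// still yields at least min_pages pages and (optionally) divides the region exactly.
static size_t page_size_for_region(size_t region_size, size_t min_pages, bool must_be_aligned) {
    const size_t max_page_size = region_size / min_pages;
    for (int i = 0; page_sizes[i] != 0; i++) {
        const size_t page_size = page_sizes[i];
        if (page_size <= max_page_size) {
            if (!must_be_aligned || region_size % page_size == 0) {
                return page_size;
            }
        }
    }
    return 4 * K;   // fall back to the small page size
}

int main() {
    // 2M + 17 bytes: the aligned lookup falls back to small pages, the unaligned one still picks 2M.
    std::printf("aligned:   %zu\n", page_size_for_region(2 * M + 17, 1, true));
    std::printf("unaligned: %zu\n", page_size_for_region(2 * M + 17, 1, false));
    return 0;
}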
--- a/hotspot/src/share/vm/runtime/simpleThresholdPolicy.cpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/runtime/simpleThresholdPolicy.cpp Wed Jul 05 20:16:58 2017 +0200
@@ -257,28 +257,28 @@
// Call and loop predicates determine whether a transition to a higher
// compilation level should be performed (pointers to predicate functions
// are passed to common() transition function).
-bool SimpleThresholdPolicy::loop_predicate(int i, int b, CompLevel cur_level) {
+bool SimpleThresholdPolicy::loop_predicate(int i, int b, CompLevel cur_level, Method* method) {
switch(cur_level) {
case CompLevel_none:
case CompLevel_limited_profile: {
- return loop_predicate_helper<CompLevel_none>(i, b, 1.0);
+ return loop_predicate_helper<CompLevel_none>(i, b, 1.0, method);
}
case CompLevel_full_profile: {
- return loop_predicate_helper<CompLevel_full_profile>(i, b, 1.0);
+ return loop_predicate_helper<CompLevel_full_profile>(i, b, 1.0, method);
}
default:
return true;
}
}
-bool SimpleThresholdPolicy::call_predicate(int i, int b, CompLevel cur_level) {
+bool SimpleThresholdPolicy::call_predicate(int i, int b, CompLevel cur_level, Method* method) {
switch(cur_level) {
case CompLevel_none:
case CompLevel_limited_profile: {
- return call_predicate_helper<CompLevel_none>(i, b, 1.0);
+ return call_predicate_helper<CompLevel_none>(i, b, 1.0, method);
}
case CompLevel_full_profile: {
- return call_predicate_helper<CompLevel_full_profile>(i, b, 1.0);
+ return call_predicate_helper<CompLevel_full_profile>(i, b, 1.0, method);
}
default:
return true;
@@ -293,8 +293,8 @@
int i = mdo->invocation_count();
int b = mdo->backedge_count();
double k = ProfileMaturityPercentage / 100.0;
- return call_predicate_helper<CompLevel_full_profile>(i, b, k) ||
- loop_predicate_helper<CompLevel_full_profile>(i, b, k);
+ return call_predicate_helper<CompLevel_full_profile>(i, b, k, method) ||
+ loop_predicate_helper<CompLevel_full_profile>(i, b, k, method);
}
return false;
}
@@ -313,7 +313,7 @@
// If we were at full profile level, would we switch to full opt?
if (common(p, method, CompLevel_full_profile) == CompLevel_full_optimization) {
next_level = CompLevel_full_optimization;
- } else if ((this->*p)(i, b, cur_level)) {
+ } else if ((this->*p)(i, b, cur_level, method)) {
next_level = CompLevel_full_profile;
}
break;
@@ -325,7 +325,7 @@
if (mdo->would_profile()) {
int mdo_i = mdo->invocation_count_delta();
int mdo_b = mdo->backedge_count_delta();
- if ((this->*p)(mdo_i, mdo_b, cur_level)) {
+ if ((this->*p)(mdo_i, mdo_b, cur_level, method)) {
next_level = CompLevel_full_optimization;
}
} else {
--- a/hotspot/src/share/vm/runtime/simpleThresholdPolicy.hpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/runtime/simpleThresholdPolicy.hpp Wed Jul 05 20:16:58 2017 +0200
@@ -43,9 +43,9 @@
// Call and loop predicates determine whether a transition to a higher compilation
// level should be performed (pointers to predicate functions are passed to common_TF().
// Predicates also take compiler load into account.
- typedef bool (SimpleThresholdPolicy::*Predicate)(int i, int b, CompLevel cur_level);
- bool call_predicate(int i, int b, CompLevel cur_level);
- bool loop_predicate(int i, int b, CompLevel cur_level);
+ typedef bool (SimpleThresholdPolicy::*Predicate)(int i, int b, CompLevel cur_level, Method* method);
+ bool call_predicate(int i, int b, CompLevel cur_level, Method* method);
+ bool loop_predicate(int i, int b, CompLevel cur_level, Method* method);
// Common transition function. Given a predicate determines if a method should transition to another level.
CompLevel common(Predicate p, Method* method, CompLevel cur_level);
// Transition functions.
@@ -76,8 +76,8 @@
// Predicate helpers are used by .*_predicate() methods as well as others.
// They check the given counter values, multiplied by the scale against the thresholds.
- template<CompLevel level> static inline bool call_predicate_helper(int i, int b, double scale);
- template<CompLevel level> static inline bool loop_predicate_helper(int i, int b, double scale);
+ template<CompLevel level> static inline bool call_predicate_helper(int i, int b, double scale, Method* method);
+ template<CompLevel level> static inline bool loop_predicate_helper(int i, int b, double scale, Method* method);
// Get a compilation level for a given method.
static CompLevel comp_level(Method* method) {
--- a/hotspot/src/share/vm/runtime/simpleThresholdPolicy.inline.hpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/runtime/simpleThresholdPolicy.inline.hpp Wed Jul 05 20:16:58 2017 +0200
@@ -25,8 +25,14 @@
#ifndef SHARE_VM_RUNTIME_SIMPLETHRESHOLDPOLICY_INLINE_HPP
#define SHARE_VM_RUNTIME_SIMPLETHRESHOLDPOLICY_INLINE_HPP
+#include "compiler/compilerOracle.hpp"
+
template<CompLevel level>
-bool SimpleThresholdPolicy::call_predicate_helper(int i, int b, double scale) {
+bool SimpleThresholdPolicy::call_predicate_helper(int i, int b, double scale, Method* method) {
+ double threshold_scaling;
+ if (CompilerOracle::has_option_value(method, "CompileThresholdScaling", threshold_scaling)) {
+ scale *= threshold_scaling;
+ }
switch(level) {
case CompLevel_none:
case CompLevel_limited_profile:
@@ -40,7 +46,11 @@
}
template<CompLevel level>
-bool SimpleThresholdPolicy::loop_predicate_helper(int i, int b, double scale) {
+bool SimpleThresholdPolicy::loop_predicate_helper(int i, int b, double scale, Method* method) {
+ double threshold_scaling;
+ if (CompilerOracle::has_option_value(method, "CompileThresholdScaling", threshold_scaling)) {
+ scale *= threshold_scaling;
+ }
switch(level) {
case CompLevel_none:
case CompLevel_limited_profile:
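The per-method option simply multiplies into the scale already passed to the predicate helpers, so the global flag and the method-level value compose. A minimal sketch of that composition; the lookup function is a stand-in for CompilerOracle::has_option_value, not the real API call, and the threshold comparison is simplified:

#include <cstdio>

// Stand-in for CompilerOracle::has_option_value(method, "CompileThresholdScaling", v):
// pretend the method has a per-method scaling of 0.5 attached via a CompileCommand.
static bool lookup_per_method_scaling(double* value) {
    *value = 0.5;
    return true;
}

// Sketch of the predicate-helper scaling: global scale times per-method scale,
// then compare the invocation count against the scaled threshold.
static bool call_predicate(int invocations, double global_scale, int threshold) {
    double scale = global_scale;
    double per_method;
    if (lookup_per_method_scaling(&per_method)) {
        scale *= per_method;               // both flags apply multiplicatively
    }
    return invocations >= (int)(threshold * scale);
}

int main() {
    // Global scaling 2.0 and per-method scaling 0.5 cancel out: the threshold stays at 2000.
    std::printf("%d\n", call_predicate(2000, 2.0, 2000));  // prints 1
    std::printf("%d\n", call_predicate(1999, 2.0, 2000));  // prints 0
    return 0;
}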
--- a/hotspot/src/share/vm/runtime/synchronizer.cpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/runtime/synchronizer.cpp Wed Jul 05 20:16:58 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -276,18 +276,6 @@
THREAD->set_current_pending_monitor_is_from_java(true);
}
-// NOTE: must use heavy weight monitor to handle jni monitor enter
-bool ObjectSynchronizer::jni_try_enter(Handle obj, Thread* THREAD) {
- if (UseBiasedLocking) {
- BiasedLocking::revoke_and_rebias(obj, false, THREAD);
- assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
- }
-
- ObjectMonitor* monitor = ObjectSynchronizer::inflate_helper(obj());
- return monitor->try_enter(THREAD);
-}
-
-
// NOTE: must use heavy weight monitor to handle jni monitor exit
void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
TEVENT(jni_exit);
--- a/hotspot/src/share/vm/runtime/synchronizer.hpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/runtime/synchronizer.hpp Wed Jul 05 20:16:58 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -65,7 +65,6 @@
// Used only to handle jni locks or other unmatched monitor enter/exit
// Internally they will use heavy weight monitor.
static void jni_enter(Handle obj, TRAPS);
- static bool jni_try_enter(Handle obj, Thread* THREAD); // Implements Unsafe.tryMonitorEnter
static void jni_exit(oop obj, Thread* THREAD);
// Handle all interpreter, compiler and jni cases
--- a/hotspot/src/share/vm/runtime/virtualspace.cpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/runtime/virtualspace.cpp Wed Jul 05 20:16:58 2017 +0200
@@ -38,7 +38,8 @@
}
ReservedSpace::ReservedSpace(size_t size) {
- size_t page_size = os::page_size_for_region(size, 1);
+ // Want to use large pages where possible and pad with small pages.
+ size_t page_size = os::page_size_for_region_unaligned(size, 1);
bool large_pages = page_size != (size_t)os::vm_page_size();
// Don't force the alignment to be large page aligned,
// since that will waste memory.
@@ -617,7 +618,7 @@
bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
- const size_t max_commit_granularity = os::page_size_for_region(rs.size(), 1);
+ const size_t max_commit_granularity = os::page_size_for_region_unaligned(rs.size(), 1);
return initialize_with_granularity(rs, committed_size, max_commit_granularity);
}
@@ -1239,7 +1240,7 @@
case Disable:
return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
case Commit:
- return vs.initialize_with_granularity(rs, 0, os::page_size_for_region(rs.size(), 1));
+ return vs.initialize_with_granularity(rs, 0, os::page_size_for_region_unaligned(rs.size(), 1));
}
}
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp Wed Jul 05 20:16:58 2017 +0200
@@ -351,11 +351,18 @@
nonstatic_field(MethodData, _arg_stack, intx) \
nonstatic_field(MethodData, _arg_returned, intx) \
nonstatic_field(MethodData, _tenure_traps, uint) \
+ nonstatic_field(MethodData, _invoke_mask, int) \
+ nonstatic_field(MethodData, _backedge_mask, int) \
nonstatic_field(DataLayout, _header._struct._tag, u1) \
nonstatic_field(DataLayout, _header._struct._flags, u1) \
nonstatic_field(DataLayout, _header._struct._bci, u2) \
nonstatic_field(DataLayout, _cells[0], intptr_t) \
nonstatic_field(MethodCounters, _nmethod_age, int) \
+ nonstatic_field(MethodCounters, _interpreter_invocation_limit, int) \
+ nonstatic_field(MethodCounters, _interpreter_backward_branch_limit, int) \
+ nonstatic_field(MethodCounters, _interpreter_profile_limit, int) \
+ nonstatic_field(MethodCounters, _invoke_mask, int) \
+ nonstatic_field(MethodCounters, _backedge_mask, int) \
nonstatic_field(MethodCounters, _interpreter_invocation_count, int) \
nonstatic_field(MethodCounters, _interpreter_throwout_count, u2) \
nonstatic_field(MethodCounters, _number_of_breakpoints, u2) \
--- a/hotspot/src/share/vm/utilities/globalDefinitions.hpp Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/src/share/vm/utilities/globalDefinitions.hpp Wed Jul 05 20:16:58 2017 +0200
@@ -1142,17 +1142,18 @@
return ((x != NoLongBits) && (mask_long_bits(x, x - 1) == NoLongBits));
}
-//* largest i such that 2^i <= x
-// A negative value of 'x' will return '31'
+// Returns largest i such that 2^i <= x.
+// If x < 0, the function returns 31 on a 32-bit machine and 63 on a 64-bit machine.
+// If x == 0, the function returns -1.
inline int log2_intptr(intptr_t x) {
int i = -1;
- uintptr_t p = 1;
+ uintptr_t p = 1;
while (p != 0 && p <= (uintptr_t)x) {
// p = 2^(i+1) && p <= x (i.e., 2^(i+1) <= x)
i++; p *= 2;
}
// p = 2^(i+1) && x < p (i.e., 2^i <= x < 2^(i+1))
- // (if p = 0 then overflow occurred and i = 31)
+ // If p = 0, overflow has occurred and i = 31 or i = 63 (depending on the machine word size).
return i;
}
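A few concrete values for log2_intptr, computed with a standalone copy of the loop (results for negative inputs shown for a 64-bit build; a 32-bit build returns 31, as the comment above notes):

#include <cstdio>
#include <cstdint>

// Standalone copy of the log2_intptr loop for experimentation.
static int log2_intptr_demo(intptr_t x) {
    int i = -1;
    uintptr_t p = 1;
    while (p != 0 && p <= (uintptr_t)x) {
        i++;
        p *= 2;
    }
    return i;
}

int main() {
    std::printf("%d\n", log2_intptr_demo(1));    //  0  (2^0 <= 1 < 2^1)
    std::printf("%d\n", log2_intptr_demo(7));    //  2
    std::printf("%d\n", log2_intptr_demo(8));    //  3
    std::printf("%d\n", log2_intptr_demo(0));    // -1
    std::printf("%d\n", log2_intptr_demo(-5));   // 63 on a 64-bit machine (31 on 32-bit)
    return 0;
}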
--- a/hotspot/test/compiler/arguments/CheckCompileThresholdScaling.java Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/test/compiler/arguments/CheckCompileThresholdScaling.java Wed Jul 05 20:16:58 2017 +0200
@@ -54,7 +54,7 @@
//
// Tier0InvokeNotifyFreqLog, Tier0BackedgeNotifyFreqLog,
// Tier3InvocationThreshold, Tier3MinInvocationThreshold,
- // Tier3CompileThreshold, and Tier3BackEdgeThreshold,
+ // Tier3CompileThreshold, Tier3BackEdgeThreshold,
// Tier2InvokeNotifyFreqLog, Tier2BackedgeNotifyFreqLog,
// Tier3InvokeNotifyFreqLog, Tier3BackedgeNotifyFreqLog,
// Tier23InlineeNotifyFreqLog, Tier4InvocationThreshold,
--- a/hotspot/test/compiler/codecache/jmx/PoolsIndependenceTest.java Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/test/compiler/codecache/jmx/PoolsIndependenceTest.java Wed Jul 05 20:16:58 2017 +0200
@@ -98,11 +98,13 @@
return false;
});
for (BlobType bt : BlobType.getAvailable()) {
- int expectedNotificationsAmount = bt.equals(btype) ? 1 : 0;
- Asserts.assertEQ(counters.get(bt.getMemoryPool().getName()).get(),
- expectedNotificationsAmount, String.format("Unexpected "
- + "amount of notifications for pool: %s",
- bt.getMemoryPool().getName()));
+ if (CodeCacheUtils.isCodeHeapPredictable(bt)) {
+ int expectedNotificationsAmount = bt.equals(btype) ? 1 : 0;
+ Asserts.assertEQ(counters.get(bt.getMemoryPool().getName()).get(),
+ expectedNotificationsAmount, String.format("Unexpected "
+ + "amount of notifications for pool: %s",
+ bt.getMemoryPool().getName()));
+ }
}
try {
((NotificationEmitter) ManagementFactory.getMemoryMXBean()).
--- a/hotspot/test/compiler/codecache/jmx/ThresholdNotificationsTest.java Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/test/compiler/codecache/jmx/ThresholdNotificationsTest.java Wed Jul 05 20:16:58 2017 +0200
@@ -52,7 +52,9 @@
public static void main(String[] args) {
for (BlobType bt : BlobType.getAvailable()) {
- new ThresholdNotificationsTest(bt).runTest();
+ if (CodeCacheUtils.isCodeHeapPredictable(bt)) {
+ new ThresholdNotificationsTest(bt).runTest();
+ }
}
}
--- a/hotspot/test/compiler/loopopts/7052494/Test7052494.java Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/test/compiler/loopopts/7052494/Test7052494.java Wed Jul 05 20:16:58 2017 +0200
@@ -25,7 +25,6 @@
/**
* @test
* @bug 7052494
- * @ignore 7154567
* @summary Eclipse test fails on JDK 7 b142
*
* @run main/othervm -Xbatch Test7052494
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/stringopts/TestOptimizeStringConcat.java Wed Jul 05 20:16:58 2017 +0200
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2015 SAP AG. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8068909
+ * @key regression
+ * @summary Test that string optimizations produce code that doesn't lead to a crash.
+ * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement TestOptimizeStringConcat
+ * @author axel.siebenborn@sap.com
+ */
+public class TestOptimizeStringConcat {
+
+ static boolean checkArgumentSyntax(String value, String allowedchars, String notallowedchars, String logmsg) {
+ String rc = null;
+
+ int maxchar = 99999;
+ int minchar = 1;
+ if ((allowedchars != null && notallowedchars != null) || minchar > maxchar) {
+ rc = "internal error";
+ } else {
+ if (value == null) {
+ rc = "the value null is not allowed, it is missing";
+ } else if (value != null && minchar > 0 && value.trim().equals("")) {
+ rc = "the value must not be empty";
+ } else if (value != null) {
+ if (value.length() < minchar || value.length() > maxchar) {
+ if (rc == null) {
+ rc = "the value length must be between +minchar+ and +maxchar";
+ }
+ }
+ char[] _value = value.toCharArray();
+ boolean dotfound = false;
+ int i = 1;
+ if (_value[i] == '.' && !dotfound) {
+ dotfound = true;
+ } else if (allowedchars != null && allowedchars.indexOf(_value[i]) == -1) {
+ if (rc == null) {
+ rc = "the value contains an illegal character: '" + _value[i] + "', only following characters are allowed: '+allowedchars+'";
+ } else {
+ rc += " / the value contains an illegal character: '" + _value[i] + "', only following characters are allowed: '+allowedchars+'";
+ }
+ } else if (notallowedchars != null && notallowedchars.indexOf(_value[i]) != -1) {
+ if (rc == null) {
+ rc = "the value contains an illegal character: '" + _value[i] + "', following characters are not allowed '+notallowedchars+'";
+ } else {
+ rc += " / the value contains an illegal character: '" + _value[i] + "', following characters are not allowed '+notallowedchars+'";
+ }
+ }
+ }
+ }
+
+ if (rc != null) {
+ System.out.println(logmsg + " ==> " + rc);
+ return false;
+ }
+ return true;
+ }
+
+ public static void main(String[] args) {
+ boolean failed = false;
+ for (int i = 0; i < 10000; i++) {
+ failed |= !checkArgumentSyntax("theName", null, "\"<&", "Error consistencyCheck: name in component definition");
+ failed |= !checkArgumentSyntax(null, null, "\"<&", "Error consistencyCheck: name in component definition");
+ failed |= !checkArgumentSyntax("42", "0123456789.", null, "Error consistencyCheck: counter in component definition");
+ }
+ System.out.println(failed);
+ }
+}
--- a/hotspot/test/compiler/testlibrary/rtm/BusyLock.java Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/test/compiler/testlibrary/rtm/BusyLock.java Wed Jul 05 20:16:58 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,9 +24,6 @@
package rtm;
-import com.oracle.java.testlibrary.Utils;
-import sun.misc.Unsafe;
-
import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CyclicBarrier;
@@ -42,7 +39,6 @@
// Following field have to be static in order to avoid escape analysis.
@SuppressWarnings("UnsuedDeclaration")
private static int field = 0;
- private static final Unsafe UNSAFE = Utils.getUnsafe();
protected final Object monitor;
protected final int timeout;
@@ -59,18 +55,9 @@
@Override
public void run() {
try {
- // wait until forceAbort leave monitor
- barrier.await();
- if (UNSAFE.tryMonitorEnter(monitor)) {
- try {
- barrier.await();
- Thread.sleep(timeout);
- } finally {
- UNSAFE.monitorExit(monitor);
- }
- } else {
- throw new RuntimeException("Monitor should be entered by " +
- "::run() first.");
+ synchronized (monitor) {
+ barrier.await();
+ Thread.sleep(timeout);
}
} catch (InterruptedException | BrokenBarrierException e) {
throw new RuntimeException("Synchronization error happened.", e);
@@ -79,7 +66,6 @@
public void syncAndTest() {
try {
- barrier.await();
// wait until monitor is locked by a ::run method
barrier.await();
} catch (InterruptedException | BrokenBarrierException e) {
--- a/hotspot/test/gc/TestNUMAPageSize.java Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/test/gc/TestNUMAPageSize.java Wed Jul 05 20:16:58 2017 +0200
@@ -25,6 +25,7 @@
* @test TestNUMAPageSize
* @summary Make sure that start up with NUMA support does not cause problems.
* @bug 8061467
+ * @requires (vm.opt.AggressiveOpts == null) | (vm.opt.AggressiveOpts == false)
* @key gc
* @key regression
* @run main/othervm -Xmx8M -XX:+UseNUMA TestNUMAPageSize
--- a/hotspot/test/gc/TestSmallHeap.java Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/test/gc/TestSmallHeap.java Wed Jul 05 20:16:58 2017 +0200
@@ -25,6 +25,7 @@
* @test TestSmallHeap
* @bug 8067438
* @requires vm.gc=="null"
+ * @requires (vm.opt.AggressiveOpts=="null") | (vm.opt.AggressiveOpts=="false")
* @summary Verify that starting the VM with a small heap works
* @library /testlibrary /../../test/lib
* @build TestSmallHeap
@@ -33,8 +34,9 @@
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xmx2m -XX:+UseSerialGC TestSmallHeap
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xmx2m -XX:+UseG1GC TestSmallHeap
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xmx2m -XX:+UseConcMarkSweepGC TestSmallHeap
- *
- * Note: It would be nice to verify the minimal supported heap size (2m) here,
+ */
+
+/* Note: It would be nice to verify the minimal supported heap size (2m) here,
* but we align the heap size based on the card table size. And the card table
* size is aligned based on the minimal pages size provided by the os. This
* means that on most platforms, where the minimal page size is 4k, we get a
--- a/hotspot/test/gc/g1/TestHumongousCodeCacheRoots.java Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/test/gc/g1/TestHumongousCodeCacheRoots.java Wed Jul 05 20:16:58 2017 +0200
@@ -116,7 +116,14 @@
ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(finalargs.toArray(new String[0]));
OutputAnalyzer output = new OutputAnalyzer(pb.start());
- output.shouldHaveExitValue(0);
+ try {
+ output.shouldHaveExitValue(0);
+ } catch (RuntimeException e) {
+ // It's ok if there is no client vm in the jdk.
+ if (output.firstMatch("Unrecognized option: -client") == null) {
+ throw e;
+ }
+ }
return output;
}
--- a/hotspot/test/runtime/ErrorHandling/ProblematicFrameTest.java Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/test/runtime/ErrorHandling/ProblematicFrameTest.java Wed Jul 05 20:16:58 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -44,7 +44,7 @@
public static void main(String[] args) throws Exception {
ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
- "-Xmx64m", "-XX:-TransmitErrorReport", Crasher.class.getName());
+ "-Xmx64m", "-XX:-TransmitErrorReport", "-XX:-CreateMinidumpOnCrash", Crasher.class.getName());
OutputAnalyzer output = new OutputAnalyzer(pb.start());
output.shouldNotMatch("error occurred during error reporting \\(printing problematic frame\\)");
}
--- a/hotspot/test/serviceability/dcmd/compiler/CompilerQueueTest.java Thu Jan 29 16:16:35 2015 -0800
+++ b/hotspot/test/serviceability/dcmd/compiler/CompilerQueueTest.java Wed Jul 05 20:16:58 2017 +0200
@@ -25,6 +25,7 @@
* @test CompilerQueueTest
* @bug 8054889
* @library ..
+ * @ignore 8069160
* @build DcmdUtil CompilerQueueTest
* @run main CompilerQueueTest
* @run main/othervm -XX:-TieredCompilation CompilerQueueTest
--- a/make/Images.gmk Thu Jan 29 16:16:35 2015 -0800
+++ b/make/Images.gmk Wed Jul 05 20:16:58 2017 +0200
@@ -111,14 +111,16 @@
# Use this file inside the image as target for make rule
JIMAGE_TARGET_FILE := bin/java$(EXE_SUFFIX)
-$(JDK_IMAGE_DIR)/$(JIMAGE_TARGET_FILE): $(DEPENDENCIES)
+$(JDK_IMAGE_DIR)/$(JIMAGE_TARGET_FILE): $(DEPENDENCIES) \
+ $(call DependOnVariable, JDK_MODULES_LIST)
$(ECHO) Creating jdk jimage
$(RM) -r $(JDK_IMAGE_DIR) $(JDK_SORTED_MODULES)
$(JIMAGE_TOOL) --mods $(JDK_MODULES_LIST) --output $(JDK_IMAGE_DIR) \
$(MODULES_XML) > $(JDK_SORTED_MODULES)
$(TOUCH) $@
-$(JRE_IMAGE_DIR)/$(JIMAGE_TARGET_FILE): $(DEPENDENCIES)
+$(JRE_IMAGE_DIR)/$(JIMAGE_TARGET_FILE): $(DEPENDENCIES) \
+ $(call DependOnVariable, JRE_MODULES_LIST)
$(ECHO) Creating jre jimage
$(RM) -r $(JRE_IMAGE_DIR) $(JRE_SORTED_MODULES)
$(JIMAGE_TOOL) --mods $(JRE_MODULES_LIST) --output $(JRE_IMAGE_DIR) \
@@ -131,7 +133,8 @@
COMPACT_EXTRA_MODULES := jdk.localedata jdk.crypto.pkcs11 jdk.crypto.ec
-$(JRE_COMPACT1_IMAGE_DIR)/$(JIMAGE_TARGET_FILE): $(DEPENDENCIES)
+$(JRE_COMPACT1_IMAGE_DIR)/$(JIMAGE_TARGET_FILE): $(DEPENDENCIES) \
+ $(call DependOnVariable, JRE_COMPACT1_MODULES_LIST)
$(ECHO) Creating jre compact1 jimage
$(RM) -r $(JRE_COMPACT1_IMAGE_DIR) $(JRE_COMPACT1_SORTED_MODULES)
$(JIMAGE_TOOL) \
@@ -140,7 +143,8 @@
$(MODULES_XML) > $(JRE_COMPACT1_SORTED_MODULES)
$(TOUCH) $@
-$(JRE_COMPACT2_IMAGE_DIR)/$(JIMAGE_TARGET_FILE): $(DEPENDENCIES)
+$(JRE_COMPACT2_IMAGE_DIR)/$(JIMAGE_TARGET_FILE): $(DEPENDENCIES) \
+ $(call DependOnVariable, JRE_COMPACT2_MODULES_LIST)
$(ECHO) Creating jre compact2 jimage
$(RM) -r $(JRE_COMPACT2_IMAGE_DIR) $(JRE_COMPACT2_SORTED_MODULES)
$(JIMAGE_TOOL) \
@@ -149,7 +153,8 @@
$(MODULES_XML) > $(JRE_COMPACT2_SORTED_MODULES)
$(TOUCH) $@
-$(JRE_COMPACT3_IMAGE_DIR)/$(JIMAGE_TARGET_FILE): $(DEPENDENCIES)
+$(JRE_COMPACT3_IMAGE_DIR)/$(JIMAGE_TARGET_FILE): $(DEPENDENCIES) \
+ $(call DependOnVariable, JRE_COMPACT3_MODULES_LIST)
$(ECHO) Creating jre compact3 jimage
$(RM) -r $(JRE_COMPACT3_IMAGE_DIR) $(JRE_COMPACT3_SORTED_MODULES)
$(JIMAGE_TOOL) \
@@ -368,45 +373,59 @@
# Common way to emit a line into the release or info file
define info-file-item # name value
- $(PRINTF) '%s="%s"\n' $1 $2 >> $@
+ $(PRINTF) '%s="%s"\n' $1 $2 >> $@
endef
# Param 1 - The file containing the MODULES list
define create-info-file
- $(ECHO) $(LOG_INFO) Generating $(patsubst $(OUTPUT_ROOT)/%,%,$@)
- $(MKDIR) -p $(@D)
- $(RM) $@
- $(call info-file-item, "JAVA_VERSION", "$(JDK_VERSION)")
- $(call info-file-item, "OS_NAME", "$(REQUIRED_OS_NAME)")
- $(call info-file-item, "OS_VERSION", "$(REQUIRED_OS_VERSION)")
- $(call info-file-item, "OS_ARCH", "$(OPENJDK_TARGET_CPU_LEGACY)")
- $(if $(JDK_ARCH_ABI_PROP_NAME), \
- $(call info-file-item, "SUN_ARCH_ABI", "$(JDK_ARCH_ABI_PROP_NAME)"))
- $(call info-file-item, "SOURCE", "$(ALL_SOURCE_TIPS)")
- $(call info-file-item, "MODULES", "`$(CAT) $1`")
+ $(call info-file-item, "JAVA_VERSION", "$(JDK_VERSION)")
+ $(call info-file-item, "OS_NAME", "$(REQUIRED_OS_NAME)")
+ $(call info-file-item, "OS_VERSION", "$(REQUIRED_OS_VERSION)")
+ $(call info-file-item, "OS_ARCH", "$(OPENJDK_TARGET_CPU_LEGACY)")
+ $(if $(JDK_ARCH_ABI_PROP_NAME), \
+ $(call info-file-item, "SUN_ARCH_ABI", "$(JDK_ARCH_ABI_PROP_NAME)"))
+ $(call info-file-item, "SOURCE", "$(ALL_SOURCE_TIPS)")
+ $(call info-file-item, "MODULES", "`$(CAT) $1`")
endef
+# Param 1 - The file containing the MODULES list
+define prepare-info-file
+ $(ECHO) $(LOG_INFO) Generating $(patsubst $(OUTPUT_ROOT)/%,%,$@)
+ $(MKDIR) -p $(@D)
+ $(RM) $@
+endef
+
+define info-file
+ $(call prepare-info-file, $1)
+ $(call create-info-file, $1)
+endef
+
+# Create a variable dependency file common for all release info files. The
+# sorted module list will only change if the image is regenerated, which will
+# trigger a rebuild of these files anyway.
+INFO_FILE_VARDEPS := $(call DependOnVariable, create-info-file)
+
ALL_SOURCE_TIPS = $(shell \
if [ -f $(SUPPORT_OUTPUTDIR)/source_tips ] ; then \
$(CAT) $(SUPPORT_OUTPUTDIR)/source_tips ; \
fi)
-$(JRE_INFO_FILE): $(OUTPUT_ROOT)/spec.gmk $(SUPPORT_OUTPUTDIR)/source_tips
- $(call create-info-file, $(JRE_SORTED_MODULES))
+$(JRE_INFO_FILE): $(INFO_FILE_VARDEPS) $(SUPPORT_OUTPUTDIR)/source_tips
+ $(call info-file, $(JRE_SORTED_MODULES))
-$(JDK_INFO_FILE): $(OUTPUT_ROOT)/spec.gmk $(SUPPORT_OUTPUTDIR)/source_tips
- $(call create-info-file, $(JDK_SORTED_MODULES))
+$(JDK_INFO_FILE): $(INFO_FILE_VARDEPS) $(SUPPORT_OUTPUTDIR)/source_tips
+ $(call info-file, $(JDK_SORTED_MODULES))
-$(JRE_COMPACT1_INFO_FILE): $(OUTPUT_ROOT)/spec.gmk $(SUPPORT_OUTPUTDIR)/source_tips
- $(call create-info-file, $(JRE_COMPACT1_SORTED_MODULES))
+$(JRE_COMPACT1_INFO_FILE): $(INFO_FILE_VARDEPS) $(SUPPORT_OUTPUTDIR)/source_tips
+ $(call info-file, $(JRE_COMPACT1_SORTED_MODULES))
$(call info-file-item, "JAVA_PROFILE", "compact1")
-$(JRE_COMPACT2_INFO_FILE): $(OUTPUT_ROOT)/spec.gmk $(SUPPORT_OUTPUTDIR)/source_tips
- $(call create-info-file, $(JRE_COMPACT2_SORTED_MODULES))
+$(JRE_COMPACT2_INFO_FILE): $(INFO_FILE_VARDEPS) $(SUPPORT_OUTPUTDIR)/source_tips
+ $(call info-file, $(JRE_COMPACT2_SORTED_MODULES))
$(call info-file-item, "JAVA_PROFILE", "compact2")
-$(JRE_COMPACT3_INFO_FILE): $(OUTPUT_ROOT)/spec.gmk $(SUPPORT_OUTPUTDIR)/source_tips
- $(call create-info-file, $(JRE_COMPACT3_SORTED_MODULES))
+$(JRE_COMPACT3_INFO_FILE): $(INFO_FILE_VARDEPS) $(SUPPORT_OUTPUTDIR)/source_tips
+ $(call info-file, $(JRE_COMPACT3_SORTED_MODULES))
$(call info-file-item, "JAVA_PROFILE", "compact3")
JRE_TARGETS += $(JRE_INFO_FILE)
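For reference, here is a minimal standalone sketch (not part of the patch) of the idea behind INFO_FILE_VARDEPS above: the text of a recipe kept in a define can itself be treated as a variable value, so DependOnVariable makes the generated files rebuild whenever that recipe text, or anything it expands, changes. The variable, target and version string below are invented, and MakeBase.gmk (which provides DependOnVariable) is assumed to be included.

RELEASE_INFO_VERSION := 9-internal

# Recipe text kept in a variable; editing this text should regenerate the file.
define emit-release-info
  printf 'JAVA_VERSION="%s"\n' '$(RELEASE_INFO_VERSION)' >> $@
endef

# DependOnVariable rewrites its value file only when the value changes and
# returns the file name, so it can be listed as a prerequisite.
RELEASE_VARDEPS := $(call DependOnVariable, emit-release-info)

$(IMAGE_DIR)/release: $(RELEASE_VARDEPS)
	$(RM) $@
	$(emit-release-info)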
--- a/make/Main.gmk Thu Jan 29 16:16:35 2015 -0800
+++ b/make/Main.gmk Wed Jul 05 20:16:58 2017 +0200
@@ -402,6 +402,8 @@
verify-modules: exploded-image
+ test-make: clean-test-make
+
endif
################################################################################
@@ -446,7 +448,7 @@
# alias for ease of use.
jdk: exploded-image
-images: test-image jimages demos samples zip-security
+images: test-image jimages demos samples zip-security verify-modules
ifeq ($(OPENJDK_TARGET_OS), macosx)
images: mac-bundles
@@ -477,7 +479,7 @@
# file.
CLEAN_DIRS += hotspot jdk bootcycle-build test buildtools support \
- images make-support
+ images make-support test-make
CLEAN_DIR_TARGETS := $(addprefix clean-, $(CLEAN_DIRS))
CLEAN_PHASES := gensrc java native include
CLEAN_PHASE_TARGETS := $(addprefix clean-, $(CLEAN_PHASES))
--- a/make/ZipSource.gmk Thu Jan 29 16:16:35 2015 -0800
+++ b/make/ZipSource.gmk Wed Jul 05 20:16:58 2017 +0200
@@ -71,7 +71,7 @@
$(JDK_TOPDIR)/src/java.base/$(OPENJDK_TARGET_OS_API_DIR)/native/libjli/java_md*)))
# This dir needs to exist before macro is evaluated to avoid warning from find.
-$(eval $(call MakeDir, $(SUPPORT_OUTPUTDIR)/src))
+$(call MakeDir, $(SUPPORT_OUTPUTDIR)/src)
$(eval $(call SetupZipArchive,BUILD_SRC_ZIP, \
SRC := $(SRC_ZIP_SRCS) $(SUPPORT_OUTPUTDIR)/src, \
INCLUDES := $(SRC_ZIP_INCLUDES) launcher, \
--- a/make/common/IdlCompilation.gmk Thu Jan 29 16:16:35 2015 -0800
+++ b/make/common/IdlCompilation.gmk Wed Jul 05 20:16:58 2017 +0200
@@ -99,7 +99,7 @@
$(if $(16),$(error Internal makefile error: Too many arguments to SetupIdlCompilation, please update IdlCompilation.gmk))
# Find all existing java files and existing class files.
- $$(eval $$(call MakeDir,$$($1_BIN)))
+ $$(call MakeDir,$$($1_BIN))
$1_SRCS := $$(shell find $$($1_SRC) -name "*.idl")
$1_BINS := $$(shell find $$($1_BIN) -name "*.java")
# Prepend the source/bin path to the filter expressions.
--- a/make/common/JavaCompilation.gmk Thu Jan 29 16:16:35 2015 -0800
+++ b/make/common/JavaCompilation.gmk Wed Jul 05 20:16:58 2017 +0200
@@ -126,17 +126,20 @@
$1_FIND_PATTERNS:=$(FALSE_FIND_PATTERN) $$(patsubst %,$(SPACE)-o$(SPACE)-name$(SPACE)$(DQUOTE)*%$(DQUOTE),$$($1_SUFFIXES))
# On windows, a lot of includes/excludes risk making the command line too long, so
# writing the grep patterns to files.
+ # Grep returns 1 if nothing is matched. Do not fail the build for this.
ifneq (,$$($1_INCLUDES))
$1_GREP_INCLUDE_PATTERNS:=$$(call EscapeDollar, \
$$(foreach src,$$($1_SRCS), $$(addprefix $$(src)/,$$($1_INCLUDES))))
# If there are a lot of include patterns, output to file to shorten command lines
ifeq ($$(word 20,$$($1_GREP_INCLUDE_PATTERNS)),)
- $1_GREP_INCLUDES:=| $(GREP) $$(patsubst %,$(SPACE)-e$(SPACE)$(DQUOTE)%$(DQUOTE),$$($1_GREP_INCLUDE_PATTERNS))
+ $1_GREP_INCLUDES:=| ( $(GREP) $$(patsubst %,$(SPACE)-e$(SPACE)$(DQUOTE)%$(DQUOTE),$$($1_GREP_INCLUDE_PATTERNS)) \
+ || test "$$$$?" = "1" )
else
$1_GREP_INCLUDE_OUTPUT:=$(RM) $$($1_BIN)/_the.$$($1_JARNAME)_include $$(NEWLINE) \
$$(call ListPathsSafely,$1_GREP_INCLUDE_PATTERNS,\n, \
>> $$($1_BIN)/_the.$$($1_JARNAME)_include)
- $1_GREP_INCLUDES:=| $(GREP) -f $$($1_BIN)/_the.$$($1_JARNAME)_include
+ $1_GREP_INCLUDES:=| ( $(GREP) -f $$($1_BIN)/_the.$$($1_JARNAME)_include \
+ || test "$$$$?" = "1" )
endif
endif
ifneq (,$$($1_EXCLUDES)$$($1_EXCLUDE_FILES))
@@ -145,12 +148,14 @@
$$($1_EXCLUDES) $$($1_EXCLUDE_FILES))))
# If there are a lot of include patterns, output to file to shorten command lines
ifeq ($$(word 20,$$($1_GREP_EXCLUDE_PATTERNS)),)
- $1_GREP_EXCLUDES:=| $(GREP) -v $$(patsubst %,$(SPACE)-e$(SPACE)$(DQUOTE)%$(DQUOTE),$$($1_GREP_EXCLUDE_PATTERNS))
+ $1_GREP_EXCLUDES:=| ( $(GREP) -v $$(patsubst %,$(SPACE)-e$(SPACE)$(DQUOTE)%$(DQUOTE),$$($1_GREP_EXCLUDE_PATTERNS)) \
+ || test "$$$$?" = "1" )
else
$1_GREP_EXCLUDE_OUTPUT=$(RM) $$($1_BIN)/_the.$$($1_JARNAME)_exclude $$(NEWLINE) \
$$(call ListPathsSafely,$1_GREP_EXCLUDE_PATTERNS,\n, \
>> $$($1_BIN)/_the.$$($1_JARNAME)_exclude)
- $1_GREP_EXCLUDES:=| $(GREP) -v -f $$($1_BIN)/_the.$$($1_JARNAME)_exclude
+ $1_GREP_EXCLUDES:=| ( $(GREP) -v -f $$($1_BIN)/_the.$$($1_JARNAME)_exclude \
+ || test "$$$$?" = "1" )
endif
endif
@@ -222,9 +227,11 @@
$$($1_CAPTURE_EXTRA_FILES)
# The capture metainf macro finds all files below the META-INF directory that are newer than the jar-file.
+ # Find returns non-zero if the META-INF dir does not exist; ignore this.
ifeq (,$$($1_SKIP_METAINF))
$1_CAPTURE_METAINF =$$(foreach src,$$($1_SRCS), \
- ( $(FIND) $$(src)/META-INF -type f -a -newer $$@ 2> /dev/null | $(SED) 's|$$(src)/|-C $$(src) |g' >> \
+ ( ( $(FIND) $$(src)/META-INF -type f -a -newer $$@ 2> /dev/null || true ) \
+ | $(SED) 's|$$(src)/|-C $$(src) |g' >> \
$$($1_BIN)/_the.$$($1_JARNAME)_contents ) $$(NEWLINE) )
endif
# The capture deletes macro finds all deleted files and concatenates them. The resulting file
@@ -248,9 +255,11 @@
>> $$($1_BIN)/_the.$$($1_JARNAME)_contents $$(NEWLINE)) \
$$($1_CAPTURE_EXTRA_FILES)
+ # Find returns non-zero if the META-INF dir does not exist; ignore this.
ifeq (,$$($1_SKIP_METAINF))
$1_SCAPTURE_METAINF=$$(foreach src,$$($1_SRCS), \
- ( $(FIND) $$(src)/META-INF -type f 2> /dev/null | $(SED) 's|$$(src)/|-C $$(src) |g' >> \
+ ( ( $(FIND) $$(src)/META-INF -type f 2> /dev/null || true ) \
+ | $(SED) 's|$$(src)/|-C $$(src) |g' >> \
$$($1_BIN)/_the.$$($1_JARNAME)_contents) $$(NEWLINE) )
endif
$1_SUPDATE_CONTENTS=$(JAR) $$($1_JAR_UPDATE_OPTIONS) $$@ @$$($1_BIN)/_the.$$($1_JARNAME)_contents $$(NEWLINE)
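For reference, a standalone approximation (not part of the patch) of why these guards are needed once configure arranges for bash to run with -e and -o pipefail: grep exits 1 on "no match" and find exits non-zero on a missing directory, and either would otherwise abort the recipe. The target and paths below are invented, and the real build passes the options through BASH_ARGS and the shell wrapper rather than .SHELLFLAGS.

SHELL := bash
.SHELLFLAGS := -e -o pipefail -c

java-list.txt:
	# grep exiting 1 (nothing matched) is turned into success by the guard,
	# while a genuine grep error (exit 2) still fails through pipefail.
	find src -type f | ( grep '\.java$$' || test "$$?" = "1" ) | sort > $@
	# find exits non-zero when META-INF is missing; '|| true' swallows that.
	( find src/META-INF -type f 2> /dev/null || true ) | wc -l >> $@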
@@ -270,19 +279,37 @@
$1_JAR_UPDATE_OPTIONS := uf
endif
+ # Include all variables of significance in the vardeps file
+ $1_VARDEPS := $(JAR) $$($1_JAR_CREATE_OPTIONS) $$($1_MANIFEST) $(RELEASE) $(COMPANY_NAME) \
+ $$($1_JARMAIN) $$($1_EXTRA_MANIFEST_ATTR)
+ $1_VARDEPS_FILE := $$(call DependOnVariable, $1_VARDEPS, $$(dir $$($1_JAR))_the.$$($1_JARNAME).vardeps)
+
# Here is the rule that creates/updates the jar file.
- $$($1_JAR) : $$($1_DEPS)
+ $$($1_JAR) : $$($1_DEPS) $$($1_MANIFEST) $$($1_VARDEPS_FILE)
$(MKDIR) -p $$($1_BIN)
$$($1_GREP_INCLUDE_OUTPUT)
$$($1_GREP_EXCLUDE_OUTPUT)
- $$(if $$($1_MANIFEST), \
- $(SED) -e "s#@@RELEASE@@#$(RELEASE)#" \
- -e "s#@@COMPANY_NAME@@#$(COMPANY_NAME)#" $$($1_MANIFEST) > $$($1_MANIFEST_FILE) \
+ # If the vardeps file is part of the newer prereq list, it means that
+ # either the jar file does not exist, or we need to recreate it
+ # from scratch anyway since a simple update will not catch all the
+ # potential changes.
+ $$(if $$(filter $$($1_VARDEPS_FILE) $$($1_MANIFEST), $$?), \
+ $$(if $$($1_MANIFEST), \
+ $(SED) -e "s#@@RELEASE@@#$(RELEASE)#" \
+ -e "s#@@COMPANY_NAME@@#$(COMPANY_NAME)#" $$($1_MANIFEST) > $$($1_MANIFEST_FILE) $$(NEWLINE) \
+ , \
+ $(RM) $$($1_MANIFEST_FILE) && $(TOUCH) $$($1_MANIFEST_FILE) $$(NEWLINE)) \
+ $$(if $$($1_JARMAIN), \
+ $(ECHO) "Main-Class: $$(strip $$($1_JARMAIN))" >> $$($1_MANIFEST_FILE) $$(NEWLINE)) \
+ $$(if $$($1_EXTRA_MANIFEST_ATTR), \
+ $(PRINTF) "$$($1_EXTRA_MANIFEST_ATTR)\n" >> $$($1_MANIFEST_FILE) $$(NEWLINE)) \
+ $(ECHO) Creating $$($1_NAME) $$(NEWLINE) \
+ $(JAR) $$($1_JAR_CREATE_OPTIONS) $$@ $$($1_MANIFEST_FILE) $$(NEWLINE) \
+ $$($1_SCAPTURE_CONTENTS) \
+ $$($1_SCAPTURE_METAINF) \
+ $$($1_SUPDATE_CONTENTS) \
+ $$($1_JARINDEX) && true \
, \
- $(RM) $$($1_MANIFEST_FILE) && $(TOUCH) $$($1_MANIFEST_FILE))
- $$(if $$($1_JARMAIN),$(ECHO) "Main-Class: $$(strip $$($1_JARMAIN))" >> $$($1_MANIFEST_FILE))
- $$(if $$($1_EXTRA_MANIFEST_ATTR),$(PRINTF) "$$($1_EXTRA_MANIFEST_ATTR)\n" >> $$($1_MANIFEST_FILE))
- $$(if $$(wildcard $$@), \
$(ECHO) Modifying $$($1_NAME) $$(NEWLINE) \
$$($1_CAPTURE_CONTENTS) \
$$($1_CAPTURE_METAINF) \
@@ -294,12 +321,6 @@
$(ZIP) -q -d $$@ `$(CAT) $$($1_DELETESS_FILE)` ; \
fi $$(NEWLINE) \
$$($1_UPDATE_CONTENTS) true $$(NEWLINE) \
- $$($1_JARINDEX) && true \
- , \
- $(ECHO) Creating $$($1_NAME) && $(JAR) $$($1_JAR_CREATE_OPTIONS) $$@ $$($1_MANIFEST_FILE) $$(NEWLINE) \
- $$($1_SCAPTURE_CONTENTS) \
- $$($1_SCAPTURE_METAINF) \
- $$($1_SUPDATE_CONTENTS) \
$$($1_JARINDEX) && true )
# Add jar to target list
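The create-or-update decision above hinges on $?, the list of prerequisites newer than the target. Below is a minimal standalone sketch (not part of the patch) of the same idiom, with invented file and variable names; it assumes MakeBase.gmk is included so DependOnVariable is available, and uses zip in place of the jar tool.

ZIPFLAGS := -q
ZIPFLAGS_VARDEPS_FILE := $(call DependOnVariable, ZIPFLAGS)

archive.zip: content1.txt content2.txt $(ZIPFLAGS_VARDEPS_FILE)
	if [ -n "$(filter %.vardeps, $?)" ]; then \
	  echo "Recreating $@ from scratch"; rm -f $@; \
	  zip $(ZIPFLAGS) $@ content1.txt content2.txt; \
	else \
	  echo "Updating $@ with newer content only"; \
	  zip $(ZIPFLAGS) $@ $(filter-out %.vardeps, $?); \
	fi

If the vardeps file is among the newer prerequisites, either the flags changed or the archive never existed (in which case every prerequisite counts as newer), so a full rebuild is done; otherwise only the changed content is added.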
@@ -431,7 +452,7 @@
$1_SRC:=$$(call ADD_SRCS,$$($1_SRC))
# Make sure the dirs exist.
$$(foreach d,$$($1_SRC), $$(if $$(wildcard $$d),,$$(error SRC specified to SetupJavaCompilation $1 contains missing directory $$d)))
- $$(eval $$(call MakeDir,$$($1_BIN)))
+ $$(call MakeDir,$$($1_BIN))
# Add all source roots to the find cache since we are likely going to run find
# on these more than once. The cache will only be updated if necessary.
$$(eval $$(call FillCacheFind,$$($1_SRC)))
@@ -475,23 +496,23 @@
$1_ALL_COPIES += $$(filter $$(addprefix %,$$($1_COPY)),$$($1_ALL_SRCS))
# Copy these explicitly
$1_ALL_COPIES += $$($1_COPY_FILES)
- # Copy must also respect filters.
- ifneq (,$$($1_INCLUDES))
- $1_ALL_COPIES := $$(filter $$($1_SRC_INCLUDES),$$($1_ALL_COPIES))
- endif
- ifneq (,$$($1_EXCLUDES))
- $1_ALL_COPIES := $$(filter-out $$($1_SRC_EXCLUDES),$$($1_ALL_COPIES))
- endif
- ifneq (,$$($1_EXCLUDE_FILES))
- $1_ALL_COPIES := $$(filter-out $$($1_EXCLUDE_FILES_PATTERN),$$($1_ALL_COPIES))
- endif
+ endif
+ # Copy must also respect filters.
+ ifneq (,$$($1_INCLUDES))
+ $1_ALL_COPIES := $$(filter $$($1_SRC_INCLUDES),$$($1_ALL_COPIES))
+ endif
+ ifneq (,$$($1_EXCLUDES))
+ $1_ALL_COPIES := $$(filter-out $$($1_SRC_EXCLUDES),$$($1_ALL_COPIES))
endif
- ifneq (,$$($1_ALL_COPIES))
- # Yep, there are files to be copied!
- $1_ALL_COPY_TARGETS:=
- $$(foreach i,$$($1_ALL_COPIES),$$(eval $$(call add_file_to_copy,$1,$$i)))
- # Now we can depend on $$($1_ALL_COPY_TARGETS) to copy all files!
- endif
+ ifneq (,$$($1_EXCLUDE_FILES))
+ $1_ALL_COPIES := $$(filter-out $$($1_EXCLUDE_FILES_PATTERN),$$($1_ALL_COPIES))
+ endif
+ ifneq (,$$($1_ALL_COPIES))
+ # Yep, there are files to be copied!
+ $1_ALL_COPY_TARGETS:=
+ $$(foreach i,$$($1_ALL_COPIES),$$(eval $$(call add_file_to_copy,$1,$$i)))
+ # Now we can depend on $$($1_ALL_COPY_TARGETS) to copy all files!
+ endif
# Find all property files to be copied and cleaned from source to bin.
ifneq (,$$($1_CLEAN)$$($1_CLEAN_FILES))
@@ -535,9 +556,14 @@
$1_SJAVAC:=$$(subst com.sun.tools.javac.Main,com.sun.tools.sjavac.Main,$$($1_JAVAC))
# Set the $1_REMOTE to spawn a background javac server.
- $1_REMOTE:=--server:portfile=$$($1_SJAVAC_PORTFILE),id=$1,sjavac=$$(subst $$(SPACE),%20,$$(subst $$(COMMA),%2C,$$(strip $$($1_SERVER_JVM) $$($1_SJAVAC))))
+ $1_REMOTE:=--server:portfile=$$($1_SJAVAC_PORTFILE),id=$1,sjavac=$$(subst \
+ $$(SPACE),%20,$$(subst $$(COMMA),%2C,$$(strip $$($1_SERVER_JVM) $$($1_SJAVAC))))
- $$($1_BIN)/_the.$1_batch: $$($1_SRCS) $$($1_DEPENDS)
+ $1_VARDEPS := $$($1_JVM) $$($1_SJAVAC) $$($1_SJAVAC_ARGS) $$($1_FLAGS) \
+ $$($1_HEADERS_ARG) $$($1_BIN)
+ $1_VARDEPS_FILE := $$(call DependOnVariable, $1_VARDEPS, $$($1_BIN)/_the.$1.vardeps)
+
+ $$($1_BIN)/_the.$1_batch: $$($1_SRCS) $$($1_DEPENDS) $$($1_VARDEPS_FILE)
$(MKDIR) -p $$(@D) $$(dir $$($1_SJAVAC_PORTFILE))
# As a workaround for sjavac not tracking api changed from the classpath, force full
# recompile if an external dependency, which is something other than a source
@@ -592,8 +618,11 @@
$1_HEADER_TARGETS := $$($1_HEADERS)/_the.$1_headers
endif
+ $1_VARDEPS := $$($1_JVM) $$($1_JAVAC) $$($1_FLAGS) $$($1_BIN) $$($1_HEADERS_ARG)
+ $1_VARDEPS_FILE := $$(call DependOnVariable, $1_VARDEPS, $$($1_BIN)/_the.$1.vardeps)
+
# When not using sjavac, pass along all sources to javac using an @file.
- $$($1_BIN)/_the.$1_batch: $$($1_SRCS) $$($1_DEPENDS)
+ $$($1_BIN)/_the.$1_batch: $$($1_SRCS) $$($1_DEPENDS) $$($1_VARDEPS_FILE)
$(MKDIR) -p $$(@D)
$(RM) $$($1_BIN)/_the.$1_batch $$($1_BIN)/_the.$1_batch.tmp
$$(call ListPathsSafely,$1_SRCS,\n, >> $$($1_BIN)/_the.$1_batch.tmp)
@@ -659,4 +688,5 @@
$(if $(findstring yes, $(ENABLE_SJAVAC)), $(strip $2)/_the.$(strip $1)_pubapi, \
$(strip $2)/_the.$(strip $1)_batch)
endef
-endif
+
+endif # _JAVA_COMPILATION_GMK
--- a/make/common/MakeBase.gmk Thu Jan 29 16:16:35 2015 -0800
+++ b/make/common/MakeBase.gmk Wed Jul 05 20:16:58 2017 +0200
@@ -349,7 +349,7 @@
# (and causing a crash on Cygwin).
# Default shell seems to always be /bin/sh. Must override with bash to get this to work on Solaris.
# Only use time if it's GNU time which supports format and output file.
- WRAPPER_SHELL:=$$(BASH) $$(SRC_ROOT)/common/bin/shell-tracer.sh $$(if $$(findstring yes,$$(IS_GNU_TIME)),$$(TIME),-) $$(OUTPUT_ROOT)/build-trace-time.log $$(BASH)
+ WRAPPER_SHELL:=$$(BASH) $$(SRC_ROOT)/common/bin/shell-tracer.sh $$(if $$(findstring yes,$$(IS_GNU_TIME)),$$(TIME),-) $$(OUTPUT_ROOT)/build-trace-time.log $$(SHELL)
SHELL=$$(warning $$(if $$@,Building $$@,Running shell command) $$(if $$<, (from $$<))$$(if $$?, ($$(wordlist 1, 20, $$?) $$(if $$(wordlist 21, 22, $$?), ... [in total $$(words $$?) files]) newer)))$$(WRAPPER_SHELL)
endif
# Never remove warning messages; this is just for completeness
@@ -392,11 +392,9 @@
endef
# Make directory without forking mkdir if not needed
-define MakeDir
- ifneq ($$(wildcard $1 $2 $3 $4 $5 $6 $7 $8 $9),$$(strip $1 $2 $3 $4 $5 $6 $7 $8 $9))
- $$(shell $(MKDIR) -p $1 $2 $3 $4 $5 $6 $7 $8 $9)
- endif
-endef
+MakeDir = \
+ $(strip $(if $(subst $(wildcard $1 $2 $3 $4 $5 $6 $7 $8 $9),,$(strip $1 $2 $3 $4 $5 $6 $7 $8 $9)),\
+ $(shell $(MKDIR) -p $1 $2 $3 $4 $5 $6 $7 $8 $9)))
ifeq ($(OPENJDK_TARGET_OS),solaris)
# On Solaris, if the target is a symlink and exists, cp won't overwrite.
@@ -446,6 +444,11 @@
# Filter out duplicate sub strings while preserving order. Keeps the first occurance.
uniq = $(if $1,$(firstword $1) $(call uniq,$(filter-out $(firstword $1),$1)))
+# String equals
+equals = \
+ $(and $(findstring $(strip $1),$(strip $2)),\
+ $(findstring $(strip $2),$(strip $1)))
+
ifneq ($(DISABLE_CACHE_FIND), true)
################################################################################
# In Cygwin, finds are very costly, both because of expensive forks and because
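To make the new equals function above concrete: $(findstring) only tests substring containment, so requiring each stripped value to be found inside the other is what turns it into an equality test. A small illustration, assuming MakeBase.gmk is included:

# 'abc' is found inside 'abcd' but not vice versa, so the $(and ...) in equals
# collapses to the empty string: the values compare as unequal.
ifneq ($(call equals, abc, abcd), )
  $(error expected 'abc' and 'abcd' to compare as unequal)
endif

# Both arguments are stripped first, so surrounding spaces do not matter and
# equals returns the (non-empty) common string.
ifeq ($(call equals,  abc , abc), )
  $(error expected ' abc ' and 'abc' to compare as equal)
endif

Note that two empty (or whitespace-only) values compare as unequal under this definition, since both $(findstring) calls return the empty string. The MakeDir rewrite in the hunk above is a related simplification: as a plain function that expands to nothing, it can now be invoked directly with $(call MakeDir, dir), which is why other hunks in this change drop the $(eval ...) wrapper around it.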
@@ -543,6 +546,80 @@
endef
################################################################################
+# ShellQuote
+#
+# Quotes a string with single quotes and replaces single quotes with '\'' so
+# that the contents survive being given to the shell.
+
+ShellQuote = \
+ $(SQUOTE)$(subst $(SQUOTE),$(SQUOTE)\$(SQUOTE)$(SQUOTE),$(strip $1))$(SQUOTE)
+
+################################################################################
+# Write to and read from file
+
+# Param 1 - File to read
+ReadFile = \
+ $(shell $(CAT) $1)
+
+# Param 1 - Text to write
+# Param 2 - File to write to
+# Use printf to get consistent behavior on all platforms.
+WriteFile = \
+ $(shell $(PRINTF) "%s" $(call ShellQuote, $1) > $2)
+
+################################################################################
+# DependOnVariable
+#
+# This macro takes a variable name and puts the value in a file only if the
+# value has changed since last. The name of the file is returned. This can be
+# used to create rule dependencies on make variable values. The following
+# example would get rebuilt if the value of SOME_VAR was changed:
+#
+# path/to/some-file: $(call DependOnVariable, SOME_VAR)
+# echo $(SOME_VAR) > $@
+#
+# Note that leading and trailing white space in the value is ignored.
+#
+
+# Defines the subdirectory structure in which to store the variable value file
+DependOnVariableDirName = \
+ $(strip $(subst $(SRC_ROOT)/,,\
+ $(if $(filter /%, $(firstword $(MAKEFILE_LIST))), \
+ $(firstword $(MAKEFILE_LIST)), \
+ $(CURDIR)/$(firstword $(MAKEFILE_LIST)))))
+
+# Defines the name of the file to store variable value in. Generates a name
+# unless parameter 2 is given.
+# Param 1 - Name of variable
+# Param 2 - (optional) name of file to store value in
+DependOnVariableFileName = \
+ $(strip $(if $(strip $2), $2, \
+ $(MAKESUPPORT_OUTPUTDIR)/vardeps/$(DependOnVariableDirName)/$(strip $1).vardeps))
+
+# Does the actual work with parameters stripped.
+# If the file exists AND its contents are the same as the variable value, do
+# nothing; otherwise write a new file.
+# Always returns the name of the file where the value was printed.
+# Param 1 - Name of variable
+# Param 2 - (optional) name of file to store value in
+DependOnVariableHelper = \
+ $(strip $(if $(and $(wildcard $(call DependOnVariableFileName, $1, $2)),\
+ $(call equals, $(strip $($1)), \
+ $(call ReadFile, $(call DependOnVariableFileName, $1, $2)))),,\
+ $(call MakeDir, $(dir $(call DependOnVariableFileName, $1, $2))) \
+ $(if $(findstring $(LOG_LEVEL), trace), \
+ $(info Variable $1: >$(strip $($1))<) \
+ $(info File: >$(call ReadFile, $(call DependOnVariableFileName, $1, $2))<)) \
+ $(call WriteFile, $($1), $(call DependOnVariableFileName, $1, $2))) \
+ $(call DependOnVariableFileName, $1, $2))
+
+# Main macro
+# Param 1 - Name of variable
+# Param 2 - (optional) name of file to store value in
+DependOnVariable = \
+ $(call DependOnVariableHelper,$(strip $1),$(strip $2))
+
+################################################################################
# Hook to include the corresponding custom file, if present.
$(eval $(call IncludeCustomExtension, , common/MakeBase.gmk))
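A small standalone exercise (not part of the patch) of the ShellQuote / WriteFile / ReadFile helpers defined above, in the same spirit as the tests added to test/make/TestMakeBase.gmk further down; OUTPUT_DIR is an invented scratch directory and MakeBase.gmk is assumed to be included:

$(call MakeDir, $(OUTPUT_DIR))

TRICKY_VALUE := it's a "mixed quote" test
# ShellQuote expands the value to: 'it'\''s a "mixed quote" test'
$(call WriteFile, $(TRICKY_VALUE), $(OUTPUT_DIR)/quote-demo.txt)
ROUND_TRIP := $(call ReadFile, $(OUTPUT_DIR)/quote-demo.txt)

ifeq ($(call equals, $(TRICKY_VALUE), $(ROUND_TRIP)), )
  $(error WriteFile/ReadFile round trip lost characters: >$(ROUND_TRIP)<)
endif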
--- a/make/common/NativeCompilation.gmk Thu Jan 29 16:16:35 2015 -0800
+++ b/make/common/NativeCompilation.gmk Wed Jul 05 20:16:58 2017 +0200
@@ -114,7 +114,7 @@
endif
endif
- $$($1_$2_OBJ) : $2 | $$($1_BUILD_INFO)
+ $$($1_$2_OBJ) : $2 $$($1_COMPILE_VARDEPS_FILE) | $$($1_BUILD_INFO)
$(ECHO) $(LOG_INFO) "Compiling $$(notdir $2) (for $$(notdir $$($1_TARGET)))"
ifneq ($(TOOLCHAIN_TYPE), microsoft)
# The Solaris studio compiler doesn't output the full path to the object file in the
@@ -133,7 +133,8 @@
($$($1_$2_COMP) $$($1_$2_FLAGS) -showIncludes $$($1_$2_DEBUG_OUT_FLAGS) \
$(CC_OUT_OPTION)$$($1_$2_OBJ) $2 ; echo $$$$? > $$($1_$2_DEP).exitvalue) \
| $(TEE) $$($1_$2_DEP).raw | $(GREP) -v -e "^Note: including file:" \
- -e "^$(notdir $2)$$$$" ; exit `cat $$($1_$2_DEP).exitvalue`
+ -e "^$(notdir $2)$$$$" || test "$$$$?" = "1" ; \
+ exit `cat $$($1_$2_DEP).exitvalue`
$(RM) $$($1_$2_DEP).exitvalue
($(ECHO) $$@: \\ \
&& $(SED) $(WINDOWS_SHOWINCLUDE_SED_PATTERN) $$($1_$2_DEP).raw) > $$($1_$2_DEP)
@@ -306,7 +307,7 @@
endif
# Make sure the dirs exist.
- $$(eval $$(call MakeDir,$$($1_OBJECT_DIR) $$($1_OUTPUT_DIR)))
+ $$(call MakeDir,$$($1_OBJECT_DIR) $$($1_OUTPUT_DIR))
$$(foreach d,$$($1_SRC), $$(if $$(wildcard $$d),,$$(error SRC specified to SetupNativeCompilation $1 contains missing directory $$d)))
# Find all files in the source trees. Sort to remove duplicates.
@@ -426,15 +427,16 @@
$1_BUILD_INFO := $$($1_OBJECT_DIR)/_build-info.marker
- # Setup rule for printing progress info when compiling source files.
- # This is a rough heuristic and may not always print accurate information.
- $$($1_BUILD_INFO): $$($1_SRCS)
- ifeq ($$(wildcard $$($1_TARGET)),)
- $(ECHO) 'Creating $$($1_BASENAME) from $$(words $$?) file(s)'
- else
- $(ECHO) 'Updating $$($1_BASENAME) from $$(words $$?) file(s)'
- endif
- $(TOUCH) $$@
+ # Track variable changes for all variables that affect the compilation command
+ # lines for all object files in this setup. This includes at least all the
+ # variables used in the call to add_native_source below.
+ $1_COMPILE_VARDEPS := $$($1_CFLAGS) $$($1_EXTRA_CFLAGS) $(SYSROOT_CFLAGS) \
+ $$($1_CXXFLAGS) $$($1_EXTRA_CXXFLAGS) \
+ $$($1_CC) $$($1_CXX) $$($1_OBJC) $$($1_ASFLAGS) \
+ $$(foreach s, $$($1_SRCS), \
+ $$($1_$$(notdir $$s)_CFLAGS) $$($1_$$(notdir $$s)_CXXFLAGS))
+ $1_COMPILE_VARDEPS_FILE := $$(call DependOnVariable, $1_COMPILE_VARDEPS, \
+ $$($1_OBJECT_DIR)/$$($1_NOSUFFIX).comp.vardeps)
# Now call add_native_source for each source file we are going to compile.
$$(foreach p,$$($1_SRCS), \
@@ -444,13 +446,28 @@
$$($1_CXXFLAGS) $$($1_EXTRA_CXXFLAGS) $(SYSROOT_CFLAGS), \
$$($1_CXX),$$($1_OBJC),$$($1_ASFLAGS))))
+ # Setup rule for printing progress info when compiling source files.
+ # This is a rough heuristic and may not always print accurate information.
+ $$($1_BUILD_INFO): $$($1_SRCS) $$($1_COMPILE_VARDEPS_FILE)
+ ifeq ($$(wildcard $$($1_TARGET)),)
+ $(ECHO) 'Creating $$($1_BASENAME) from $$(words $$(filter-out %.vardeps, $$?)) file(s)'
+ else
+ $(ECHO) 'Updating $$($1_BASENAME) from $$(words $$(filter-out %.vardeps, $$?)) file(s)'
+ endif
+ $(TOUCH) $$@
+
# On windows we need to create a resource file
ifeq ($(OPENJDK_TARGET_OS), windows)
ifneq (,$$($1_VERSIONINFO_RESOURCE))
$1_RES:=$$($1_OBJECT_DIR)/$$($1_BASENAME).res
$1_RES_DEP:=$$($1_RES).d
-include $$($1_RES_DEP)
- $$($1_RES): $$($1_VERSIONINFO_RESOURCE)
+
+ $1_RES_VARDEPS := $(RC) $$($1_RC_FLAGS)
+ $1_RES_VARDEPS_FILE := $$(call DependOnVariable, $1_RES_VARDEPS, \
+ $$($1_RES).vardeps)
+
+ $$($1_RES): $$($1_VERSIONINFO_RESOURCE) $$($1_RES_VARDEPS_FILE)
$(ECHO) $(LOG_INFO) "Compiling resource $$(notdir $$($1_VERSIONINFO_RESOURCE)) (for $$(notdir $$($1_TARGET)))"
$(RC) $$($1_RC_FLAGS) $(CC_OUT_OPTION)$$@ $$($1_VERSIONINFO_RESOURCE)
# Windows RC compiler does not support -showIncludes, so we mis-use CL for this.
@@ -462,7 +479,9 @@
ifneq (,$$($1_MANIFEST))
$1_GEN_MANIFEST:=$$($1_OBJECT_DIR)/$$($1_PROGRAM).manifest
IMVERSIONVALUE:=$(JDK_MINOR_VERSION).$(JDK_MICRO_VERSION).$(JDK_UPDATE_VERSION).$(COOKED_BUILD_NUMBER)
- $$($1_GEN_MANIFEST): $$($1_MANIFEST)
+ $1_MANIFEST_VARDEPS_FILE := $$(call DependOnVariable, IMVERSIONVALUE, \
+ $$($1_GEN_MANIFEST).vardeps)
+ $$($1_GEN_MANIFEST): $$($1_MANIFEST) $$($1_MANIFEST_VARDEPS_FILE)
$(SED) 's%IMVERSION%$$(IMVERSIONVALUE)%g;s%PROGRAM%$$($1_PROGRAM)%g' $$< > $$@
endif
endif
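For reference, a minimal standalone sketch (not part of the patch) of what the COMPILE_VARDEPS file buys the object rules above: every object lists the vardeps file as a prerequisite, so changing any recorded flag rewrites that file and recompiles the objects without requiring a clean build. All names below are invented; DependOnVariable comes from MakeBase.gmk.

MYLIB_CFLAGS := -O2 -Wall -DDEBUG_LEVEL=2
MYLIB_COMPILE_VARDEPS_FILE := $(call DependOnVariable, MYLIB_CFLAGS, \
    $(OBJECT_DIR)/mylib.comp.vardeps)

# Editing MYLIB_CFLAGS rewrites the vardeps file, which is then newer than
# every object, so all of them are recompiled with the new flags.
$(OBJECT_DIR)/%.o: %.c $(MYLIB_COMPILE_VARDEPS_FILE)
	$(CC) $(MYLIB_CFLAGS) -c -o $@ $<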
@@ -575,8 +594,13 @@
$1_EXTRA_LDFLAGS_SUFFIX += $(GLOBAL_LDFLAGS_SUFFIX)
+ $1_VARDEPS := $$($1_LD) $(SYSROOT_LDFLAGS) $$($1_LDFLAGS) $$($1_EXTRA_LDFLAGS) \
+ $$($1_LDFLAGS_SUFFIX) $$($1_EXTRA_LDFLAGS_SUFFIX)
+ $1_VARDEPS_FILE := $$(call DependOnVariable, $1_VARDEPS, \
+ $$($1_OBJECT_DIR)/$$($1_NOSUFFIX).vardeps)
+
$$($1_TARGET): $$($1_EXPECTED_OBJS) $$($1_RES) $$($1_REAL_MAPFILE) \
- $$($1_DEBUGINFO_EXTRA_DEPS)
+ $$($1_DEBUGINFO_EXTRA_DEPS) $$($1_VARDEPS_FILE)
$(ECHO) $(LOG_INFO) "Linking $$($1_BASENAME)"
$$($1_LD) $$($1_LDFLAGS) $$($1_EXTRA_LDFLAGS) $(SYSROOT_LDFLAGS) \
$(LD_OUT_OPTION)$$@ \
@@ -592,8 +616,12 @@
endif
ifneq (,$$($1_STATIC_LIBRARY))
+ $1_VARDEPS := $(AR) $$($1_ARFLAGS) $$($1_LDFLAGS_SUFFIX) $$($1_EXTRA_LDFLAGS_SUFFIX)
+ $1_VARDEPS_FILE := $$(call DependOnVariable, $1_VARDEPS, \
+ $$($1_OBJECT_DIR)/$$($1_NOSUFFIX).vardeps)
+
# Generating a static library, ie object file archive.
- $$($1_TARGET): $$($1_EXPECTED_OBJS) $$($1_RES)
+ $$($1_TARGET): $$($1_EXPECTED_OBJS) $$($1_RES) $$($1_VARDEPS_FILE)
$(ECHO) $(LOG_INFO) "Archiving $$($1_STATIC_LIBRARY)"
$(AR) $$($1_ARFLAGS) $(AR_OUT_OPTION)$$($1_TARGET) $$($1_EXPECTED_OBJS) \
$$($1_RES) $$($1_LDFLAGS_SUFFIX) $$($1_EXTRA_LDFLAGS_SUFFIX)
@@ -603,8 +631,13 @@
# A executable binary has been specified, setup the target for it.
$1_EXTRA_LDFLAGS_SUFFIX += $(GLOBAL_LDFLAGS_SUFFIX)
+ $1_VARDEPS := $$($1_LDEXE) $(SYSROOT_LDFLAGS) $$($1_LDFLAGS) $$($1_EXTRA_LDFLAGS) \
+ $$($1_LDFLAGS_SUFFIX) $$($1_EXTRA_LDFLAGS_SUFFIX)
+ $1_VARDEPS_FILE := $$(call DependOnVariable, $1_VARDEPS, \
+ $$($1_OBJECT_DIR)/$$($1_NOSUFFIX).vardeps)
+
$$($1_TARGET): $$($1_EXPECTED_OBJS) $$($1_RES) $$($1_GEN_MANIFEST) \
- $$($1_DEBUGINFO_EXTRA_DEPS)
+ $$($1_DEBUGINFO_EXTRA_DEPS) $$($1_VARDEPS_FILE)
$(ECHO) $(LOG_INFO) "Linking executable $$($1_BASENAME)"
$$($1_LDEXE) $$($1_LDFLAGS) $$($1_EXTRA_LDFLAGS) $(SYSROOT_LDFLAGS) \
$(EXE_OUT_OPTION)$$($1_TARGET) \
--- a/make/common/TextFileProcessing.gmk Thu Jan 29 16:16:35 2015 -0800
+++ b/make/common/TextFileProcessing.gmk Wed Jul 05 20:16:58 2017 +0200
@@ -34,7 +34,7 @@
# param 3 = the target base directory
# param 4 = the target file name (possibly with a partial path)
define SetupSingleTextFileForProcessing
- $(strip $3)/$(strip $4): $2
+ $(strip $3)/$(strip $4): $2 $$($1_VARDEPS_FILE)
$(ECHO) $(LOG_INFO) "Processing $(strip $4)"
$(MKDIR) -p '$$(@D)'
$(RM) '$$@' '$$@.includes.tmp' '$$@.replacements.tmp'
@@ -193,6 +193,9 @@
$1_INCLUDES_COMMAND_LINE := $(CAT)
endif
+ $1_VARDEPS := $$($1_INCLUDES_COMMAND_LINE) $$($1_REPLACEMENTS_COMMAND_LINE)
+ $1_VARDEPS_FILE := $$(call DependOnVariable, $1_VARDEPS)
+
# Reset target list before populating it
$1 :=
--- a/test/make/TestJavaCompilation.gmk Thu Jan 29 16:16:35 2015 -0800
+++ b/test/make/TestJavaCompilation.gmk Wed Jul 05 20:16:58 2017 +0200
@@ -50,6 +50,9 @@
clean-jar1:
$(RM) -r $(OUTPUT_DIR)/_jar1* $(OUTPUT_DIR)/jar1*
+$(JAR1_MANIFEST): | $(OUTPUT_DIR)/_jar1_created
+ $(ECHO) "Test-Attribute: value" > $(JAR1_MANIFEST)
+
$(OUTPUT_DIR)/_jar1_created: $(DEPS)
$(RM) -r $(JAR1_SRC_ROOT)
$(RM) $(JAR1_FILE)
@@ -61,7 +64,6 @@
$(TOUCH) $(JAR1_SRC_ROOT)/dir1/file1.class
$(TOUCH) $(JAR1_SRC_ROOT)/dir2/file2.class
$(TOUCH) $(JAR1_SRC_ROOT)/META-INF/metafile
- $(ECHO) "Test-Attribute: value" > $(JAR1_MANIFEST)
$(TOUCH) $@
$(eval $(call SetupArchive,BUILD_JAR1, \
@@ -77,7 +79,7 @@
$(DIFF) -r $(JAR1_SRC_ROOT)/dir1 $(JAR1_UNZIP)/dir1
$(DIFF) -r $(JAR1_SRC_ROOT)/dir2 $(JAR1_UNZIP)/dir2
$(DIFF) -r $(JAR1_SRC_ROOT)/META-INF/metafile $(JAR1_UNZIP)/META-INF/metafile
- if [ "`$(GREP) 'Test-Attribute: value' $(JAR1_MANIFEST)`" = "" ]; then \
+ if [ "`$(GREP) 'Test-Attribute: value' $(JAR1_UNZIP)/META-INF/MANIFEST.MF`" = "" ]; then \
$(ECHO) "Could not find Test-Attribute in manifest of $(JAR1_FILE)"; \
exit 1; \
fi
@@ -88,7 +90,7 @@
# Change a source file and call this makefile again to force the jar to be
# updated.
-$(OUTPUT_DIR)_jar1_updated: $(OUTPUT_DIR)/_jar1_verified
+$(OUTPUT_DIR)/_jar1_updated: $(OUTPUT_DIR)/_jar1_verified
$(ECHO) updated > $(JAR1_SRC_ROOT)/dir1/file1.class
$(ECHO) updated > $(JAR1_SRC_ROOT)/META-INF/metafile
$(TOUCH) $(OUTPUT_DIR)/_jar1_created
@@ -96,9 +98,26 @@
$(TOUCH) $@
update-jar1: $(OUTPUT_DIR)_jar1_updated
-TEST_TARGETS += $(OUTPUT_DIR)_jar1_updated
-.PHONY: clean-jar1 create-jar1 update-jar1
+# Change the manifest file and call this makefile again to force the jar
+# to be updated
+$(OUTPUT_DIR)/_jar1_updated_manifest: $(OUTPUT_DIR)/_jar1_updated
+ $(ECHO) "Test-Attribute: foobar" > $(JAR1_MANIFEST)
+ +$(MAKE) -f $(THIS_FILE) $(BUILD_JAR1)
+ $(RM) -r $(JAR1_UNZIP)
+ $(MKDIR) -p $(JAR1_UNZIP)
+ $(CD) $(JAR1_UNZIP) && $(UNZIP) $(JAR1_FILE) $(LOG_DEBUG)
+ if [ "`$(GREP) 'Test-Attribute: foobar' $(JAR1_UNZIP)/META-INF/MANIFEST.MF`" = "" ]; then \
+ $(ECHO) "Could not find Test-Attribute in manifest of $(JAR1_FILE)"; \
+ exit 1; \
+ fi
+ $(TOUCH) $@
+
+update-jar1-manifest: $(OUTPUT_DIR)/_jar1_updated_manifest
+
+TEST_TARGETS += $(OUTPUT_DIR)/_jar1_updated $(OUTPUT_DIR)/_jar1_updated_manifest
+
+.PHONY: clean-jar1 create-jar1 update-jar1 update-jar1-manifest
################################################################################
# Test: jar2
@@ -139,14 +158,14 @@
create-jar2: $(OUTPUT_DIR)/_jar2_verified
TEST_TARGETS += $(OUTPUT_DIR)/_jar2_verified
-$(OUTPUT_DIR)_jar2_updated: $(OUTPUT_DIR)/_jar2_verified
+$(OUTPUT_DIR)/_jar2_updated: $(OUTPUT_DIR)/_jar2_verified
$(ECHO) updated > $(JAR2_SRC_ROOT1)/dir1/file1.class
$(TOUCH) $(OUTPUT_DIR)/_jar2_created
+$(MAKE) -f $(THIS_FILE) $(OUTPUT_DIR)/_jar2_verified
$(TOUCH) $@
-update-jar2: $(OUTPUT_DIR)_jar2_updated
-TEST_TARGETS += $(OUTPUT_DIR)_jar2_updated
+update-jar2: $(OUTPUT_DIR)/_jar2_updated
+TEST_TARGETS += $(OUTPUT_DIR)/_jar2_updated
.PHONY: clean-jar2 create-jar2 update-jar2
@@ -200,14 +219,14 @@
create-jar3: $(OUTPUT_DIR)/_jar3_verified
TEST_TARGETS += $(OUTPUT_DIR)/_jar3_verified
-$(OUTPUT_DIR)_jar3_updated: $(OUTPUT_DIR)/_jar3_verified
+$(OUTPUT_DIR)/_jar3_updated: $(OUTPUT_DIR)/_jar3_verified
$(ECHO) updated > $(JAR3_SRC_ROOT2)/extra-file
$(TOUCH) $(OUTPUT_DIR)/_jar3_created
+$(MAKE) -f $(THIS_FILE) $(OUTPUT_DIR)/_jar3_verified
$(TOUCH) $@
-update-jar3: $(OUTPUT_DIR)_jar3_updated
-TEST_TARGETS += $(OUTPUT_DIR)_jar3_updated
+update-jar3: $(OUTPUT_DIR)/_jar3_updated
+TEST_TARGETS += $(OUTPUT_DIR)/_jar3_updated
.PHONY: clean-jar3 create-jar3 update-jar3
--- a/test/make/TestMakeBase.gmk Thu Jan 29 16:16:35 2015 -0800
+++ b/test/make/TestMakeBase.gmk Wed Jul 05 20:16:58 2017 +0200
@@ -33,7 +33,14 @@
$(SRC_ROOT)/make/common/MakeBase.gmk \
#
+# On macosx, file system timestamps only have 1 second resolution, so we must add
+# sleeps to properly test dependencies.
+ifeq ($(OPENJDK_BUILD_OS), macosx)
+ SLEEP_ON_MAC := sleep 1
+endif
+
OUTPUT_DIR := $(TESTMAKE_OUTPUTDIR)/make-base
+$(call MakeDir, $(OUTPUT_DIR))
################################################################################
# Escape $
@@ -56,5 +63,124 @@
TEST_TARGETS += $(ESCAPE_DOLLAR_DIR)/_escape_dollar
################################################################################
+# Test Equals
+
+EQUALS_VALUE1 := value1$(SPACE)
+EQUALS_VALUE2 := value2
+
+ifneq ($(call equals, $(EQUALS_VALUE1), $(EQUALS_VALUE2)), )
+ $(error The strings >$(EQUALS_VALUE1)< and >$(EQUALS_VALUE2)< are equal)
+endif
+
+ifeq ($(call equals, $(EQUALS_VALUE1), $(EQUALS_VALUE1)), )
+ $(error The strings >$(EQUALS_VALUE1)< and >$(EQUALS_VALUE1)< are not equal)
+endif
+
+################################################################################
+# Test ShellQuote
+
+SHELL_QUOTE_VALUE := foo '""' "''" bar
+SHELL_QUOTE_RESULT := $(shell $(ECHO) $(call ShellQuote, \
+ $(SHELL_QUOTE_VALUE)))
+
+ifneq ($(SHELL_QUOTE_VALUE), $(SHELL_QUOTE_RESULT))
+ $(error Expected: >$(SHELL_QUOTE_VALUE)< - Result: >$(SHELL_QUOTE_RESULT)<)
+endif
+
+################################################################################
+# Test read and write to file
+
+READ_WRITE_FILE := $(OUTPUT_DIR)/read-write
+READ_WRITE_VALUE := foo '""' "''" \t\n\\ bar
+$(call WriteFile, $(READ_WRITE_VALUE), $(READ_WRITE_FILE))
+READ_WRITE_RESULT := $(call ReadFile, $(READ_WRITE_FILE))
+
+ifneq ($(READ_WRITE_VALUE), $(READ_WRITE_RESULT))
+ $(error Expected: >$(READ_WRITE_VALUE)< - Result: >$(READ_WRITE_RESULT)<)
+endif
+
+################################################################################
+# Test creating dependencies on make variables
+
+VARDEP_DIR := $(OUTPUT_DIR)/vardep
+VARDEP_SRC_FILE := $(VARDEP_DIR)/src-file
+VARDEP_TARGET_FILE := $(VARDEP_DIR)/target-file
+VARDEP_FLAG_FILE := $(VARDEP_DIR)/flag-file
+
+$(VARDEP_DIR)/src-file:
+ $(MKDIR) -p $(@D)
+ $(ECHO) "some string XXX" > $@
+
+$(VARDEP_TARGET_FILE): $(VARDEP_DIR)/src-file \
+ $(call DependOnVariable, VARDEP_TEST_VAR)
+ $(MKDIR) -p $(@D)
+ $(SED) -e 's/XXX/$(VARDEP_TEST_VAR)/g' $< > $@
+ $(TOUCH) $(VARDEP_FLAG_FILE)
+
+test-vardep:
+ $(RM) $(VARDEP_SRC_FILE) $(VARDEP_TARGET_FILE) $(VARDEP_FLAG_FILE)
+ #
+ # Simply create the target file and verify that it has the correct value
+ #
+ $(MAKE) -f $(THIS_FILE) VARDEP_TEST_VAR=value1 $(VARDEP_TARGET_FILE)
+ $(PRINTF) "Expecting value1: %s\n" "`$(CAT) $(VARDEP_DIR)/target-file`"
+ test "some string value1" = "`$(CAT) $(VARDEP_DIR)/target-file`"
+ test -e $(VARDEP_FLAG_FILE)
+ #
+ # Make the target file again and verify that the value is updated with
+ # the new value
+ #
+ $(SLEEP_ON_MAC)
+ $(MAKE) -f $(THIS_FILE) VARDEP_TEST_VAR=value2 $(VARDEP_TARGET_FILE)
+ $(PRINTF) "Expecting value2: %s\n" "`$(CAT) $(VARDEP_DIR)/target-file`"
+ test "some string value2" = "`$(CAT) $(VARDEP_DIR)/target-file`"
+ test -e $(VARDEP_FLAG_FILE)
+ #
+ # Make the target again with the same value and verify that the recipe
+ # was never run by checking that the flag file was not recreated
+ #
+ $(SLEEP_ON_MAC)
+ $(RM) $(VARDEP_FLAG_FILE)
+ $(MAKE) -f $(THIS_FILE) VARDEP_TEST_VAR=value2 $(VARDEP_TARGET_FILE)
+ $(PRINTF) "Expecting value2: %s\n" "`$(CAT) $(VARDEP_DIR)/target-file`"
+ test "some string value2" = "`$(CAT) $(VARDEP_DIR)/target-file`"
+ test ! -e $(VARDEP_FLAG_FILE)
+ #
+ # Test running with spaces at the end and the middle of the value
+ # and verify that the file isn't rewritten the second time
+ #
+ $(MAKE) -f $(THIS_FILE) VARDEP_TEST_VAR="value3 foo " $(VARDEP_TARGET_FILE)
+ $(RM) $(VARDEP_FLAG_FILE)
+ $(MAKE) -f $(THIS_FILE) VARDEP_TEST_VAR="value3 foo" $(VARDEP_TARGET_FILE)
+ test ! -e $(VARDEP_FLAG_FILE)
+ $(MAKE) -f $(THIS_FILE) VARDEP_TEST_VAR=" value3 foo" $(VARDEP_TARGET_FILE)
+ test ! -e $(VARDEP_FLAG_FILE)
+
+# Test specifying a specific value file to store variable in
+VARDEP_VALUE_FILE := $(VARDEP_DIR)/value-file
+VARDEP_TEST_VAR2 := value3
+
+VARDEP_RETURN_VALUE := $(call DependOnVariable, VARDEP_TEST_VAR2, $(VARDEP_VALUE_FILE))
+ifneq ($(VARDEP_VALUE_FILE), $(VARDEP_RETURN_VALUE))
+ $(error Expected: $(VARDEP_VALUE_FILE) - DependOnVariable: $(VARDEP_RETURN_VALUE))
+endif
+VARDEP_FILE_CONTENTS := $(shell $(CAT) $(VARDEP_VALUE_FILE))
+ifneq ($(VARDEP_TEST_VAR2), $(VARDEP_FILE_CONTENTS))
+ $(error Expected: $(VARDEP_TEST_VAR2) - DependOnVariable file contained: \
+ $(VARDEP_FILE_CONTENTS))
+endif
+
+# Test with a variable value containing some problematic characters
+VARDEP_TEST_VAR3 := foo '""' "''" bar
+VARDEP_VALUE_FILE := $(call DependOnVariable, VARDEP_TEST_VAR3)
+VARDEP_FILE_CONTENTS := $(shell $(CAT) $(VARDEP_VALUE_FILE))
+ifneq ($(VARDEP_TEST_VAR3), $(VARDEP_FILE_CONTENTS))
+ $(error Expected: >$(VARDEP_TEST_VAR3)< - DependOnVariable file contained: \
+ >$(VARDEP_FILE_CONTENTS)<)
+endif
+
+TEST_TARGETS += test-vardep
+
+################################################################################
all: $(TEST_TARGETS)