# HG changeset patch # User prr # Date 1526404432 25200 # Node ID d93ae85b18c183bd5a099b0afa778721b157359c # Parent 9822dd521c15a3645d701f17e8b25f6efc2117ca# Parent 1dc98fa30b140ee649f059ebe79e6746329bf021 Merge diff -r 9822dd521c15 -r d93ae85b18c1 .hgignore --- a/.hgignore Tue May 15 18:03:31 2018 +0530 +++ b/.hgignore Tue May 15 10:13:52 2018 -0700 @@ -13,3 +13,4 @@ NashornProfile.txt .*/JTreport/.* .*/JTwork/.* +.*/.git/.* diff -r 9822dd521c15 -r d93ae85b18c1 .hgtags --- a/.hgtags Tue May 15 18:03:31 2018 +0530 +++ b/.hgtags Tue May 15 10:13:52 2018 -0700 @@ -484,3 +484,4 @@ 69d7398038c54774d9395b6810e0cca335edc02c jdk-11+10 e1e60f75cd39312a7f59d2a4f91d624e5aecc95e jdk-11+11 3ab6ba9f94a9045a526d645af26c933235371d6f jdk-11+12 +758deedaae8406ae60147486107a54e9864aa7b0 jdk-11+13 diff -r 9822dd521c15 -r d93ae85b18c1 make/CreateJmods.gmk --- a/make/CreateJmods.gmk Tue May 15 18:03:31 2018 +0530 +++ b/make/CreateJmods.gmk Tue May 15 10:13:52 2018 -0700 @@ -121,11 +121,21 @@ ifeq ($(OPENJDK_TARGET_OS), windows) # Only java.base needs to include the MSVC*_DLLs. Make sure no other module # tries to include them (typically imported ones). 
- ifneq ($(wildcard $(LIBS_DIR)/$(notdir $(MSVCR_DLL))), ) - JMOD_FLAGS += --exclude '$(notdir $(MSVCR_DLL))' + ifneq ($(MSVCR_DLL), ) + ifneq ($(wildcard $(LIBS_DIR)/$(notdir $(MSVCR_DLL))), ) + JMOD_FLAGS += --exclude '$(notdir $(MSVCR_DLL))' + endif endif - ifneq ($(wildcard $(LIBS_DIR)/$(notdir $(MSVCP_DLL))), ) - JMOD_FLAGS += --exclude '$(notdir $(MSVCP_DLL))' + ifneq ($(MSVCP_DLL), ) + ifneq ($(wildcard $(LIBS_DIR)/$(notdir $(MSVCP_DLL))), ) + JMOD_FLAGS += --exclude '$(notdir $(MSVCP_DLL))' + endif + endif + ifneq ($(UCRT_DLL_DIR), ) + UCRT_DLL_FILES := $(notdir $(wildcard $(UCRT_DLL_DIR)/*.dll)) + ifneq ($(wildcard $(LIBS_DIR)/$(firstword $(UCRT_DLL_FILES))), ) + JMOD_FLAGS += $(patsubst %, --exclude '%', $(UCRT_DLL_FILES)) + endif endif endif endif diff -r 9822dd521c15 -r d93ae85b18c1 make/autoconf/basics.m4 --- a/make/autoconf/basics.m4 Tue May 15 18:03:31 2018 +0530 +++ b/make/autoconf/basics.m4 Tue May 15 10:13:52 2018 -0700 @@ -671,6 +671,8 @@ BASIC_EVAL_DEVKIT_VARIABLE([DEVKIT_MSVCR_DLL]) # Corresponds to --with-msvcp-dll BASIC_EVAL_DEVKIT_VARIABLE([DEVKIT_MSVCP_DLL]) + # Corresponds to --with-ucrt-dll-dir + BASIC_EVAL_DEVKIT_VARIABLE([DEVKIT_UCRT_DLL_DIR]) fi AC_MSG_CHECKING([for devkit]) diff -r 9822dd521c15 -r d93ae85b18c1 make/autoconf/hotspot.m4 --- a/make/autoconf/hotspot.m4 Tue May 15 18:03:31 2018 +0530 +++ b/make/autoconf/hotspot.m4 Tue May 15 10:13:52 2018 -0700 @@ -206,7 +206,7 @@ if test "x$ENABLE_AOT" = "xtrue"; then # Only enable AOT on X64 platforms. 
- if test "x$OPENJDK_TARGET_CPU" = "xx86_64"; then + if test "x$OPENJDK_TARGET_CPU" = "xx86_64" || test "x$OPENJDK_TARGET_CPU" = "xaarch64" ; then if test -e "${TOPDIR}/src/jdk.aot"; then if test -e "${TOPDIR}/src/jdk.internal.vm.compiler"; then ENABLE_AOT="true" diff -r 9822dd521c15 -r d93ae85b18c1 make/autoconf/spec.gmk.in --- a/make/autoconf/spec.gmk.in Tue May 15 18:03:31 2018 +0530 +++ b/make/autoconf/spec.gmk.in Tue May 15 10:13:52 2018 -0700 @@ -736,6 +736,7 @@ LIBZIP_CAN_USE_MMAP:=@LIBZIP_CAN_USE_MMAP@ MSVCR_DLL:=@MSVCR_DLL@ MSVCP_DLL:=@MSVCP_DLL@ +UCRT_DLL_DIR:=@UCRT_DLL_DIR@ STLPORT_LIB:=@STLPORT_LIB@ #################################################### diff -r 9822dd521c15 -r d93ae85b18c1 make/autoconf/toolchain_windows.m4 --- a/make/autoconf/toolchain_windows.m4 Tue May 15 18:03:31 2018 +0530 +++ b/make/autoconf/toolchain_windows.m4 Tue May 15 10:13:52 2018 -0700 @@ -76,6 +76,7 @@ VS_MSVCR_2017=vcruntime140.dll VS_MSVCP_2017=msvcp140.dll VS_ENVVAR_2017="VS150COMNTOOLS" +VS_USE_UCRT_2017="true" VS_VS_INSTALLDIR_2017="Microsoft Visual Studio/2017" VS_EDITIONS_2017="BuildTools Community Professional Enterprise" VS_SDK_INSTALLDIR_2017= @@ -264,6 +265,7 @@ eval VS_VERSION_INTERNAL="\${VS_VERSION_INTERNAL_${VS_VERSION}}" eval MSVCR_NAME="\${VS_MSVCR_${VS_VERSION}}" eval MSVCP_NAME="\${VS_MSVCP_${VS_VERSION}}" + eval USE_UCRT="\${VS_USE_UCRT_${VS_VERSION}}" eval PLATFORM_TOOLSET="\${VS_VS_PLATFORM_NAME_${VS_VERSION}}" VS_PATH="$TOOLCHAIN_PATH:$PATH" @@ -309,6 +311,7 @@ eval VS_VERSION_INTERNAL="\${VS_VERSION_INTERNAL_${VS_VERSION}}" eval MSVCR_NAME="\${VS_MSVCR_${VS_VERSION}}" eval MSVCP_NAME="\${VS_MSVCP_${VS_VERSION}}" + eval USE_UCRT="\${VS_USE_UCRT_${VS_VERSION}}" # The rest of the variables are already evaled while probing AC_MSG_NOTICE([Found $VS_DESCRIPTION]) break @@ -432,8 +435,11 @@ VS_INCLUDE=`$ECHO "$VS_INCLUDE" | $SED -e 's/\\\\*;* *$//'` VS_LIB=`$ECHO "$VS_LIB" | $SED 's/\\\\*;* *$//'` VCINSTALLDIR=`$ECHO "$VCINSTALLDIR" | $SED 's/\\\\* *$//'` - 
WindowsSDKDir=`$ECHO "$WindowsSDKDir" | $SED 's/\\\\* *$//'` + WindowsSdkDir=`$ECHO "$WindowsSdkDir" | $SED 's/\\\\* *$//'` WINDOWSSDKDIR=`$ECHO "$WINDOWSSDKDIR" | $SED 's/\\\\* *$//'` + if test -z "$WINDOWSSDKDIR"; then + WINDOWSSDKDIR="$WindowsSdkDir" + fi # Remove any paths containing # (typically F#) as that messes up make. This # is needed if visual studio was installed with F# support. VS_PATH=`$ECHO "$VS_PATH" | $SED 's/[[^:#]]*#[^:]*://g'` @@ -539,7 +545,7 @@ if test "x$MSVC_DLL" = x; then if test "x$VCINSTALLDIR" != x; then CYGWIN_VC_INSTALL_DIR="$VCINSTALLDIR" - BASIC_WINDOWS_REWRITE_AS_UNIX_PATH(CYGWIN_VC_INSTALL_DIR) + BASIC_FIXUP_PATH(CYGWIN_VC_INSTALL_DIR) if test "$VS_VERSION" -lt 2017; then # Probe: Using well-known location from Visual Studio 12.0 and older if test "x$OPENJDK_TARGET_CPU_BITS" = x64; then @@ -673,4 +679,41 @@ fi AC_SUBST(MSVCP_DLL) fi + + AC_ARG_WITH(ucrt-dll-dir, [AS_HELP_STRING([--with-ucrt-dll-dir], + [path to Microsoft Windows Kit UCRT DLL dir (Windows only) @<:@probed@:>@])]) + + if test "x$USE_UCRT" = "xtrue"; then + AC_MSG_CHECKING([for UCRT DLL dir]) + if test "x$with_ucrt_dll_dir" != x; then + if test -z "$(ls -d "$with_ucrt_dll_dir/*.dll" 2> /dev/null)"; then + AC_MSG_RESULT([no]) + AC_MSG_ERROR([Could not find any dlls in $with_ucrt_dll_dir]) + else + AC_MSG_RESULT([$with_ucrt_dll_dir]) + UCRT_DLL_DIR="$with_ucrt_dll_dir" + BASIC_FIXUP_PATH([UCRT_DLL_DIR]) + fi + elif test "x$DEVKIT_UCRT_DLL_DIR" != "x"; then + UCRT_DLL_DIR="$DEVKIT_UCRT_DLL_DIR" + AC_MSG_RESULT($UCRT_DLL_DIR) + else + CYGWIN_WINDOWSSDKDIR="${WINDOWSSDKDIR}" + BASIC_FIXUP_PATH([CYGWIN_WINDOWSSDKDIR]) + dll_subdir=$OPENJDK_TARGET_CPU + if test "x$dll_subdir" = "xx86_64"; then + dll_subdir="x64" + fi + UCRT_DLL_DIR="$CYGWIN_WINDOWSSDKDIR/Redist/ucrt/DLLs/$dll_subdir" + if test -z "$(ls -d "$UCRT_DLL_DIR/"*.dll 2> /dev/null)"; then + AC_MSG_RESULT([no]) + AC_MSG_ERROR([Could not find any dlls in $UCRT_DLL_DIR]) + else + AC_MSG_RESULT($UCRT_DLL_DIR) + fi + fi 
+ else + UCRT_DLL_DIR= + fi + AC_SUBST(UCRT_DLL_DIR) ]) diff -r 9822dd521c15 -r d93ae85b18c1 make/copy/Copy-java.base.gmk --- a/make/copy/Copy-java.base.gmk Tue May 15 18:03:31 2018 +0530 +++ b/make/copy/Copy-java.base.gmk Tue May 15 10:13:52 2018 -0700 @@ -65,6 +65,17 @@ MACRO := copy-and-chmod)) TARGETS += $(COPY_MSVCR) $(COPY_MSVCP) + + ifneq ($(UCRT_DLL_DIR), ) + $(eval $(call SetupCopyFiles, COPY_UCRT_DLLS, \ + DEST := $(LIB_DST_DIR), \ + SRC := $(UCRT_DLL_DIR), \ + FILES := $(wildcard $(UCRT_DLL_DIR)/*.dll), \ + MACRO := copy-and-chmod, \ + )) + + TARGETS += $(COPY_UCRT_DLLS) + endif endif ################################################################################ @@ -117,23 +128,23 @@ $(RM) $(@) # Now check for other permutations ifeq ($(call check-jvm-variant, server), true) - $(PRINTF) "-server KNOWN\n">>$(@) - $(PRINTF) "-client ALIASED_TO -server\n">>$(@) + $(PRINTF) -- "-server KNOWN\n">>$(@) + $(PRINTF) -- "-client ALIASED_TO -server\n">>$(@) ifeq ($(call check-jvm-variant, minimal), true) - $(PRINTF) "-minimal KNOWN\n">>$(@) + $(PRINTF) -- "-minimal KNOWN\n">>$(@) endif else ifeq ($(call check-jvm-variant, client), true) - $(PRINTF) "-client KNOWN\n">>$(@) - $(PRINTF) "-server ALIASED_TO -client\n">>$(@) + $(PRINTF) -- "-client KNOWN\n">>$(@) + $(PRINTF) -- "-server ALIASED_TO -client\n">>$(@) ifeq ($(call check-jvm-variant, minimal), true) - $(PRINTF) "-minimal KNOWN\n">>$(@) + $(PRINTF) -- "-minimal KNOWN\n">>$(@) endif else ifeq ($(call check-jvm-variant, minimal), true) - $(PRINTF) "-minimal KNOWN\n">>$(@) - $(PRINTF) "-server ALIASED_TO -minimal\n">>$(@) - $(PRINTF) "-client ALIASED_TO -minimal\n">>$(@) + $(PRINTF) -- "-minimal KNOWN\n">>$(@) + $(PRINTF) -- "-server ALIASED_TO -minimal\n">>$(@) + $(PRINTF) -- "-client ALIASED_TO -minimal\n">>$(@) endif endif endif diff -r 9822dd521c15 -r d93ae85b18c1 make/devkit/createWindowsDevkit2017.sh --- a/make/devkit/createWindowsDevkit2017.sh Tue May 15 18:03:31 2018 +0530 +++ 
b/make/devkit/createWindowsDevkit2017.sh Tue May 15 10:13:52 2018 -0700 @@ -130,6 +130,8 @@ cp -r "$SDK_INSTALL_DIR/lib/$SDK_FULL_VERSION/um/x86" $DEVKIT_ROOT/$SDK_VERSION/lib/ cp -r "$SDK_INSTALL_DIR/lib/$SDK_FULL_VERSION/ucrt/x64" $DEVKIT_ROOT/$SDK_VERSION/lib/ cp -r "$SDK_INSTALL_DIR/lib/$SDK_FULL_VERSION/ucrt/x86" $DEVKIT_ROOT/$SDK_VERSION/lib/ + mkdir -p $DEVKIT_ROOT/$SDK_VERSION/Redist + cp -r "$SDK_INSTALL_DIR/Redist/ucrt" $DEVKIT_ROOT/$SDK_VERSION/Redist/ mkdir -p $DEVKIT_ROOT/$SDK_VERSION/include cp -r "$SDK_INSTALL_DIR/include/$SDK_FULL_VERSION/"* $DEVKIT_ROOT/$SDK_VERSION/include/ fi @@ -152,12 +154,14 @@ echo-info "DEVKIT_VS_LIB_x86=\"\$DEVKIT_ROOT/VC/lib/x86;\$DEVKIT_ROOT/VC/atlmfc/lib/x86;\$DEVKIT_ROOT/$SDK_VERSION/lib/x86\"" echo-info "DEVKIT_MSVCR_DLL_x86=\"\$DEVKIT_ROOT/VC/redist/x86/$MSVCR_DLL\"" echo-info "DEVKIT_MSVCP_DLL_x86=\"\$DEVKIT_ROOT/VC/redist/x86/$MSVCP_DLL\"" +echo-info "DEVKIT_UCRT_DLL_DIR_x86=\"\$DEVKIT_ROOT/10/Redist/ucrt/DLLs/x86\"" echo-info "" echo-info "DEVKIT_TOOLCHAIN_PATH_x86_64=\"\$DEVKIT_ROOT/VC/bin/x64:\$DEVKIT_ROOT/$SDK_VERSION/bin/x64:\$DEVKIT_ROOT/$SDK_VERSION/bin/x86\"" echo-info "DEVKIT_VS_INCLUDE_x86_64=\"\$DEVKIT_ROOT/VC/include;\$DEVKIT_ROOT/VC/atlmfc/include;\$DEVKIT_ROOT/$SDK_VERSION/include/shared;\$DEVKIT_ROOT/$SDK_VERSION/include/ucrt;\$DEVKIT_ROOT/$SDK_VERSION/include/um;\$DEVKIT_ROOT/$SDK_VERSION/include/winrt\"" echo-info "DEVKIT_VS_LIB_x86_64=\"\$DEVKIT_ROOT/VC/lib/x64;\$DEVKIT_ROOT/VC/atlmfc/lib/x64;\$DEVKIT_ROOT/$SDK_VERSION/lib/x64\"" echo-info "DEVKIT_MSVCR_DLL_x86_64=\"\$DEVKIT_ROOT/VC/redist/x64/$MSVCR_DLL\"" echo-info "DEVKIT_MSVCP_DLL_x86_64=\"\$DEVKIT_ROOT/VC/redist/x64/$MSVCP_DLL\"" +echo-info "DEVKIT_UCRT_DLL_DIR_x86_64=\"\$DEVKIT_ROOT/10/Redist/ucrt/DLLs/x64\"" ################################################################################ # Copy this script diff -r 9822dd521c15 -r d93ae85b18c1 make/hotspot/lib/JvmFeatures.gmk --- a/make/hotspot/lib/JvmFeatures.gmk Tue May 15 18:03:31 2018 
+0530 +++ b/make/hotspot/lib/JvmFeatures.gmk Tue May 15 10:13:52 2018 -0700 @@ -128,8 +128,9 @@ ifneq ($(call check-jvm-feature, aot), true) JVM_CFLAGS_FEATURES += -DINCLUDE_AOT=0 JVM_EXCLUDE_FILES += \ - compiledIC_aot_x86_64.cpp compilerRuntime.cpp \ - aotCodeHeap.cpp aotCompiledMethod.cpp aotLoader.cpp compiledIC_aot.cpp + compiledIC_aot_x86_64.cpp compiledIC_aot_aarch64.cpp \ + compilerRuntime.cpp aotCodeHeap.cpp aotCompiledMethod.cpp \ + aotLoader.cpp compiledIC_aot.cpp endif ifneq ($(call check-jvm-feature, cmsgc), true) diff -r 9822dd521c15 -r d93ae85b18c1 make/jdk/src/classes/build/tools/cldrconverter/CLDRConverter.java --- a/make/jdk/src/classes/build/tools/cldrconverter/CLDRConverter.java Tue May 15 18:03:31 2018 +0530 +++ b/make/jdk/src/classes/build/tools/cldrconverter/CLDRConverter.java Tue May 15 10:13:52 2018 -0700 @@ -109,6 +109,7 @@ private static final String[] AVAILABLE_TZIDS = TimeZone.getAvailableIDs(); private static String zoneNameTempFile; private static String tzDataDir; + private static final Map canonicalTZMap = new HashMap<>(); static enum DraftType { UNCONFIRMED, @@ -439,6 +440,15 @@ // Parse timezone handlerTimeZone = new TimeZoneParseHandler(); parseLDMLFile(new File(TIMEZONE_SOURCE_FILE), handlerTimeZone); + + // canonical tz name map + // alias -> primary + handlerTimeZone.getData().forEach((k, v) -> { + String[] ids = ((String)v).split("\\s"); + for (int i = 1; i < ids.length; i++) { + canonicalTZMap.put(ids[i], ids[0]); + } + }); } private static void parseLDMLFile(File srcfile, AbstractLDMLHandler handler) throws Exception { @@ -658,7 +668,27 @@ handlerMetaZones.get(tzid) == null || handlerMetaZones.get(tzid) != null && map.get(METAZONE_ID_PREFIX + handlerMetaZones.get(tzid)) == null) { - // First, check the CLDR meta key + + // First, check the alias + String canonID = canonicalTZMap.get(tzid); + if (canonID != null && !tzid.equals(canonID)) { + Object value = map.get(TIMEZONE_ID_PREFIX + canonID); + if (value != null) { + 
names.put(tzid, value); + return; + } else { + String meta = handlerMetaZones.get(canonID); + if (meta != null) { + value = map.get(METAZONE_ID_PREFIX + meta); + if (value != null) { + names.put(tzid, meta); + return; + } + } + } + } + + // Check the CLDR meta key Optional> cldrMeta = handlerMetaZones.getData().entrySet().stream() .filter(me -> @@ -666,7 +696,7 @@ (String[])map.get(METAZONE_ID_PREFIX + me.getValue()))) .findAny(); cldrMeta.ifPresentOrElse(meta -> names.put(tzid, meta.getValue()), () -> { - // check the JRE meta key, add if there is not. + // Check the JRE meta key, add if there is not. Optional> jreMeta = jreMetaMap.entrySet().stream() .filter(jm -> Arrays.deepEquals(data, jm.getKey())) @@ -1024,16 +1054,9 @@ } private static Stream zidMapEntry() { - Map canonMap = new HashMap<>(); - handlerTimeZone.getData().entrySet().stream() - .forEach(e -> { - String[] ids = ((String)e.getValue()).split("\\s"); - for (int i = 1; i < ids.length; i++) { - canonMap.put(ids[i], ids[0]); - }}); return ZoneId.getAvailableZoneIds().stream() .map(id -> { - String canonId = canonMap.getOrDefault(id, id); + String canonId = canonicalTZMap.getOrDefault(id, id); String meta = handlerMetaZones.get(canonId); String zone001 = handlerMetaZones.zidMap().get(meta); return zone001 == null ? 
"" : diff -r 9822dd521c15 -r d93ae85b18c1 make/langtools/build.properties --- a/make/langtools/build.properties Tue May 15 18:03:31 2018 +0530 +++ b/make/langtools/build.properties Tue May 15 10:13:52 2018 -0700 @@ -24,9 +24,7 @@ # #javac configuration for "normal build" (these will be passed to the bootstrap compiler): -javac.opts = -XDignore.symbol.file=true -Xlint:all,-deprecation,-options,-exports -Werror -g:source,lines,vars -javac.source = 9 -javac.target = 9 +javac.opts = -XDignore.symbol.file=true -Xlint:all,-deprecation,-exports -Werror -g:source,lines,vars #version used to compile build tools javac.build.opts = -XDignore.symbol.file=true -Xlint:all,-deprecation,-options -Werror -g:source,lines,vars diff -r 9822dd521c15 -r d93ae85b18c1 make/langtools/build.xml --- a/make/langtools/build.xml Tue May 15 18:03:31 2018 +0530 +++ b/make/langtools/build.xml Tue May 15 10:13:52 2018 -0700 @@ -232,7 +232,6 @@ ${source.files} - diff -r 9822dd521c15 -r d93ae85b18c1 make/launcher/Launcher-jdk.aot.gmk --- a/make/launcher/Launcher-jdk.aot.gmk Tue May 15 18:03:31 2018 +0530 +++ b/make/launcher/Launcher-jdk.aot.gmk Tue May 15 10:13:52 2018 -0700 @@ -41,6 +41,7 @@ , \ JAVA_ARGS := --add-exports=jdk.internal.vm.ci/jdk.vm.ci.hotspot.aarch64=$(call CommaList, jdk.internal.vm.compiler jdk.aot) \ --add-exports=jdk.internal.vm.ci/jdk.vm.ci.hotspot.amd64=$(call CommaList, jdk.internal.vm.compiler jdk.aot) \ + --add-exports=jdk.internal.vm.ci/jdk.vm.ci.hotspot.aarch64=$(call CommaList, jdk.internal.vm.compiler jdk.aot) \ --add-exports=jdk.internal.vm.ci/jdk.vm.ci.hotspot.sparc=$(call CommaList, jdk.internal.vm.compiler jdk.aot) \ --add-exports=jdk.internal.vm.ci/jdk.vm.ci.meta=$(call CommaList, jdk.internal.vm.compiler jdk.aot) \ --add-exports=jdk.internal.vm.ci/jdk.vm.ci.runtime=$(call CommaList, jdk.internal.vm.compiler jdk.aot) \ diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/cpu/aarch64/assembler_aarch64.hpp --- a/src/hotspot/cpu/aarch64/assembler_aarch64.hpp Tue May 15 
18:03:31 2018 +0530 +++ b/src/hotspot/cpu/aarch64/assembler_aarch64.hpp Tue May 15 10:13:52 2018 -0700 @@ -2410,7 +2410,8 @@ #define INSN(NAME, opcode) \ void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm) { \ starti; \ - f(0, 31), f(0b001110, 29, 24), f(0, 21), f(0b001110, 15, 10); \ + f(0, 31), f(0b001110, 29, 24), f(0, 21), f(0, 15); \ + f(opcode, 14, 12), f(0b10, 11, 10); \ rf(Vm, 16), rf(Vn, 5), rf(Vd, 0); \ f(T & 1, 30), f(T >> 1, 23, 22); \ } diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp --- a/src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp Tue May 15 10:13:52 2018 -0700 @@ -48,11 +48,14 @@ __ b(_continuation); } -RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, - bool throw_index_out_of_bounds_exception) - : _throw_index_out_of_bounds_exception(throw_index_out_of_bounds_exception) - , _index(index) -{ +RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, LIR_Opr array) + : _throw_index_out_of_bounds_exception(false), _index(index), _array(array) { + assert(info != NULL, "must have info"); + _info = new CodeEmitInfo(info); +} + +RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index) + : _throw_index_out_of_bounds_exception(true), _index(index), _array(NULL) { assert(info != NULL, "must have info"); _info = new CodeEmitInfo(info); } @@ -69,14 +72,16 @@ } if (_index->is_cpu_register()) { - __ mov(rscratch1, _index->as_register()); + __ mov(r22, _index->as_register()); } else { - __ mov(rscratch1, _index->as_jint()); + __ mov(r22, _index->as_jint()); } Runtime1::StubID stub_id; if (_throw_index_out_of_bounds_exception) { stub_id = Runtime1::throw_index_exception_id; } else { + assert(_array != NULL, "sanity"); + __ mov(r23, _array->as_pointer_register()); stub_id = Runtime1::throw_range_check_failed_id; } __ far_call(RuntimeAddress(Runtime1::entry_for(stub_id)), 
NULL, rscratch2); diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp --- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp Tue May 15 10:13:52 2018 -0700 @@ -2807,7 +2807,8 @@ } -void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest) { +void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) { + assert(patch_code == lir_patch_none, "Patch code not supported"); __ lea(dest->as_register_lo(), as_Address(addr->as_address_ptr())); } diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp --- a/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp Tue May 15 10:13:52 2018 -0700 @@ -323,7 +323,7 @@ // target: the entry point of the method that creates and posts the exception oop -// has_argument: true if the exception needs an argument (passed in rscratch1) +// has_argument: true if the exception needs arguments (passed in r22 and r23) OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) { // make a frame and preserve the caller's caller-save registers @@ -332,7 +332,7 @@ if (!has_argument) { call_offset = __ call_RT(noreg, noreg, target); } else { - call_offset = __ call_RT(noreg, noreg, target, rscratch1); + call_offset = __ call_RT(noreg, noreg, target, r22, r23); } OopMapSet* oop_maps = new OopMapSet(); oop_maps->add_gc_map(call_offset, oop_map); diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/cpu/aarch64/compiledIC_aarch64.cpp --- a/src/hotspot/cpu/aarch64/compiledIC_aarch64.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/cpu/aarch64/compiledIC_aarch64.cpp Tue May 15 10:13:52 2018 -0700 @@ -1,6 +1,6 @@ /* * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2014, Red Hat Inc. 
All rights reserved. + * Copyright (c) 2014, 2018, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -56,8 +56,17 @@ } // static stub relocation stores the instruction address of the call __ relocate(static_stub_Relocation::spec(mark)); - // static stub relocation also tags the Method* in the code-stream. + +#if INCLUDE_AOT + // Don't create a Metadata reloc if we're generating immutable PIC. + if (cbuf.immutable_PIC()) { + __ movptr(rmethod, 0); + } else { + __ mov_metadata(rmethod, (Metadata*)NULL); + } +#else __ mov_metadata(rmethod, (Metadata*)NULL); +#endif __ movptr(rscratch1, 0); __ br(rscratch1); @@ -83,6 +92,61 @@ return 4; // 3 in emit_to_interp_stub + 1 in emit_call } +#if INCLUDE_AOT +#define __ _masm. +void CompiledStaticCall::emit_to_aot_stub(CodeBuffer &cbuf, address mark) { + if (!UseAOT) { + return; + } + // Stub is fixed up when the corresponding call is converted from + // calling compiled code to calling aot code. + // mov r, imm64_aot_code_address + // jmp r + + if (mark == NULL) { + mark = cbuf.insts_mark(); // Get mark within main instrs section. + } + + // Note that the code buffer's insts_mark is always relative to insts. + // That's why we must use the macroassembler to generate a stub. + MacroAssembler _masm(&cbuf); + + address base = + __ start_a_stub(to_aot_stub_size()); + guarantee(base != NULL, "out of space"); + + // Static stub relocation stores the instruction address of the call. + __ relocate(static_stub_Relocation::spec(mark, true /* is_aot */)); + // Load destination AOT code address. + __ movptr(rscratch1, 0); // address is zapped till fixup time. + // This is recognized as unresolved by relocs/nativeinst/ic code. + __ br(rscratch1); + + assert(__ pc() - base <= to_aot_stub_size(), "wrong stub size"); + + // Update current stubs pointer and restore insts_end. 
+ __ end_a_stub(); +} +#undef __ + +int CompiledStaticCall::to_aot_stub_size() { + if (UseAOT) { + return 5 * 4; // movz; movk; movk; movk; br + } else { + return 0; + } +} + +// Relocation entries for call stub, compiled java to aot. +int CompiledStaticCall::reloc_to_aot_stub() { + if (UseAOT) { + return 5 * 4; // movz; movk; movk; movk; br + } else { + return 0; + } +} +#endif // INCLUDE_AOT + void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, address entry) { address stub = find_stub(false /* is_aot */); guarantee(stub != NULL, "stub not found"); diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/cpu/aarch64/compiledIC_aot_aarch64.cpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/cpu/aarch64/compiledIC_aot_aarch64.cpp Tue May 15 10:13:52 2018 -0700 @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, Red Hat Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#include "aot/compiledIC_aot.hpp" +#include "code/codeCache.hpp" +#include "memory/resourceArea.hpp" + +void CompiledDirectStaticCall::set_to_far(const methodHandle& callee, address entry) { + if (TraceICs) { + ResourceMark rm; + tty->print_cr("CompiledDirectStaticCall@" INTPTR_FORMAT ": set_to_far %s", + p2i(instruction_address()), + callee->name_and_sig_as_C_string()); + } + + set_destination_mt_safe(entry); +} + +void CompiledPltStaticCall::set_to_interpreted(const methodHandle& callee, address entry) { + address stub = find_stub(); + guarantee(stub != NULL, "stub not found"); + if (TraceICs) { + ResourceMark rm; + tty->print_cr("CompiledPltStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s", + p2i(instruction_address()), + callee->name_and_sig_as_C_string()); + } + + // Creation also verifies the object. + NativeLoadGot* method_loader = nativeLoadGot_at(stub); + NativeGotJump* jump = nativeGotJump_at(method_loader->next_instruction_address()); + + intptr_t data = method_loader->data(); + address destination = jump->destination(); + assert(data == 0 || data == (intptr_t)callee(), + "a) MT-unsafe modification of inline cache"); + assert(destination == (address)Universe::non_oop_word() + || destination == entry, + "b) MT-unsafe modification of inline cache"); + + // Update stub. + method_loader->set_data((intptr_t)callee()); + jump->set_jump_destination(entry); + + // Update jump to call. + set_destination_mt_safe(stub); +} + +#ifdef NEVER_CALLED +void CompiledPltStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) { + assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call"); + // Reset stub. + address stub = static_stub->addr(); + assert(stub != NULL, "stub not found"); + // Creation also verifies the object. 
+ NativeLoadGot* method_loader = nativeLoadGot_at(stub); + NativeGotJump* jump = nativeGotJump_at(method_loader->next_instruction_address()); + method_loader->set_data(0); + jump->set_jump_destination((address)-1); +} +#endif + +#ifndef PRODUCT +void CompiledPltStaticCall::verify() { + // Verify call. + _call->verify(); + +#ifdef ASSERT + CodeBlob *cb = CodeCache::find_blob_unsafe((address) _call); + assert(cb && cb->is_aot(), "CompiledPltStaticCall can only be used on AOTCompiledMethod"); +#endif + + // Verify stub. + address stub = find_stub(); + assert(stub != NULL, "no stub found for static call"); + // Creation also verifies the object. + NativeLoadGot* method_loader = nativeLoadGot_at(stub); + NativeGotJump* jump = nativeGotJump_at(method_loader->next_instruction_address()); + // Verify state. + assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted(), "sanity check"); +} +#endif // !PRODUCT diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/cpu/aarch64/gc/g1/g1BarrierSetAssembler_aarch64.cpp --- a/src/hotspot/cpu/aarch64/gc/g1/g1BarrierSetAssembler_aarch64.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/cpu/aarch64/gc/g1/g1BarrierSetAssembler_aarch64.cpp Tue May 15 10:13:52 2018 -0700 @@ -116,7 +116,7 @@ // Do we need to load the previous value? if (obj != noreg) { - __ load_heap_oop(pre_val, Address(obj, 0)); + __ load_heap_oop(pre_val, Address(obj, 0), noreg, noreg, AS_RAW); } // Is the previous value null? @@ -294,7 +294,7 @@ false /* expand_call */); if (val == noreg) { - __ store_heap_oop_null(Address(r3, 0)); + BarrierSetAssembler::store_at(masm, decorators, type, Address(r3, 0), noreg, noreg, noreg); } else { // G1 barrier needs uncompressed oop for region cross check. 
Register new_val = val; @@ -302,7 +302,7 @@ new_val = rscratch2; __ mov(new_val, val); } - __ store_heap_oop(Address(r3, 0), val); + BarrierSetAssembler::store_at(masm, decorators, type, Address(r3, 0), val, noreg, noreg); g1_write_barrier_post(masm, r3 /* store_adr */, new_val /* new_val */, diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp --- a/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp Tue May 15 10:13:52 2018 -0700 @@ -35,11 +35,21 @@ bool on_heap = (decorators & IN_HEAP) != 0; bool on_root = (decorators & IN_ROOT) != 0; + bool oop_not_null = (decorators & OOP_NOT_NULL) != 0; switch (type) { case T_OBJECT: case T_ARRAY: { if (on_heap) { - __ load_heap_oop(dst, src); + if (UseCompressedOops) { + __ ldrw(dst, src); + if (oop_not_null) { + __ decode_heap_oop_not_null(dst); + } else { + __ decode_heap_oop(dst); + } + } else { + __ ldr(dst, src); + } } else { assert(on_root, "why else?"); __ ldr(dst, src); @@ -57,8 +67,17 @@ switch (type) { case T_OBJECT: case T_ARRAY: { + val = val == noreg ? 
zr : val; if (on_heap) { - __ store_heap_oop(dst, val); + if (UseCompressedOops) { + assert(!dst.uses(val), "not enough registers"); + if (val != zr) { + __ encode_heap_oop(val); + } + __ strw(val, dst); + } else { + __ str(val, dst); + } } else { assert(on_root, "why else?"); __ str(val, dst); diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/cpu/aarch64/gc/shared/cardTableBarrierSetAssembler_aarch64.cpp --- a/src/hotspot/cpu/aarch64/gc/shared/cardTableBarrierSetAssembler_aarch64.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/cpu/aarch64/gc/shared/cardTableBarrierSetAssembler_aarch64.cpp Tue May 15 10:13:52 2018 -0700 @@ -90,13 +90,14 @@ void CardTableBarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, Address dst, Register val, Register tmp1, Register tmp2) { + bool in_heap = (decorators & IN_HEAP) != 0; bool on_array = (decorators & IN_HEAP_ARRAY) != 0; bool on_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0; bool precise = on_array || on_anonymous; - if (val == noreg) { - __ store_heap_oop_null(dst); - } else { - __ store_heap_oop(dst, val); + + bool needs_post_barrier = val != noreg && in_heap; + BarrierSetAssembler::store_at(masm, decorators, type, dst, val, noreg, noreg); + if (needs_post_barrier) { // flatten object address if needed if (!precise || (dst.index() == noreg && dst.offset() == 0)) { store_check(masm, dst.base(), dst); diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp --- a/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp Tue May 15 10:13:52 2018 -0700 @@ -278,8 +278,7 @@ resolve_oop_handle(result, tmp); // Add in the index add(result, result, index); - BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); - bs->load_at(this, IN_HEAP, T_OBJECT, result, Address(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT)), tmp, /*tmp_thread*/ noreg); + 
load_heap_oop(result, Address(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT))); } void InterpreterMacroAssembler::load_resolved_klass_at_offset( diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/cpu/aarch64/jvmciCodeInstaller_aarch64.cpp --- a/src/hotspot/cpu/aarch64/jvmciCodeInstaller_aarch64.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/cpu/aarch64/jvmciCodeInstaller_aarch64.cpp Tue May 15 10:13:52 2018 -0700 @@ -35,6 +35,9 @@ return pc_offset + NativeCall::instruction_size; } else if (inst->is_general_jump()) { return pc_offset + NativeGeneralJump::instruction_size; + } else if (NativeInstruction::is_adrp_at((address)inst)) { + // adrp; add; blr + return pc_offset + 3 * NativeInstruction::instruction_size; } else { JVMCI_ERROR_0("unsupported type of instruction for call site"); } @@ -81,7 +84,8 @@ void CodeInstaller::pd_patch_DataSectionReference(int pc_offset, int data_offset, TRAPS) { address pc = _instructions->start() + pc_offset; NativeInstruction* inst = nativeInstruction_at(pc); - if (inst->is_adr_aligned() || inst->is_ldr_literal()) { + if (inst->is_adr_aligned() || inst->is_ldr_literal() + || (NativeInstruction::maybe_cpool_ref(pc))) { address dest = _constants->start() + data_offset; _instructions->relocate(pc, section_word_Relocation::spec((address) dest, CodeBuffer::SECT_CONSTS)); TRACE_jvmci_3("relocating at " PTR_FORMAT " (+%d) with destination at %d", p2i(pc), pc_offset, data_offset); @@ -104,6 +108,10 @@ NativeGeneralJump* jump = nativeGeneralJump_at(pc); jump->set_jump_destination((address) foreign_call_destination); _instructions->relocate(jump->instruction_address(), runtime_call_Relocation::spec()); + } else if (NativeInstruction::is_adrp_at((address)inst)) { + // adrp; add; blr + MacroAssembler::pd_patch_instruction_size((address)inst, + (address)foreign_call_destination); } else { JVMCI_ERROR("unknown call or jump instruction at " PTR_FORMAT, p2i(pc)); } diff -r 9822dd521c15 -r d93ae85b18c1 
src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp --- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp Tue May 15 10:13:52 2018 -0700 @@ -3975,41 +3975,48 @@ movk(dst, nk & 0xffff); } -void MacroAssembler::load_heap_oop(Register dst, Address src) -{ - if (UseCompressedOops) { - ldrw(dst, src); - decode_heap_oop(dst); +void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators, + Register dst, Address src, + Register tmp1, Register thread_tmp) { + BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); + bool as_raw = (decorators & AS_RAW) != 0; + if (as_raw) { + bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, thread_tmp); } else { - ldr(dst, src); + bs->load_at(this, decorators, type, dst, src, tmp1, thread_tmp); } } -void MacroAssembler::load_heap_oop_not_null(Register dst, Address src) -{ - if (UseCompressedOops) { - ldrw(dst, src); - decode_heap_oop_not_null(dst); +void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, + Address dst, Register src, + Register tmp1, Register thread_tmp) { + BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); + bool as_raw = (decorators & AS_RAW) != 0; + if (as_raw) { + bs->BarrierSetAssembler::store_at(this, decorators, type, dst, src, tmp1, thread_tmp); } else { - ldr(dst, src); + bs->store_at(this, decorators, type, dst, src, tmp1, thread_tmp); } } -void MacroAssembler::store_heap_oop(Address dst, Register src) { - if (UseCompressedOops) { - assert(!dst.uses(src), "not enough registers"); - encode_heap_oop(src); - strw(src, dst); - } else - str(src, dst); +void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1, + Register thread_tmp, DecoratorSet decorators) { + access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, thread_tmp); +} + +void MacroAssembler::load_heap_oop_not_null(Register dst, 
Address src, Register tmp1, + Register thread_tmp, DecoratorSet decorators) { + access_load_at(T_OBJECT, IN_HEAP | OOP_NOT_NULL | decorators, dst, src, tmp1, thread_tmp); +} + +void MacroAssembler::store_heap_oop(Address dst, Register src, Register tmp1, + Register thread_tmp, DecoratorSet decorators) { + access_store_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, thread_tmp); } // Used for storing NULLs. void MacroAssembler::store_heap_oop_null(Address dst) { - if (UseCompressedOops) { - strw(zr, dst); - } else - str(zr, dst); + access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg); } Address MacroAssembler::allocate_metadata_address(Metadata* obj) { diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp --- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp Tue May 15 10:13:52 2018 -0700 @@ -789,10 +789,19 @@ void resolve_oop_handle(Register result, Register tmp = r5); void load_mirror(Register dst, Register method, Register tmp = r5); - void load_heap_oop(Register dst, Address src); + void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src, + Register tmp1, Register tmp_thread); + + void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register src, + Register tmp1, Register tmp_thread); - void load_heap_oop_not_null(Register dst, Address src); - void store_heap_oop(Address dst, Register src); + void load_heap_oop(Register dst, Address src, Register tmp1 = noreg, + Register thread_tmp = noreg, DecoratorSet decorators = 0); + + void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg, + Register thread_tmp = noreg, DecoratorSet decorators = 0); + void store_heap_oop(Address dst, Register src, Register tmp1 = noreg, + Register tmp_thread = noreg, DecoratorSet decorators = 0); // currently unimplemented // Used for storing NULL. 
All other oop constants should be diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/cpu/aarch64/methodHandles_aarch64.cpp --- a/src/hotspot/cpu/aarch64/methodHandles_aarch64.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/cpu/aarch64/methodHandles_aarch64.cpp Tue May 15 10:13:52 2018 -0700 @@ -135,11 +135,11 @@ // Load the invoker, as MH -> MH.form -> LF.vmentry __ verify_oop(recv); - __ load_heap_oop(method_temp, Address(recv, NONZERO(java_lang_invoke_MethodHandle::form_offset_in_bytes()))); + __ load_heap_oop(method_temp, Address(recv, NONZERO(java_lang_invoke_MethodHandle::form_offset_in_bytes())), temp2); __ verify_oop(method_temp); - __ load_heap_oop(method_temp, Address(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes()))); + __ load_heap_oop(method_temp, Address(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes())), temp2); __ verify_oop(method_temp); - __ load_heap_oop(method_temp, Address(method_temp, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes()))); + __ load_heap_oop(method_temp, Address(method_temp, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes())), temp2); __ verify_oop(method_temp); __ ldr(method_temp, Address(method_temp, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes()))); @@ -311,7 +311,7 @@ if (VerifyMethodHandles && iid != vmIntrinsics::_linkToInterface) { Label L_ok; Register temp2_defc = temp2; - __ load_heap_oop(temp2_defc, member_clazz); + __ load_heap_oop(temp2_defc, member_clazz, temp3); load_klass_from_Class(_masm, temp2_defc); __ verify_klass_ptr(temp2_defc); __ check_klass_subtype(temp1_recv_klass, temp2_defc, temp3, L_ok); diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp --- a/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp Tue May 15 10:13:52 2018 -0700 @@ -36,7 +36,120 @@ #include "c1/c1_Runtime1.hpp" #endif -void 
NativeCall::verify() { ; } +void NativeCall::verify() { + assert(NativeCall::is_call_at((address)this), "unexpected code at call site"); +} + +void NativeInstruction::wrote(int offset) { + ICache::invalidate_word(addr_at(offset)); +} + +void NativeLoadGot::report_and_fail() const { + tty->print_cr("Addr: " INTPTR_FORMAT, p2i(instruction_address())); + fatal("not a indirect rip mov to rbx"); +} + +void NativeLoadGot::verify() const { + assert(is_adrp_at((address)this), "must be adrp"); +} + +address NativeLoadGot::got_address() const { + return MacroAssembler::target_addr_for_insn((address)this); +} + +intptr_t NativeLoadGot::data() const { + return *(intptr_t *) got_address(); +} + +address NativePltCall::destination() const { + NativeGotJump* jump = nativeGotJump_at(plt_jump()); + return *(address*)MacroAssembler::target_addr_for_insn((address)jump); +} + +address NativePltCall::plt_entry() const { + return MacroAssembler::target_addr_for_insn((address)this); +} + +address NativePltCall::plt_jump() const { + address entry = plt_entry(); + // Virtual PLT code has move instruction first + if (((NativeGotJump*)entry)->is_GotJump()) { + return entry; + } else { + return nativeLoadGot_at(entry)->next_instruction_address(); + } +} + +address NativePltCall::plt_load_got() const { + address entry = plt_entry(); + if (!((NativeGotJump*)entry)->is_GotJump()) { + // Virtual PLT code has move instruction first + return entry; + } else { + // Static PLT code has move instruction second (from c2i stub) + return nativeGotJump_at(entry)->next_instruction_address(); + } +} + +address NativePltCall::plt_c2i_stub() const { + address entry = plt_load_got(); + // This method should be called only for static calls which has C2I stub. 
+ NativeLoadGot* load = nativeLoadGot_at(entry); + return entry; +} + +address NativePltCall::plt_resolve_call() const { + NativeGotJump* jump = nativeGotJump_at(plt_jump()); + address entry = jump->next_instruction_address(); + if (((NativeGotJump*)entry)->is_GotJump()) { + return entry; + } else { + // c2i stub 2 instructions + entry = nativeLoadGot_at(entry)->next_instruction_address(); + return nativeGotJump_at(entry)->next_instruction_address(); + } +} + +void NativePltCall::reset_to_plt_resolve_call() { + set_destination_mt_safe(plt_resolve_call()); +} + +void NativePltCall::set_destination_mt_safe(address dest) { + // rewriting the value in the GOT, it should always be aligned + NativeGotJump* jump = nativeGotJump_at(plt_jump()); + address* got = (address *) jump->got_address(); + *got = dest; +} + +void NativePltCall::set_stub_to_clean() { + NativeLoadGot* method_loader = nativeLoadGot_at(plt_c2i_stub()); + NativeGotJump* jump = nativeGotJump_at(method_loader->next_instruction_address()); + method_loader->set_data(0); + jump->set_jump_destination((address)-1); +} + +void NativePltCall::verify() const { + assert(NativeCall::is_call_at((address)this), "unexpected code at call site"); +} + +address NativeGotJump::got_address() const { + return MacroAssembler::target_addr_for_insn((address)this); +} + +address NativeGotJump::destination() const { + address *got_entry = (address *) got_address(); + return *got_entry; +} + +bool NativeGotJump::is_GotJump() const { + NativeInstruction *insn = + nativeInstruction_at(addr_at(3 * NativeInstruction::instruction_size)); + return insn->encoding() == 0xd61f0200; // br x16 +} + +void NativeGotJump::verify() const { + assert(is_adrp_at((address)this), "must be adrp"); +} address NativeCall::destination() const { address addr = (address)this; @@ -71,6 +184,7 @@ ResourceMark rm; int code_size = NativeInstruction::instruction_size; address addr_call = addr_at(0); + bool reachable = 
Assembler::reachable_from_branch_at(addr_call, dest); assert(NativeCall::is_call_at(addr_call), "unexpected code at call site"); // Patch the constant in the call's trampoline stub. @@ -81,7 +195,7 @@ } // Patch the call. - if (Assembler::reachable_from_branch_at(addr_call, dest)) { + if (reachable) { set_destination(dest); } else { assert (trampoline_stub_addr != NULL, "we need a trampoline"); @@ -103,9 +217,11 @@ is_NativeCallTrampolineStub_at(bl_destination)) return bl_destination; - // If the codeBlob is not a nmethod, this is because we get here from the - // CodeBlob constructor, which is called within the nmethod constructor. - return trampoline_stub_Relocation::get_trampoline_for(call_addr, (nmethod*)code); + if (code->is_nmethod()) { + return trampoline_stub_Relocation::get_trampoline_for(call_addr, (nmethod*)code); + } + + return NULL; } // Inserts a native call instruction at a given pc @@ -340,9 +456,16 @@ void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) { assert(dest == SharedRuntime::get_handle_wrong_method_stub(), "expected fixed destination of patch"); - assert(nativeInstruction_at(verified_entry)->is_jump_or_nop() - || nativeInstruction_at(verified_entry)->is_sigill_zombie_not_entrant(), - "Aarch64 cannot replace non-jump with jump"); + +#ifdef ASSERT + // This may be the temporary nmethod generated while we're AOT + // compiling. Such an nmethod doesn't begin with a NOP but with an ADRP. + if (! (CalculateClassFingerprint && UseAOT && is_adrp_at(verified_entry))) { + assert(nativeInstruction_at(verified_entry)->is_jump_or_nop() + || nativeInstruction_at(verified_entry)->is_sigill_zombie_not_entrant(), + "Aarch64 cannot replace non-jump with jump"); + } +#endif // Patch this nmethod atomically. 
if (Assembler::reachable_from_branch_at(verified_entry, dest)) { diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp --- a/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp Tue May 15 10:13:52 2018 -0700 @@ -1,6 +1,6 @@ /* * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2014, Red Hat Inc. All rights reserved. + * Copyright (c) 2014, 2018, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -95,6 +95,8 @@ void set_ptr_at (int offset, address ptr) { *(address*) addr_at(offset) = ptr; } void set_oop_at (int offset, oop o) { *(oop*) addr_at(offset) = o; } + void wrote(int offset); + public: // unit test stuff @@ -148,6 +150,46 @@ return (NativeInstruction*)address; } +class NativePltCall: public NativeInstruction { +public: + enum Arm_specific_constants { + instruction_size = 4, + instruction_offset = 0, + displacement_offset = 1, + return_address_offset = 4 + }; + address instruction_address() const { return addr_at(instruction_offset); } + address next_instruction_address() const { return addr_at(return_address_offset); } + address displacement_address() const { return addr_at(displacement_offset); } + int displacement() const { return (jint) int_at(displacement_offset); } + address return_address() const { return addr_at(return_address_offset); } + address destination() const; + address plt_entry() const; + address plt_jump() const; + address plt_load_got() const; + address plt_resolve_call() const; + address plt_c2i_stub() const; + void set_stub_to_clean(); + + void reset_to_plt_resolve_call(); + void set_destination_mt_safe(address dest); + + void verify() const; +}; + +inline NativePltCall* nativePltCall_at(address address) { + NativePltCall* call = (NativePltCall*) address; +#ifdef 
ASSERT + call->verify(); +#endif + return call; +} + +inline NativePltCall* nativePltCall_before(address addr) { + address at = addr - NativePltCall::instruction_size; + return nativePltCall_at(at); +} + inline NativeCall* nativeCall_at(address address); // The NativeCall is an abstraction for accessing/manipulating native // call instructions (used to manipulate inline caches, primitive & @@ -169,7 +211,7 @@ address return_address() const { return addr_at(return_address_offset); } address destination() const; - void set_destination(address dest) { + void set_destination(address dest) { int offset = dest - instruction_address(); unsigned int insn = 0b100101 << 26; assert((offset & 3) == 0, "should be"); @@ -191,6 +233,12 @@ return is_call_at(return_address - NativeCall::return_address_offset); } +#if INCLUDE_AOT + static bool is_far_call(address instr, address target) { + return !Assembler::reachable_from_branch_at(instr, target); + } +#endif + // MT-safe patching of a call instruction. 
static void insert(address code_pos, address entry); @@ -381,6 +429,39 @@ static void test() {} }; +// adrp x16, #page +// add x16, x16, #offset +// ldr x16, [x16] +class NativeLoadGot: public NativeInstruction { +public: + enum AArch64_specific_constants { + instruction_length = 4 * NativeInstruction::instruction_size, + offset_offset = 0, + }; + + address instruction_address() const { return addr_at(0); } + address return_address() const { return addr_at(instruction_length); } + address got_address() const; + address next_instruction_address() const { return return_address(); } + intptr_t data() const; + void set_data(intptr_t data) { + intptr_t *addr = (intptr_t *) got_address(); + *addr = data; + } + + void verify() const; +private: + void report_and_fail() const; +}; + +inline NativeLoadGot* nativeLoadGot_at(address addr) { + NativeLoadGot* load = (NativeLoadGot*) addr; +#ifdef ASSERT + load->verify(); +#endif + return load; +} + class NativeJump: public NativeInstruction { public: enum AArch64_specific_constants { @@ -441,6 +522,31 @@ return jump; } +class NativeGotJump: public NativeInstruction { +public: + enum AArch64_specific_constants { + instruction_size = 4 * NativeInstruction::instruction_size, + }; + + void verify() const; + address instruction_address() const { return addr_at(0); } + address destination() const; + address return_address() const { return addr_at(instruction_size); } + address got_address() const; + address next_instruction_address() const { return addr_at(instruction_size); } + bool is_GotJump() const; + + void set_jump_destination(address dest) { + address* got = (address *)got_address(); + *got = dest; + } +}; + +inline NativeGotJump* nativeGotJump_at(address addr) { + NativeGotJump* jump = (NativeGotJump*)(addr); + return jump; +} + class NativePopReg : public NativeInstruction { public: // Insert a pop instruction diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp --- 
a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp Tue May 15 10:13:52 2018 -0700 @@ -1,6 +1,6 @@ /* * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved. + * Copyright (c) 2014, 2018, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -560,7 +560,7 @@ __ ldr(rscratch1, Address(rmethod, in_bytes(Method::from_compiled_offset()))); #if INCLUDE_JVMCI - if (EnableJVMCI) { + if (EnableJVMCI || UseAOT) { // check if this call should be routed towards a specific entry point __ ldr(rscratch2, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset()))); Label no_alternative_target; @@ -2278,7 +2278,7 @@ // Setup code generation tools int pad = 0; #if INCLUDE_JVMCI - if (EnableJVMCI) { + if (EnableJVMCI || UseAOT) { pad += 512; // Increase the buffer size when compiling for JVMCI } #endif @@ -2360,7 +2360,7 @@ int implicit_exception_uncommon_trap_offset = 0; int uncommon_trap_offset = 0; - if (EnableJVMCI) { + if (EnableJVMCI || UseAOT) { implicit_exception_uncommon_trap_offset = __ pc() - start; __ ldr(lr, Address(rthread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset()))); @@ -2486,7 +2486,7 @@ __ reset_last_Java_frame(false); #if INCLUDE_JVMCI - if (EnableJVMCI) { + if (EnableJVMCI || UseAOT) { __ bind(after_fetch_unroll_info_call); } #endif @@ -2644,7 +2644,7 @@ _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words); _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset); #if INCLUDE_JVMCI - if (EnableJVMCI) { + if (EnableJVMCI || UseAOT) { _deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset); 
_deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset); } diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp --- a/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp Tue May 15 10:13:52 2018 -0700 @@ -1816,13 +1816,13 @@ __ align(OptoLoopAlignment); __ BIND(L_store_element); - __ store_heap_oop(__ post(to, UseCompressedOops ? 4 : 8), copied_oop); // store the oop + __ store_heap_oop(__ post(to, UseCompressedOops ? 4 : 8), copied_oop, noreg, noreg, AS_RAW); // store the oop __ sub(count, count, 1); __ cbz(count, L_do_card_marks); // ======== loop entry is here ======== __ BIND(L_load_element); - __ load_heap_oop(copied_oop, __ post(from, UseCompressedOops ? 4 : 8)); // load the oop + __ load_heap_oop(copied_oop, __ post(from, UseCompressedOops ? 4 : 8), noreg, noreg, AS_RAW); // load the oop __ cbz(copied_oop, L_store_element); __ load_klass(r19_klass, copied_oop);// query the object klass diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp --- a/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp Tue May 15 10:13:52 2018 -0700 @@ -1,6 +1,6 @@ /* * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2014, Red Hat Inc. All rights reserved. + * Copyright (c) 2014, 2018, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -333,16 +333,17 @@ return entry; } -address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler( - const char* name) { +address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler() { address entry = __ pc(); // expression stack must be empty before entering the VM if an // exception happened __ empty_expression_stack(); // setup parameters + // ??? convention: expect aberrant index in register r1 __ movw(c_rarg2, r1); - __ mov(c_rarg1, (address)name); + // ??? convention: expect array in register r3 + __ mov(c_rarg1, r3); __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime:: @@ -483,7 +484,7 @@ #if INCLUDE_JVMCI // Check if we need to take lock at entry of synchronized method. This can // only occur on method entry so emit it only for vtos with step 0. - if (EnableJVMCI && state == vtos && step == 0) { + if ((EnableJVMCI || UseAOT) && state == vtos && step == 0) { Label L; __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset())); __ cbz(rscratch1, L); diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/cpu/aarch64/templateTable_aarch64.cpp --- a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp Tue May 15 10:13:52 2018 -0700 @@ -147,16 +147,14 @@ Register val, DecoratorSet decorators) { assert(val == noreg || val == r0, "parameter is just for looks"); - BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); - bs->store_at(_masm, decorators, T_OBJECT, dst, val, /*tmp1*/ r10, /*tmp2*/ r1); + __ store_heap_oop(dst, val, r10, r1, decorators); } static void do_oop_load(InterpreterMacroAssembler* _masm, Address src, Register dst, DecoratorSet decorators) { - BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); - bs->load_at(_masm, decorators, T_OBJECT, dst, src, /*tmp1*/ r10, /*tmp_thread*/ r1); + __ 
load_heap_oop(dst, src, r10, r1, decorators); } Address TemplateTable::at_bcp(int offset) { @@ -747,6 +745,8 @@ } Label ok; __ br(Assembler::LO, ok); + // ??? convention: move array into r3 for exception message + __ mov(r3, array); __ mov(rscratch1, Interpreter::_throw_ArrayIndexOutOfBoundsException_entry); __ br(rscratch1); __ bind(ok); diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/cpu/arm/c1_CodeStubs_arm.cpp --- a/src/hotspot/cpu/arm/c1_CodeStubs_arm.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/cpu/arm/c1_CodeStubs_arm.cpp Tue May 15 10:13:52 2018 -0700 @@ -50,14 +50,18 @@ // TODO: ARM - is it possible to inline these stubs into the main code stream? -RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, - bool throw_index_out_of_bounds_exception) - : _throw_index_out_of_bounds_exception(throw_index_out_of_bounds_exception) - , _index(index) -{ - _info = info == NULL ? NULL : new CodeEmitInfo(info); + +RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, LIR_Opr array) + : _throw_index_out_of_bounds_exception(false), _index(index), _array(array) { + assert(info != NULL, "must have info"); + _info = new CodeEmitInfo(info); } +RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index) + : _throw_index_out_of_bounds_exception(true), _index(index), _array(NULL) { + assert(info != NULL, "must have info"); + _info = new CodeEmitInfo(info); +} void RangeCheckStub::emit_code(LIR_Assembler* ce) { __ bind(_entry); @@ -73,7 +77,7 @@ return; } // Pass the array index on stack because all registers must be preserved - ce->verify_reserved_argument_area_size(1); + ce->verify_reserved_argument_area_size(_throw_index_out_of_bounds_exception ? 
1 : 2); if (_index->is_cpu_register()) { __ str_32(_index->as_register(), Address(SP)); } else { @@ -87,6 +91,7 @@ #endif __ call(Runtime1::entry_for(Runtime1::throw_index_exception_id), relocInfo::runtime_call_type); } else { + __ str(_array->as_pointer_register(), Address(SP, BytesPerWord)); // ??? Correct offset? Correct instruction? __ call(Runtime1::entry_for(Runtime1::throw_range_check_failed_id), relocInfo::runtime_call_type); } ce->add_call_info_here(_info); diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp --- a/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp Tue May 15 10:13:52 2018 -0700 @@ -3285,7 +3285,8 @@ } -void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest) { +void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) { + assert(patch_code == lir_patch_none, "Patch code not supported"); LIR_Address* addr = addr_opr->as_address_ptr(); if (addr->index()->is_illegal()) { jint c = addr->disp(); diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/cpu/arm/c1_Runtime1_arm.cpp --- a/src/hotspot/cpu/arm/c1_Runtime1_arm.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/cpu/arm/c1_Runtime1_arm.cpp Tue May 15 10:13:52 2018 -0700 @@ -366,11 +366,15 @@ OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) { OopMap* oop_map = save_live_registers(sasm); + int call_offset; if (has_argument) { __ ldr(R1, Address(SP, arg1_offset)); + __ ldr(R2, Address(SP, arg2_offset)); + call_offset = __ call_RT(noreg, noreg, target, R1, R2); + } else { + call_offset = __ call_RT(noreg, noreg, target); } - int call_offset = __ call_RT(noreg, noreg, target); OopMapSet* oop_maps = new OopMapSet(); oop_maps->add_gc_map(call_offset, oop_map); diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/cpu/arm/compiledIC_arm.cpp --- a/src/hotspot/cpu/arm/compiledIC_arm.cpp Tue May 15 18:03:31 
2018 +0530 +++ b/src/hotspot/cpu/arm/compiledIC_arm.cpp Tue May 15 10:13:52 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -91,6 +91,11 @@ } #endif // COMPILER2_OR_JVMCI +int CompiledStaticCall::to_trampoline_stub_size() { + // ARM doesn't use trampolines. + return 0; +} + // size of C2 call stub, compiled java to interpretor int CompiledStaticCall::to_interp_stub_size() { return 8 * NativeInstruction::instruction_size; diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/cpu/arm/jvmciCodeInstaller_arm.cpp --- a/src/hotspot/cpu/arm/jvmciCodeInstaller_arm.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/cpu/arm/jvmciCodeInstaller_arm.cpp Tue May 15 10:13:52 2018 -0700 @@ -52,7 +52,7 @@ Unimplemented(); } -void CodeInstaller::pd_relocate_JavaMethod(Handle hotspot_method, jint pc_offset, TRAPS) { +void CodeInstaller::pd_relocate_JavaMethod(CodeBuffer &cbuf, Handle hotspot_method, jint pc_offset, TRAPS) { Unimplemented(); } diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/cpu/arm/templateInterpreterGenerator_arm.cpp --- a/src/hotspot/cpu/arm/templateInterpreterGenerator_arm.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/cpu/arm/templateInterpreterGenerator_arm.cpp Tue May 15 10:13:52 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -185,18 +185,16 @@ return entry; } -address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) { +address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler() { address entry = __ pc(); // index is in R4_ArrayIndexOutOfBounds_index - InlinedString Lname(name); - // expression stack must be empty before entering the VM if an exception happened __ empty_expression_stack(); // setup parameters - __ ldr_literal(R1, Lname); + // Array expected in R1. __ mov(R2, R4_ArrayIndexOutOfBounds_index); __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), R1, R2); @@ -204,7 +202,6 @@ __ nop(); // to avoid filling CPU pipeline with invalid instructions __ nop(); __ should_not_reach_here(); - __ bind_literal(Lname); return entry; } diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/cpu/arm/templateTable_arm.cpp --- a/src/hotspot/cpu/arm/templateTable_arm.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/cpu/arm/templateTable_arm.cpp Tue May 15 10:13:52 2018 -0700 @@ -398,7 +398,7 @@ void TemplateTable::ldc(bool wide) { transition(vtos, vtos); - Label fastCase, Done; + Label fastCase, Condy, Done; const Register Rindex = R1_tmp; const Register Rcpool = R2_tmp; @@ -450,15 +450,11 @@ // int, float, String __ bind(fastCase); -#ifdef ASSERT - { Label L; - __ cmp(RtagType, JVM_CONSTANT_Integer); - __ cond_cmp(RtagType, JVM_CONSTANT_Float, ne); - __ b(L, eq); - __ stop("unexpected tag type in ldc"); - __ bind(L); - } -#endif // ASSERT + + __ cmp(RtagType, JVM_CONSTANT_Integer); + __ cond_cmp(RtagType, JVM_CONSTANT_Float, ne); + __ b(Condy, ne); + // itos, ftos __ add(Rtemp, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord)); __ ldr_u32(R0_tos, Address(Rtemp, base_offset)); @@ -466,6 +462,11 @@ // floats and ints are placed on stack in the same way, so // we can use push(itos) to transfer float value without VFP 
__ push(itos); + __ b(Done); + + __ bind(Condy); + condy_helper(Done); + __ bind(Done); } @@ -489,6 +490,23 @@ __ call_VM(R0_tos, entry, R1); __ bind(resolved); + { // Check for the null sentinel. + // If we just called the VM, that already did the mapping for us, + // but it's harmless to retry. + Label notNull; + Register result = R0; + Register tmp = R1; + Register rarg = R2; + + // Stash null_sentinel address to get its value later + __ mov_slow(rarg, (uintptr_t)Universe::the_null_sentinel_addr()); + __ ldr(tmp, Address(rarg)); + __ cmp(result, tmp); + __ b(notNull, ne); + __ mov(result, 0); // NULL object reference + __ bind(notNull); + } + if (VerifyOops) { __ verify_oop(R0_tos); } @@ -509,8 +527,9 @@ __ add(Rbase, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord)); + Label Condy, exit; #ifdef __ABI_HARD__ - Label Long, exit; + Label Long; // get type from tags __ add(Rtemp, Rtags, tags_offset); __ ldrb(Rtemp, Address(Rtemp, Rindex)); @@ -523,6 +542,8 @@ __ bind(Long); #endif + __ cmp(Rtemp, JVM_CONSTANT_Long); + __ b(Condy, ne); #ifdef AARCH64 __ ldr(R0_tos, Address(Rbase, base_offset)); #else @@ -530,10 +551,115 @@ __ ldr(R1_tos_hi, Address(Rbase, base_offset + 1 * wordSize)); #endif // AARCH64 __ push(ltos); - -#ifdef __ABI_HARD__ + __ b(exit); + + __ bind(Condy); + condy_helper(exit); + __ bind(exit); +} + + +void TemplateTable::condy_helper(Label& Done) +{ + Register obj = R0_tmp; + Register rtmp = R1_tmp; + Register flags = R2_tmp; + Register off = R3_tmp; + + __ mov(rtmp, (int) bytecode()); + __ call_VM(obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc), rtmp); + __ get_vm_result_2(flags, rtmp); + + // VMr = obj = base address to find primitive value to push + // VMr2 = flags = (tos, off) using format of CPCE::_flags + __ mov(off, flags); + +#ifdef AARCH64 + __ andr(off, off, (unsigned)ConstantPoolCacheEntry::field_index_mask); +#else + __ logical_shift_left( off, off, 32 - ConstantPoolCacheEntry::field_index_bits); + __ 
logical_shift_right(off, off, 32 - ConstantPoolCacheEntry::field_index_bits); #endif + + const Address field(obj, off); + + __ logical_shift_right(flags, flags, ConstantPoolCacheEntry::tos_state_shift); + // Make sure we don't need to mask flags after the above shift + ConstantPoolCacheEntry::verify_tos_state_shift(); + + switch (bytecode()) { + case Bytecodes::_ldc: + case Bytecodes::_ldc_w: + { + // tos in (itos, ftos, stos, btos, ctos, ztos) + Label notIntFloat, notShort, notByte, notChar, notBool; + __ cmp(flags, itos); + __ cond_cmp(flags, ftos, ne); + __ b(notIntFloat, ne); + __ ldr(R0_tos, field); + __ push(itos); + __ b(Done); + + __ bind(notIntFloat); + __ cmp(flags, stos); + __ b(notShort, ne); + __ ldrsh(R0_tos, field); + __ push(stos); + __ b(Done); + + __ bind(notShort); + __ cmp(flags, btos); + __ b(notByte, ne); + __ ldrsb(R0_tos, field); + __ push(btos); + __ b(Done); + + __ bind(notByte); + __ cmp(flags, ctos); + __ b(notChar, ne); + __ ldrh(R0_tos, field); + __ push(ctos); + __ b(Done); + + __ bind(notChar); + __ cmp(flags, ztos); + __ b(notBool, ne); + __ ldrsb(R0_tos, field); + __ push(ztos); + __ b(Done); + + __ bind(notBool); + break; + } + + case Bytecodes::_ldc2_w: + { + Label notLongDouble; + __ cmp(flags, ltos); + __ cond_cmp(flags, dtos, ne); + __ b(notLongDouble, ne); + +#ifdef AARCH64 + __ ldr(R0_tos, field); +#else + __ add(rtmp, obj, wordSize); + __ ldr(R0_tos_lo, Address(obj, off)); + __ ldr(R1_tos_hi, Address(rtmp, off)); +#endif + __ push(ltos); + __ b(Done); + + __ bind(notLongDouble); + + break; + } + + default: + ShouldNotReachHere(); + } + + __ stop("bad ldc/condy"); } @@ -746,6 +872,7 @@ // convention with generate_ArrayIndexOutOfBounds_handler() __ mov(R4_ArrayIndexOutOfBounds_index, index, hs); } + __ mov(R1, array, hs); __ b(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry, hs); } diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp --- a/src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp Tue May 15 
18:03:31 2018 +0530 +++ b/src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp Tue May 15 10:13:52 2018 -0700 @@ -1,6 +1,6 @@ /* * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2012, 2015 SAP SE. All rights reserved. + * Copyright (c) 2012, 2018 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -37,10 +37,14 @@ #define __ ce->masm()-> -RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, - bool throw_index_out_of_bounds_exception) - : _throw_index_out_of_bounds_exception(throw_index_out_of_bounds_exception) - , _index(index) { +RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, LIR_Opr array) + : _throw_index_out_of_bounds_exception(false), _index(index), _array(array) { + assert(info != NULL, "must have info"); + _info = new CodeEmitInfo(info); +} + +RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index) + : _throw_index_out_of_bounds_exception(true), _index(index), _array(NULL) { assert(info != NULL, "must have info"); _info = new CodeEmitInfo(info); } @@ -68,12 +72,16 @@ __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub)); __ mtctr(R0); - Register index = R0; // pass in R0 + Register index = R0; if (_index->is_register()) { __ extsw(index, _index->as_register()); } else { __ load_const_optimized(index, _index->as_jint()); } + if (_array) { + __ std(_array->as_pointer_register(), -8, R1_SP); + } + __ std(index, -16, R1_SP); __ bctrl(); ce->add_call_info_here(_info); diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp --- a/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp Tue May 15 10:13:52 2018 -0700 @@ -2925,7 +2925,8 @@ Unimplemented(); } -void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest) { +void LIR_Assembler::leal(LIR_Opr 
addr_opr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) { + assert(patch_code == lir_patch_none, "Patch code not supported"); LIR_Address* addr = addr_opr->as_address_ptr(); assert(addr->scale() == LIR_Address::times_1, "no scaling on this platform"); if (addr->index()->is_illegal()) { diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp --- a/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp Tue May 15 10:13:52 2018 -0700 @@ -1,6 +1,6 @@ /* * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2012, 2015 SAP SE. All rights reserved. + * Copyright (c) 2012, 2018 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -502,8 +502,7 @@ case throw_range_check_failed_id: { __ set_info("range_check_failed", dont_gc_arguments); // Arguments will be discarded. - __ std(R0, -8, R1_SP); // Pass index on stack. - oop_maps = generate_exception_throw_with_stack_parms(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), 1); + oop_maps = generate_exception_throw_with_stack_parms(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), 2); } break; diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/cpu/ppc/ppc.ad --- a/src/hotspot/cpu/ppc/ppc.ad Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/cpu/ppc/ppc.ad Tue May 15 10:13:52 2018 -0700 @@ -1037,7 +1037,7 @@ // So first get the Proj node, mem_proj, to use it to iterate forward. Node *mem_proj = NULL; for (DUIterator_Fast imax, i = mba->fast_outs(imax); i < imax; i++) { - mem_proj = mba->fast_out(i); // Throw out-of-bounds if proj not found + mem_proj = mba->fast_out(i); // Runs out of bounds and asserts if Proj not found. 
assert(mem_proj->is_Proj(), "only projections here"); ProjNode *proj = mem_proj->as_Proj(); if (proj->_con == TypeFunc::Memory && diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/cpu/ppc/templateInterpreterGenerator_ppc.cpp --- a/src/hotspot/cpu/ppc/templateInterpreterGenerator_ppc.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/cpu/ppc/templateInterpreterGenerator_ppc.cpp Tue May 15 10:13:52 2018 -0700 @@ -564,13 +564,13 @@ return entry; } -address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) { +address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler() { address entry = __ pc(); __ empty_expression_stack(); - __ load_const_optimized(R4_ARG2, (address) name); + // R4_ARG2 already contains the array. // Index is in R17_tos. __ mr(R5_ARG3, R17_tos); - __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException)); + __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), R4_ARG2, R5_ARG3); return entry; } diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp --- a/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp Tue May 15 10:13:52 2018 -0700 @@ -1,6 +1,6 @@ /* * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2016 SAP SE. All rights reserved. + * Copyright (c) 2016, 2018 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -39,10 +39,14 @@ #undef CHECK_BAILOUT #define CHECK_BAILOUT() { if (ce->compilation()->bailed_out()) return; } -RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, - bool throw_index_out_of_bounds_exception) : - _throw_index_out_of_bounds_exception(throw_index_out_of_bounds_exception), - _index(index) { +RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, LIR_Opr array) + : _throw_index_out_of_bounds_exception(false), _index(index), _array(array) { + assert(info != NULL, "must have info"); + _info = new CodeEmitInfo(info); +} + +RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index) + : _throw_index_out_of_bounds_exception(true), _index(index), _array(NULL) { assert(info != NULL, "must have info"); _info = new CodeEmitInfo(info); } @@ -71,6 +75,7 @@ stub_id = Runtime1::throw_index_exception_id; } else { stub_id = Runtime1::throw_range_check_failed_id; + __ lgr_if_needed(Z_R0_scratch, _array->as_pointer_register()); } ce->emit_call_c(Runtime1::entry_for (stub_id)); CHECK_BAILOUT(); diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp --- a/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp Tue May 15 10:13:52 2018 -0700 @@ -2922,7 +2922,8 @@ Unimplemented(); } -void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest) { +void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) { + assert(patch_code == lir_patch_none, "Patch code not supported"); LIR_Address* addr = addr_opr->as_address_ptr(); assert(addr->scale() == LIR_Address::times_1, "scaling unsupported"); __ load_address(dest->as_pointer_register(), as_Address(addr)); diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/cpu/s390/sharedRuntime_s390.cpp --- a/src/hotspot/cpu/s390/sharedRuntime_s390.cpp Tue May 15 18:03:31 2018 +0530 +++ 
b/src/hotspot/cpu/s390/sharedRuntime_s390.cpp Tue May 15 10:13:52 2018 -0700 @@ -1,6 +1,6 @@ /* - * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2016, 2017 SAP SE. All rights reserved. + * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2018 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -314,8 +314,8 @@ __ save_return_pc(return_pc); // Push a new frame (includes stack linkage). - // use return_pc as scratch for push_frame. Z_R0_scratch (the default) and Z_R1_scratch are - // illegally used to pass parameters (SAPJVM extension) by RangeCheckStub::emit_code(). + // Use return_pc as scratch for push_frame. Z_R0_scratch (the default) and Z_R1_scratch are + // illegally used to pass parameters by RangeCheckStub::emit_code(). __ push_frame(frame_size_in_bytes, return_pc); // We have to restore return_pc right away. // Nobody else will. Furthermore, return_pc isn't necessarily the default (Z_R14). diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/cpu/s390/templateInterpreterGenerator_s390.cpp --- a/src/hotspot/cpu/s390/templateInterpreterGenerator_s390.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/cpu/s390/templateInterpreterGenerator_s390.cpp Tue May 15 10:13:52 2018 -0700 @@ -551,9 +551,10 @@ // // Args: +// Z_ARG2: oop of array // Z_ARG3: aberrant index // -address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char * name) { +address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler() { address entry = __ pc(); address excp = CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException); @@ -562,8 +563,7 @@ __ empty_expression_stack(); // Setup parameters. - // Leave out the name and use register for array to create more detailed exceptions. 
- __ load_absolute_address(Z_ARG2, (address) name); + // Pass register with array to create more detailed exceptions. __ call_VM(noreg, excp, Z_ARG2, Z_ARG3); return entry; } diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/cpu/s390/templateTable_s390.cpp --- a/src/hotspot/cpu/s390/templateTable_s390.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/cpu/s390/templateTable_s390.cpp Tue May 15 10:13:52 2018 -0700 @@ -784,7 +784,7 @@ __ z_cl(index, Address(array, arrayOopDesc::length_offset_in_bytes())); __ z_brl(index_ok); __ lgr_if_needed(Z_ARG3, index); // See generate_ArrayIndexOutOfBounds_handler(). - // Give back the array to create more detailed exceptions. + // Pass the array to create more detailed exceptions. __ lgr_if_needed(Z_ARG2, array); // See generate_ArrayIndexOutOfBounds_handler(). __ load_absolute_address(Z_R1_scratch, Interpreter::_throw_ArrayIndexOutOfBoundsException_entry); diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/cpu/sparc/c1_CodeStubs_sparc.cpp --- a/src/hotspot/cpu/sparc/c1_CodeStubs_sparc.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/cpu/sparc/c1_CodeStubs_sparc.cpp Tue May 15 10:13:52 2018 -0700 @@ -35,15 +35,17 @@ #define __ ce->masm()-> -RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, - bool throw_index_out_of_bounds_exception) - : _throw_index_out_of_bounds_exception(throw_index_out_of_bounds_exception) - , _index(index) -{ +RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, LIR_Opr array) + : _throw_index_out_of_bounds_exception(false), _index(index), _array(array) { assert(info != NULL, "must have info"); _info = new CodeEmitInfo(info); } +RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index) + : _throw_index_out_of_bounds_exception(true), _index(index), _array(NULL) { + assert(info != NULL, "must have info"); + _info = new CodeEmitInfo(info); +} void RangeCheckStub::emit_code(LIR_Assembler* ce) { __ bind(_entry); @@ -66,6 +68,7 @@ if (_throw_index_out_of_bounds_exception) { 
__ call(Runtime1::entry_for(Runtime1::throw_index_exception_id), relocInfo::runtime_call_type); } else { + __ mov(_array->as_pointer_register(), G5); __ call(Runtime1::entry_for(Runtime1::throw_range_check_failed_id), relocInfo::runtime_call_type); } __ delayed()->nop(); diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/cpu/sparc/c1_LIRAssembler_sparc.cpp --- a/src/hotspot/cpu/sparc/c1_LIRAssembler_sparc.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/cpu/sparc/c1_LIRAssembler_sparc.cpp Tue May 15 10:13:52 2018 -0700 @@ -3195,7 +3195,7 @@ __ srl (rs, 0, rd->successor()); } -void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest) { +void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) { const LIR_Address* addr = addr_opr->as_address_ptr(); assert(addr->scale() == LIR_Address::times_1, "can't handle complex addresses yet"); const Register dest_reg = dest->as_pointer_register(); diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/cpu/sparc/c1_Runtime1_sparc.cpp --- a/src/hotspot/cpu/sparc/c1_Runtime1_sparc.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/cpu/sparc/c1_Runtime1_sparc.cpp Tue May 15 10:13:52 2018 -0700 @@ -302,7 +302,7 @@ if (!has_argument) { call_offset = __ call_RT(noreg, noreg, target); } else { - call_offset = __ call_RT(noreg, noreg, target, G4); + call_offset = __ call_RT(noreg, noreg, target, G4, G5); } OopMapSet* oop_maps = new OopMapSet(); oop_maps->add_gc_map(call_offset, oop_map); diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/cpu/sparc/interp_masm_sparc.cpp --- a/src/hotspot/cpu/sparc/interp_masm_sparc.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/cpu/sparc/interp_masm_sparc.cpp Tue May 15 10:13:52 2018 -0700 @@ -881,27 +881,32 @@ assert_not_delayed(); verify_oop(array); - // sign extend since tos (index) can be a 32bit value + // Sign extend since tos (index) can be a 32bit value. sra(index, G0, index); - // check array + // Check array. 
Label ptr_ok; tst(array); - throw_if_not_1_x( notZero, ptr_ok ); - delayed()->ld( array, arrayOopDesc::length_offset_in_bytes(), tmp ); // check index - throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ptr_ok); + throw_if_not_1_x(notZero, ptr_ok); + delayed()->ld(array, arrayOopDesc::length_offset_in_bytes(), tmp); // Check index. + throw_if_not_2(Interpreter::_throw_NullPointerException_entry, G3_scratch, ptr_ok); Label index_ok; cmp(index, tmp); - throw_if_not_1_icc( lessUnsigned, index_ok ); - if (index_shift > 0) delayed()->sll(index, index_shift, index); - else delayed()->add(array, index, res); // addr - const offset in index - // convention: move aberrant index into G3_scratch for exception message - mov(index, G3_scratch); - throw_if_not_2( Interpreter::_throw_ArrayIndexOutOfBoundsException_entry, G4_scratch, index_ok); + throw_if_not_1_icc(lessUnsigned, index_ok); + if (index_shift > 0) { + delayed()->sll(index, index_shift, index); + } else { + delayed()->add(array, index, res); // addr - const offset in index + } + // Pass the array to create more detailed exceptions. + // Convention: move aberrant index into Otos_i for exception message. 
+ mov(index, Otos_i); + mov(array, G3_scratch); + throw_if_not_2(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry, G4_scratch, index_ok); // add offset if didn't do it in delay slot - if (index_shift > 0) add(array, index, res); // addr - const offset in index + if (index_shift > 0) { add(array, index, res); } // addr - const offset in index } diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/cpu/sparc/templateInterpreterGenerator_sparc.cpp --- a/src/hotspot/cpu/sparc/templateInterpreterGenerator_sparc.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/cpu/sparc/templateInterpreterGenerator_sparc.cpp Tue May 15 10:13:52 2018 -0700 @@ -255,15 +255,14 @@ } -address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) { +address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler() { address entry = __ pc(); // expression stack must be empty before entering the VM if an exception happened __ empty_expression_stack(); + // Pass the array to create more detailed exceptions. 
// convention: expect aberrant index in register G3_scratch, then shuffle the // index to G4_scratch for the VM call - __ mov(G3_scratch, G4_scratch); - __ set((intptr_t)name, G3_scratch); - __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), G3_scratch, G4_scratch); + __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), G3_scratch, Otos_i); __ should_not_reach_here(); return entry; } diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/cpu/x86/assembler_x86.cpp --- a/src/hotspot/cpu/x86/assembler_x86.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/cpu/x86/assembler_x86.cpp Tue May 15 10:13:52 2018 -0700 @@ -8981,6 +8981,13 @@ emit_arith(0x85, 0xC0, dst, src); } +void Assembler::testq(Register dst, Address src) { + InstructionMark im(this); + prefixq(src, dst); + emit_int8((unsigned char)0x85); + emit_operand(dst, src); +} + void Assembler::xaddq(Address dst, Register src) { InstructionMark im(this); prefixq(dst, src); diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/cpu/x86/assembler_x86.hpp --- a/src/hotspot/cpu/x86/assembler_x86.hpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/cpu/x86/assembler_x86.hpp Tue May 15 10:13:52 2018 -0700 @@ -1813,6 +1813,7 @@ void testq(Register dst, int32_t imm32); void testq(Register dst, Register src); + void testq(Register dst, Address src); // BMI - count trailing zeros void tzcntl(Register dst, Register src); diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/cpu/x86/c1_CodeStubs_x86.cpp --- a/src/hotspot/cpu/x86/c1_CodeStubs_x86.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/cpu/x86/c1_CodeStubs_x86.cpp Tue May 15 10:13:52 2018 -0700 @@ -88,15 +88,17 @@ __ jmp(_continuation); } -RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, - bool throw_index_out_of_bounds_exception) - : _throw_index_out_of_bounds_exception(throw_index_out_of_bounds_exception) - , _index(index) -{ 
+RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, LIR_Opr array) + : _throw_index_out_of_bounds_exception(false), _index(index), _array(array) { assert(info != NULL, "must have info"); _info = new CodeEmitInfo(info); } +RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index) + : _throw_index_out_of_bounds_exception(true), _index(index), _array(NULL) { + assert(info != NULL, "must have info"); + _info = new CodeEmitInfo(info); +} void RangeCheckStub::emit_code(LIR_Assembler* ce) { __ bind(_entry); @@ -120,6 +122,7 @@ stub_id = Runtime1::throw_index_exception_id; } else { stub_id = Runtime1::throw_range_check_failed_id; + ce->store_parameter(_array->as_pointer_register(), 1); } __ call(RuntimeAddress(Runtime1::entry_for(stub_id))); ce->add_call_info_here(_info); diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp --- a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp Tue May 15 10:13:52 2018 -0700 @@ -3786,11 +3786,22 @@ } -void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest) { - assert(addr->is_address() && dest->is_register(), "check"); - Register reg; - reg = dest->as_pointer_register(); - __ lea(reg, as_Address(addr->as_address_ptr())); +void LIR_Assembler::leal(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) { + assert(src->is_address(), "must be an address"); + assert(dest->is_register(), "must be a register"); + + PatchingStub* patch = NULL; + if (patch_code != lir_patch_none) { + patch = new PatchingStub(_masm, PatchingStub::access_field_id); + } + + Register reg = dest->as_pointer_register(); + LIR_Address* addr = src->as_address_ptr(); + __ lea(reg, as_Address(addr)); + + if (patch != NULL) { + patching_epilog(patch, patch_code, addr->base()->as_register(), info); + } } diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/cpu/x86/c1_Runtime1_x86.cpp --- a/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp Tue May 15 
18:03:31 2018 +0530 +++ b/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp Tue May 15 10:13:52 2018 -0700 @@ -611,26 +611,29 @@ } -// target: the entry point of the method that creates and posts the exception oop -// has_argument: true if the exception needs an argument (passed on stack because registers must be preserved) - +// Target: the entry point of the method that creates and posts the exception oop. +// has_argument: true if the exception needs arguments (passed on the stack because +// registers must be preserved). OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) { - // preserve all registers - int num_rt_args = has_argument ? 2 : 1; + // Preserve all registers. + int num_rt_args = has_argument ? (2 + 1) : 1; OopMap* oop_map = save_live_registers(sasm, num_rt_args); - // now all registers are saved and can be used freely - // verify that no old value is used accidentally + // Now all registers are saved and can be used freely. + // Verify that no old value is used accidentally. __ invalidate_registers(true, true, true, true, true, true); - // registers used by this stub + // Registers used by this stub. const Register temp_reg = rbx; - // load argument for exception that is passed as an argument into the stub + // Load arguments for exception that are passed as arguments into the stub. 
if (has_argument) { #ifdef _LP64 __ movptr(c_rarg1, Address(rbp, 2*BytesPerWord)); + __ movptr(c_rarg2, Address(rbp, 3*BytesPerWord)); #else + __ movptr(temp_reg, Address(rbp, 3*BytesPerWord)); + __ push(temp_reg); __ movptr(temp_reg, Address(rbp, 2*BytesPerWord)); __ push(temp_reg); #endif // _LP64 diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/cpu/x86/interp_masm_x86.cpp --- a/src/hotspot/cpu/x86/interp_masm_x86.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/cpu/x86/interp_masm_x86.cpp Tue May 15 10:13:52 2018 -0700 @@ -504,18 +504,15 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index( Register result, Register index, Register tmp) { assert_different_registers(result, index); - // convert from field index to resolved_references() index and from - // word index to byte offset. Since this is a java object, it can be compressed - shll(index, LogBytesPerHeapOop); get_constant_pool(result); // load pointer for resolved_references[] objArray movptr(result, Address(result, ConstantPool::cache_offset_in_bytes())); movptr(result, Address(result, ConstantPoolCache::resolved_references_offset_in_bytes())); resolve_oop_handle(result, tmp); - // Add in the index - addptr(result, index); - load_heap_oop(result, Address(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT)), tmp); + load_heap_oop(result, Address(result, index, + UseCompressedOops ? 
Address::times_4 : Address::times_ptr, + arrayOopDesc::base_offset_in_bytes(T_OBJECT)), tmp); } // load cpool->resolved_klass_at(index) diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/cpu/x86/macroAssembler_x86.hpp --- a/src/hotspot/cpu/x86/macroAssembler_x86.hpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/cpu/x86/macroAssembler_x86.hpp Tue May 15 10:13:52 2018 -0700 @@ -836,6 +836,7 @@ void orptr(Address dst, int32_t imm32) { LP64_ONLY(orq(dst, imm32)) NOT_LP64(orl(dst, imm32)); } void testptr(Register src, int32_t imm32) { LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); } + void testptr(Register src1, Address src2) { LP64_ONLY(testq(src1, src2)) NOT_LP64(testl(src1, src2)); } void testptr(Register src1, Register src2); void xorptr(Register dst, Register src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); } diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/cpu/x86/nativeInst_x86.cpp --- a/src/hotspot/cpu/x86/nativeInst_x86.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/cpu/x86/nativeInst_x86.cpp Tue May 15 10:13:52 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -436,6 +436,8 @@ case instruction_code_reg2memb: // 0x88 case instruction_code_mem2regb: // 0x8a + case instruction_code_lea: // 0x8d + case instruction_code_float_s: // 0xd9 fld_s a case instruction_code_float_d: // 0xdd fld_d a @@ -508,6 +510,9 @@ case instruction_code_xmm_lpd: // 0x12 movlpd xmm, a break; + case instruction_code_lea: // 0x8d lea r, a + break; + default: fatal ("not a mov [reg+offs], reg instruction"); } diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/cpu/x86/nativeInst_x86.hpp --- a/src/hotspot/cpu/x86/nativeInst_x86.hpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/cpu/x86/nativeInst_x86.hpp Tue May 15 10:13:52 2018 -0700 @@ -354,6 +354,8 @@ instruction_code_xmm_store = 0x11, instruction_code_xmm_lpd = 0x12, + instruction_code_lea = 0x8d, + instruction_VEX_prefix_2bytes = Assembler::VEX_2bytes, instruction_VEX_prefix_3bytes = Assembler::VEX_3bytes, instruction_EVEX_prefix_4bytes = Assembler::EVEX_4bytes, diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/cpu/x86/templateInterpreterGenerator_x86.cpp --- a/src/hotspot/cpu/x86/templateInterpreterGenerator_x86.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/cpu/x86/templateInterpreterGenerator_x86.cpp Tue May 15 10:13:52 2018 -0700 @@ -102,16 +102,16 @@ return entry; } -address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler( - const char* name) { +address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler() { address entry = __ pc(); - // expression stack must be empty before entering the VM if an - // exception happened + // The expression stack must be empty before entering the VM if an + // exception happened. __ empty_expression_stack(); - // setup parameters - // ??? convention: expect aberrant index in register ebx + + // Setup parameters. + // ??? convention: expect aberrant index in register ebx/rbx. + // Pass array to create more detailed exceptions. 
Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1); - __ lea(rarg, ExternalAddress((address)name)); __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime:: diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/cpu/x86/templateTable_x86.cpp --- a/src/hotspot/cpu/x86/templateTable_x86.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/cpu/x86/templateTable_x86.cpp Tue May 15 10:13:52 2018 -0700 @@ -757,11 +757,14 @@ assert(rbx != array, "different registers"); __ movl(rbx, index); } - __ jump_cc(Assembler::aboveEqual, - ExternalAddress(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry)); + Label skip; + __ jccb(Assembler::below, skip); + // Pass array to create more detailed exceptions. + __ mov(NOT_LP64(rax) LP64_ONLY(c_rarg1), array); + __ jump(ExternalAddress(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry)); + __ bind(skip); } - void TemplateTable::iaload() { transition(itos, itos); // rax: index @@ -1109,8 +1112,6 @@ __ load_klass(rax, rdx); __ movptr(rax, Address(rax, ObjArrayKlass::element_klass_offset())); - // Compress array + index*oopSize + 12 into a single register. Frees rcx. - __ lea(rdx, element_address); // Generate subtype check. Blows rcx, rdi // Superklass in rax. Subklass in rbx. @@ -1125,8 +1126,9 @@ // Get the value we will store __ movptr(rax, at_tos()); + __ movl(rcx, at_tos_p1()); // index // Now store using the appropriate barrier - do_oop_store(_masm, Address(rdx, 0), rax, IN_HEAP_ARRAY); + do_oop_store(_masm, element_address, rax, IN_HEAP_ARRAY); __ jmp(done); // Have a NULL in rax, rdx=array, ecx=index. 
Store NULL at ary[idx] diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/os/aix/attachListener_aix.cpp --- a/src/hotspot/os/aix/attachListener_aix.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/os/aix/attachListener_aix.cpp Tue May 15 10:13:52 2018 -0700 @@ -235,7 +235,12 @@ if (res == 0) { RESTARTABLE(::chmod(initial_path, S_IREAD|S_IWRITE), res); if (res == 0) { - res = ::rename(initial_path, path); + // make sure the file is owned by the effective user and effective group + // e.g. the group could be inherited from the directory in case the s bit is set + RESTARTABLE(::chown(initial_path, geteuid(), getegid()), res); + if (res == 0) { + res = ::rename(initial_path, path); + } } } if (res == -1) { diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/os/aix/perfMemory_aix.cpp --- a/src/hotspot/os/aix/perfMemory_aix.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/os/aix/perfMemory_aix.cpp Tue May 15 10:13:52 2018 -0700 @@ -1,6 +1,6 @@ /* - * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2012, 2016 SAP SE. All rights reserved. + * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2018 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -1246,7 +1246,7 @@ *sizep = size; log_debug(perf, memops)("mapped " SIZE_FORMAT " bytes for vmid %d at " - INTPTR_FORMAT "\n", size, vmid, p2i((void*)mapAddress)); + INTPTR_FORMAT, size, vmid, p2i((void*)mapAddress)); } // create the PerfData memory region diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/os/bsd/attachListener_bsd.cpp --- a/src/hotspot/os/bsd/attachListener_bsd.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/os/bsd/attachListener_bsd.cpp Tue May 15 10:13:52 2018 -0700 @@ -215,7 +215,8 @@ RESTARTABLE(::chmod(initial_path, S_IREAD|S_IWRITE), res); if (res == 0) { // make sure the file is owned by the effective user and effective group - // (this is the default on linux, but not on mac os) + // e.g. default behavior on mac is that new files inherit the group of + // the directory that they are created in RESTARTABLE(::chown(initial_path, geteuid(), getegid()), res); if (res == 0) { res = ::rename(initial_path, path); diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/os/bsd/perfMemory_bsd.cpp --- a/src/hotspot/os/bsd/perfMemory_bsd.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/os/bsd/perfMemory_bsd.cpp Tue May 15 10:13:52 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -1152,7 +1152,7 @@ *sizep = size; log_debug(perf, memops)("mapped " SIZE_FORMAT " bytes for vmid %d at " - INTPTR_FORMAT "\n", size, vmid, p2i((void*)mapAddress)); + INTPTR_FORMAT, size, vmid, p2i((void*)mapAddress)); } // create the PerfData memory region diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/os/linux/attachListener_linux.cpp --- a/src/hotspot/os/linux/attachListener_linux.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/os/linux/attachListener_linux.cpp Tue May 15 10:13:52 2018 -0700 @@ -215,7 +215,12 @@ if (res == 0) { RESTARTABLE(::chmod(initial_path, S_IREAD|S_IWRITE), res); if (res == 0) { - res = ::rename(initial_path, path); + // make sure the file is owned by the effective user and effective group + // e.g. the group could be inherited from the directory in case the s bit is set + RESTARTABLE(::chown(initial_path, geteuid(), getegid()), res); + if (res == 0) { + res = ::rename(initial_path, path); + } } } if (res == -1) { diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/os/linux/perfMemory_linux.cpp --- a/src/hotspot/os/linux/perfMemory_linux.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/os/linux/perfMemory_linux.cpp Tue May 15 10:13:52 2018 -0700 @@ -1241,7 +1241,7 @@ *sizep = size; log_debug(perf, memops)("mapped " SIZE_FORMAT " bytes for vmid %d at " - INTPTR_FORMAT "\n", size, vmid, p2i((void*)mapAddress)); + INTPTR_FORMAT, size, vmid, p2i((void*)mapAddress)); } // create the PerfData memory region diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/os/solaris/perfMemory_solaris.cpp --- a/src/hotspot/os/solaris/perfMemory_solaris.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/os/solaris/perfMemory_solaris.cpp Tue May 15 10:13:52 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1186,7 +1186,7 @@ *sizep = size; log_debug(perf, memops)("mapped " SIZE_FORMAT " bytes for vmid %d at " - INTPTR_FORMAT "\n", size, vmid, (void*)mapAddress); + INTPTR_FORMAT, size, vmid, (void*)mapAddress); } // create the PerfData memory region diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/os/windows/os_windows.cpp --- a/src/hotspot/os/windows/os_windows.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/os/windows/os_windows.cpp Tue May 15 10:13:52 2018 -0700 @@ -4417,10 +4417,11 @@ return false; } strcpy(search_path, path); + os::native_path(search_path); // Append "*", or possibly "\\*", to path - if (path[1] == ':' && - (path[2] == '\0' || - (path[2] == '\\' && path[3] == '\0'))) { + if (search_path[1] == ':' && + (search_path[2] == '\0' || + (search_path[2] == '\\' && search_path[3] == '\0'))) { // No '\\' needed for cases like "Z:" or "Z:\" strcat(search_path, "*"); } diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/os/windows/perfMemory_windows.cpp --- a/src/hotspot/os/windows/perfMemory_windows.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/os/windows/perfMemory_windows.cpp Tue May 15 10:13:52 2018 -0700 @@ -1697,7 +1697,7 @@ CloseHandle(fmh); log_debug(perf, memops)("mapped " SIZE_FORMAT " bytes for vmid %d at " - INTPTR_FORMAT "\n", size, vmid, mapAddress); + INTPTR_FORMAT, size, vmid, mapAddress); } // this method unmaps the the mapped view of the the diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/asm/codeBuffer.hpp --- a/src/hotspot/share/asm/codeBuffer.hpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/asm/codeBuffer.hpp Tue May 15 10:13:52 2018 -0700 @@ -382,6 +382,10 @@ address _last_insn; // used to merge consecutive memory barriers, loads or stores. 
+#if INCLUDE_AOT + bool _immutable_PIC; +#endif + address _decode_begin; // start address for decode address decode_begin(); @@ -396,6 +400,9 @@ _overflow_arena = NULL; _code_strings = CodeStrings(); _last_insn = NULL; +#if INCLUDE_AOT + _immutable_PIC = false; +#endif } void initialize(address code_start, csize_t code_size) { @@ -629,6 +636,13 @@ // Log a little info about section usage in the CodeBuffer void log_section_sizes(const char* name); +#if INCLUDE_AOT + // True if this is a code buffer used for immutable PIC, i.e. AOT + // compilation. + bool immutable_PIC() { return _immutable_PIC; } + void set_immutable_PIC(bool pic) { _immutable_PIC = pic; } +#endif + #ifndef PRODUCT public: // Printing / Decoding diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/c1/c1_CodeStubs.hpp --- a/src/hotspot/share/c1/c1_CodeStubs.hpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/c1/c1_CodeStubs.hpp Tue May 15 10:13:52 2018 -0700 @@ -147,10 +147,14 @@ private: CodeEmitInfo* _info; LIR_Opr _index; + LIR_Opr _array; bool _throw_index_out_of_bounds_exception; public: - RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, bool throw_index_out_of_bounds_exception = false); + // For ArrayIndexOutOfBoundsException. + RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, LIR_Opr array); + // For IndexOutOfBoundsException. 
+ RangeCheckStub(CodeEmitInfo* info, LIR_Opr index); virtual void emit_code(LIR_Assembler* e); virtual CodeEmitInfo* info() const { return _info; } virtual bool is_exception_throw_stub() const { return true; } @@ -158,6 +162,7 @@ virtual void visit(LIR_OpVisitState* visitor) { visitor->do_slow_case(_info); visitor->do_input(_index); + if (_array) { visitor->do_input(_array); } } #ifndef PRODUCT virtual void print_name(outputStream* out) const { out->print("RangeCheckStub"); } diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/c1/c1_LIR.hpp --- a/src/hotspot/share/c1/c1_LIR.hpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/c1/c1_LIR.hpp Tue May 15 10:13:52 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -2074,7 +2074,7 @@ void branch_destination(Label* lbl) { append(new LIR_OpLabel(lbl)); } void negate(LIR_Opr from, LIR_Opr to) { append(new LIR_Op1(lir_neg, from, to)); } - void leal(LIR_Opr from, LIR_Opr result_reg) { append(new LIR_Op1(lir_leal, from, result_reg)); } + void leal(LIR_Opr from, LIR_Opr result_reg, LIR_PatchCode patch_code = lir_patch_none, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_leal, from, result_reg, T_ILLEGAL, patch_code, info)); } // result is a stack location for old backend and vreg for UseLinearScan // stack_loc_temp is an illegal register for old backend diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/c1/c1_LIRAssembler.cpp --- a/src/hotspot/share/c1/c1_LIRAssembler.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/c1/c1_LIRAssembler.cpp Tue May 15 10:13:52 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -276,7 +276,8 @@ // branches since they include block and stub names. Also print // patching moves since they generate funny looking code. if (op->code() == lir_branch || - (op->code() == lir_move && op->as_Op1()->patch_code() != lir_patch_none)) { + (op->code() == lir_move && op->as_Op1()->patch_code() != lir_patch_none) || + (op->code() == lir_leal && op->as_Op1()->patch_code() != lir_patch_none)) { stringStream st; op->print_on(&st); _masm->block_comment(st.as_string()); @@ -554,7 +555,7 @@ break; case lir_leal: - leal(op->in_opr(), op->result_opr()); + leal(op->in_opr(), op->result_opr(), op->patch_code(), op->info()); break; case lir_null_check: { diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/c1/c1_LIRAssembler.hpp --- a/src/hotspot/share/c1/c1_LIRAssembler.hpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/c1/c1_LIRAssembler.hpp Tue May 15 10:13:52 2018 -0700 @@ -240,7 +240,7 @@ void align_call(LIR_Code code); void negate(LIR_Opr left, LIR_Opr dest); - void leal(LIR_Opr left, LIR_Opr dest); + void leal(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info); void rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info); diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/c1/c1_LIRGenerator.cpp --- a/src/hotspot/share/c1/c1_LIRGenerator.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/c1/c1_LIRGenerator.cpp Tue May 15 10:13:52 2018 -0700 @@ -480,7 +480,7 @@ void LIRGenerator::array_range_check(LIR_Opr array, LIR_Opr index, CodeEmitInfo* null_check_info, CodeEmitInfo* range_check_info) { - CodeStub* stub = new RangeCheckStub(range_check_info, index); + CodeStub* stub = new RangeCheckStub(range_check_info, index, array); if (index->is_constant()) { 
cmp_mem_int(lir_cond_belowEqual, array, arrayOopDesc::length_offset_in_bytes(), index->as_jint(), null_check_info); @@ -494,7 +494,7 @@ void LIRGenerator::nio_range_check(LIR_Opr buffer, LIR_Opr index, LIR_Opr result, CodeEmitInfo* info) { - CodeStub* stub = new RangeCheckStub(info, index, true); + CodeStub* stub = new RangeCheckStub(info, index); if (index->is_constant()) { cmp_mem_int(lir_cond_belowEqual, buffer, java_nio_Buffer::limit_offset(), index->as_jint(), info); __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch @@ -1592,7 +1592,7 @@ if (GenerateRangeChecks && needs_range_check) { if (use_length) { __ cmp(lir_cond_belowEqual, length.result(), index.result()); - __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result())); + __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result(), array.result())); } else { array_range_check(array.result(), index.result(), null_check_info, range_check_info); // range_check also does the null check @@ -1756,7 +1756,7 @@ LIR_Opr result = rlock_result(x); if (GenerateRangeChecks) { CodeEmitInfo* info = state_for(x); - CodeStub* stub = new RangeCheckStub(info, index.result(), true); + CodeStub* stub = new RangeCheckStub(info, index.result()); if (index.result()->is_constant()) { cmp_mem_int(lir_cond_belowEqual, buf.result(), java_nio_Buffer::limit_offset(), index.result()->as_jint(), info); __ branch(lir_cond_belowEqual, T_INT, stub); @@ -1837,12 +1837,12 @@ if (GenerateRangeChecks && needs_range_check) { if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) { - __ branch(lir_cond_always, T_ILLEGAL, new RangeCheckStub(range_check_info, index.result())); + __ branch(lir_cond_always, T_ILLEGAL, new RangeCheckStub(range_check_info, index.result(), array.result())); } else if (use_length) { // TODO: use a (modified) version of array_range_check that does not require a // constant length to be loaded to a register __ 
cmp(lir_cond_belowEqual, length.result(), index.result()); - __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result())); + __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result(), array.result())); } else { array_range_check(array.result(), index.result(), null_check_info, range_check_info); // The range check performs the null check, so clear it out for the load diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/c1/c1_Runtime1.cpp --- a/src/hotspot/share/c1/c1_Runtime1.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/c1/c1_Runtime1.cpp Tue May 15 10:13:52 2018 -0700 @@ -641,10 +641,12 @@ } -JRT_ENTRY(void, Runtime1::throw_range_check_exception(JavaThread* thread, int index)) +JRT_ENTRY(void, Runtime1::throw_range_check_exception(JavaThread* thread, int index, arrayOopDesc* a)) NOT_PRODUCT(_throw_range_check_exception_count++;) - char message[jintAsStringSize]; - sprintf(message, "%d", index); + const int len = 35; + assert(len < strlen("Index %d out of bounds for length %d"), "Must allocate more space for message."); + char message[2 * jintAsStringSize + len]; + sprintf(message, "Index %d out of bounds for length %d", index, a->length()); SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), message); JRT_END diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/c1/c1_Runtime1.hpp --- a/src/hotspot/share/c1/c1_Runtime1.hpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/c1/c1_Runtime1.hpp Tue May 15 10:13:52 2018 -0700 @@ -149,7 +149,7 @@ static address exception_handler_for_pc(JavaThread* thread); - static void throw_range_check_exception(JavaThread* thread, int index); + static void throw_range_check_exception(JavaThread* thread, int index, arrayOopDesc* a); static void throw_index_exception(JavaThread* thread, int index); static void throw_div0_exception(JavaThread* thread); static void 
throw_null_pointer_exception(JavaThread* thread); diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/classfile/classLoader.cpp --- a/src/hotspot/share/classfile/classLoader.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/classfile/classLoader.cpp Tue May 15 10:13:52 2018 -0700 @@ -1552,56 +1552,63 @@ PackageEntry* pkg_entry = ik->package(); if (FileMapInfo::get_number_of_shared_paths() > 0) { - char* canonical_path = NEW_RESOURCE_ARRAY(char, JVM_MAXPATHLEN); + char* canonical_path_table_entry = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, JVM_MAXPATHLEN); // save the path from the file: protocol or the module name from the jrt: protocol // if no protocol prefix is found, path is the same as stream->source() char* path = skip_uri_protocol(src); + char* canonical_class_src_path = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, JVM_MAXPATHLEN); + if (!get_canonical_path(path, canonical_class_src_path, JVM_MAXPATHLEN)) { + tty->print_cr("Bad pathname %s. CDS dump aborted.", path); + vm_exit(1); + } for (int i = 0; i < FileMapInfo::get_number_of_shared_paths(); i++) { SharedClassPathEntry* ent = FileMapInfo::shared_path(i); - if (get_canonical_path(ent->name(), canonical_path, JVM_MAXPATHLEN)) { - // If the path (from the class stream source) is the same as the shared - // class or module path, then we have a match. - if (strcmp(canonical_path, os::native_path((char*)path)) == 0) { - // NULL pkg_entry and pkg_entry in an unnamed module implies the class - // is from the -cp or boot loader append path which consists of -Xbootclasspath/a - // and jvmti appended entries. - if ((pkg_entry == NULL) || (pkg_entry->in_unnamed_module())) { - // Ensure the index is within the -cp range before assigning - // to the classpath_index. 
- if (SystemDictionary::is_system_class_loader(loader) && - (i >= ClassLoaderExt::app_class_paths_start_index()) && - (i < ClassLoaderExt::app_module_paths_start_index())) { + if (!get_canonical_path(ent->name(), canonical_path_table_entry, JVM_MAXPATHLEN)) { + tty->print_cr("Bad pathname %s. CDS dump aborted.", ent->name()); + vm_exit(1); + } + // If the path (from the class stream source) is the same as the shared + // class or module path, then we have a match. + if (strcmp(canonical_path_table_entry, canonical_class_src_path) == 0) { + // NULL pkg_entry and pkg_entry in an unnamed module implies the class + // is from the -cp or boot loader append path which consists of -Xbootclasspath/a + // and jvmti appended entries. + if ((pkg_entry == NULL) || (pkg_entry->in_unnamed_module())) { + // Ensure the index is within the -cp range before assigning + // to the classpath_index. + if (SystemDictionary::is_system_class_loader(loader) && + (i >= ClassLoaderExt::app_class_paths_start_index()) && + (i < ClassLoaderExt::app_module_paths_start_index())) { + classpath_index = i; + break; + } else { + if ((i >= 1) && + (i < ClassLoaderExt::app_class_paths_start_index())) { + // The class must be from boot loader append path which consists of + // -Xbootclasspath/a and jvmti appended entries. + assert(loader == NULL, "sanity"); classpath_index = i; break; - } else { - if ((i >= 1) && - (i < ClassLoaderExt::app_class_paths_start_index())) { - // The class must be from boot loader append path which consists of - // -Xbootclasspath/a and jvmti appended entries. - assert(loader == NULL, "sanity"); - classpath_index = i; - break; - } } - } else { - // A class from a named module from the --module-path. Ensure the index is - // within the --module-path range before assigning to the classpath_index. 
- if ((pkg_entry != NULL) && !(pkg_entry->in_unnamed_module()) && (i > 0)) { - if (i >= ClassLoaderExt::app_module_paths_start_index() && - i < FileMapInfo::get_number_of_shared_paths()) { - classpath_index = i; - break; - } + } + } else { + // A class from a named module from the --module-path. Ensure the index is + // within the --module-path range before assigning to the classpath_index. + if ((pkg_entry != NULL) && !(pkg_entry->in_unnamed_module()) && (i > 0)) { + if (i >= ClassLoaderExt::app_module_paths_start_index() && + i < FileMapInfo::get_number_of_shared_paths()) { + classpath_index = i; + break; } } } - // for index 0 and the stream->source() is the modules image or has the jrt: protocol. - // The class must be from the runtime modules image. - if (i == 0 && (is_modules_image(src) || string_starts_with(src, "jrt:"))) { - classpath_index = i; - break; - } + } + // for index 0 and the stream->source() is the modules image or has the jrt: protocol. + // The class must be from the runtime modules image. 
+ if (i == 0 && (is_modules_image(src) || string_starts_with(src, "jrt:"))) { + classpath_index = i; + break; } } diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/classfile/classLoaderExt.cpp --- a/src/hotspot/share/classfile/classLoaderExt.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/classfile/classLoaderExt.cpp Tue May 15 10:13:52 2018 -0700 @@ -79,12 +79,11 @@ } void ClassLoaderExt::process_module_table(ModuleEntryTable* met, TRAPS) { - ResourceMark rm; + ResourceMark rm(THREAD); for (int i = 0; i < met->table_size(); i++) { for (ModuleEntry* m = met->bucket(i); m != NULL;) { char* path = m->location()->as_C_string(); - if (strncmp(path, "file:", 5) == 0 && ClassLoader::string_ends_with(path, ".jar")) { - m->print(); + if (strncmp(path, "file:", 5) == 0) { path = ClassLoader::skip_uri_protocol(path); ClassLoader::setup_module_search_path(path, THREAD); } diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/classfile/javaClasses.cpp --- a/src/hotspot/share/classfile/javaClasses.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/classfile/javaClasses.cpp Tue May 15 10:13:52 2018 -0700 @@ -768,7 +768,7 @@ { assert(fd->signature() == vmSymbols::string_signature(), "just checking"); - if (DumpSharedSpaces && oopDesc::is_archive_object(mirror())) { + if (DumpSharedSpaces && MetaspaceShared::is_archive_object(mirror())) { // Archive the String field and update the pointer. 
oop s = mirror()->obj_field(fd->offset()); oop archived_s = StringTable::create_archived_string(s, CHECK); @@ -809,7 +809,7 @@ if (MetaspaceShared::open_archive_heap_region_mapped()) { oop m = k->archived_java_mirror(); assert(m != NULL, "archived mirror is NULL"); - assert(oopDesc::is_archive_object(m), "must be archived mirror object"); + assert(MetaspaceShared::is_archive_object(m), "must be archived mirror object"); Handle m_h(THREAD, m); // restore_archived_mirror() clears the klass' _has_raw_archived_mirror flag restore_archived_mirror(k, m_h, Handle(), Handle(), Handle(), CHECK); @@ -3556,34 +3556,6 @@ base->long_field_put(static_clock_offset, value); } -// Support for java_lang_ref_ReferenceQueue - -oop java_lang_ref_ReferenceQueue::NULL_queue() { - InstanceKlass* ik = SystemDictionary::ReferenceQueue_klass(); - oop mirror = ik->java_mirror(); - return mirror->obj_field(static_NULL_queue_offset); -} - -oop java_lang_ref_ReferenceQueue::ENQUEUED_queue() { - InstanceKlass* ik = SystemDictionary::ReferenceQueue_klass(); - oop mirror = ik->java_mirror(); - return mirror->obj_field(static_ENQUEUED_queue_offset); -} - -void java_lang_ref_ReferenceQueue::compute_offsets() { - InstanceKlass* k = SystemDictionary::ReferenceQueue_klass(); - compute_offset(static_NULL_queue_offset, - k, - vmSymbols::referencequeue_null_name(), - vmSymbols::referencequeue_signature(), - true /* is_static */); - compute_offset(static_ENQUEUED_queue_offset, - k, - vmSymbols::referencequeue_enqueued_name(), - vmSymbols::referencequeue_signature(), - true /* is_static */); -} - // Support for java_lang_invoke_DirectMethodHandle int java_lang_invoke_DirectMethodHandle::_member_offset; @@ -4263,8 +4235,6 @@ int java_lang_ref_Reference::queue_offset; int java_lang_ref_Reference::next_offset; int java_lang_ref_Reference::discovered_offset; -int java_lang_ref_ReferenceQueue::static_NULL_queue_offset; -int java_lang_ref_ReferenceQueue::static_ENQUEUED_queue_offset; int 
java_lang_ref_SoftReference::timestamp_offset; int java_lang_ref_SoftReference::static_clock_offset; int java_lang_ClassLoader::parent_offset; @@ -4509,7 +4479,6 @@ java_lang_StackTraceElement::compute_offsets(); java_lang_StackFrameInfo::compute_offsets(); java_lang_LiveStackFrameInfo::compute_offsets(); - java_lang_ref_ReferenceQueue::compute_offsets(); // generated interpreter code wants to know about the offsets we just computed: AbstractAssembler::update_delayed_values(); diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/classfile/javaClasses.hpp --- a/src/hotspot/share/classfile/javaClasses.hpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/classfile/javaClasses.hpp Tue May 15 10:13:52 2018 -0700 @@ -946,20 +946,6 @@ static void serialize(SerializeClosure* f) NOT_CDS_RETURN; }; -// Interface to java.lang.ref.ReferenceQueue objects - -class java_lang_ref_ReferenceQueue: public AllStatic { -public: - static int static_NULL_queue_offset; - static int static_ENQUEUED_queue_offset; - - // Accessors - static oop NULL_queue(); - static oop ENQUEUED_queue(); - - static void compute_offsets(); -}; - // Interface to java.lang.invoke.MethodHandle objects class MethodHandleEntry; diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/classfile/javaClasses.inline.hpp --- a/src/hotspot/share/classfile/javaClasses.inline.hpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/classfile/javaClasses.inline.hpp Tue May 15 10:13:52 2018 -0700 @@ -127,12 +127,6 @@ HeapWord* java_lang_ref_Reference::discovered_addr_raw(oop ref) { return ref->obj_field_addr_raw(discovered_offset); } -oop java_lang_ref_Reference::queue(oop ref) { - return ref->obj_field(queue_offset); -} -void java_lang_ref_Reference::set_queue(oop ref, oop value) { - return ref->obj_field_put(queue_offset, value); -} bool java_lang_ref_Reference::is_phantom(oop ref) { return InstanceKlass::cast(ref->klass())->reference_type() == REF_PHANTOM; } diff -r 9822dd521c15 -r d93ae85b18c1 
src/hotspot/share/classfile/stringTable.cpp --- a/src/hotspot/share/classfile/stringTable.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/classfile/stringTable.cpp Tue May 15 10:13:52 2018 -0700 @@ -28,12 +28,13 @@ #include "classfile/javaClasses.inline.hpp" #include "classfile/stringTable.hpp" #include "classfile/systemDictionary.hpp" -#include "gc/shared/collectedHeap.inline.hpp" +#include "gc/shared/collectedHeap.hpp" #include "logging/log.hpp" #include "memory/allocation.inline.hpp" #include "memory/filemap.hpp" #include "memory/metaspaceShared.hpp" #include "memory/resourceArea.hpp" +#include "memory/universe.hpp" #include "oops/access.inline.hpp" #include "oops/oop.inline.hpp" #include "oops/typeArrayOop.inline.hpp" @@ -44,9 +45,6 @@ #include "services/diagnosticCommand.hpp" #include "utilities/hashtable.inline.hpp" #include "utilities/macros.hpp" -#if INCLUDE_G1GC -#include "gc/g1/g1StringDedup.hpp" -#endif // the number of buckets a thread claims const int ClaimChunkSize = 32; @@ -260,14 +258,10 @@ string = java_lang_String::create_from_unicode(name, len, CHECK_NULL); } -#if INCLUDE_G1GC - if (G1StringDedup::is_enabled()) { - // Deduplicate the string before it is interned. Note that we should never - // deduplicate a string after it has been interned. Doing so will counteract - // compiler optimizations done on e.g. interned string literals. - G1StringDedup::deduplicate(string()); - } -#endif + // Deduplicate the string before it is interned. Note that we should never + // deduplicate a string after it has been interned. Doing so will counteract + // compiler optimizations done on e.g. interned string literals. + Universe::heap()->deduplicate_string(string()); // Grab the StringTable_lock before getting the_table() because it could // change at safepoint. 
diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/classfile/systemDictionary.cpp --- a/src/hotspot/share/classfile/systemDictionary.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/classfile/systemDictionary.cpp Tue May 15 10:13:52 2018 -0700 @@ -2076,8 +2076,6 @@ InstanceKlass::cast(WK_KLASS(FinalReference_klass))->set_reference_type(REF_FINAL); InstanceKlass::cast(WK_KLASS(PhantomReference_klass))->set_reference_type(REF_PHANTOM); - initialize_wk_klasses_through(WK_KLASS_ENUM_NAME(ReferenceQueue_klass), scan, CHECK); - // JSR 292 classes WKID jsr292_group_start = WK_KLASS_ENUM_NAME(MethodHandle_klass); WKID jsr292_group_end = WK_KLASS_ENUM_NAME(VolatileCallSite_klass); diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/classfile/systemDictionary.hpp --- a/src/hotspot/share/classfile/systemDictionary.hpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/classfile/systemDictionary.hpp Tue May 15 10:13:52 2018 -0700 @@ -135,7 +135,6 @@ do_klass(FinalReference_klass, java_lang_ref_FinalReference, Pre ) \ do_klass(PhantomReference_klass, java_lang_ref_PhantomReference, Pre ) \ do_klass(Finalizer_klass, java_lang_ref_Finalizer, Pre ) \ - do_klass(ReferenceQueue_klass, java_lang_ref_ReferenceQueue, Pre ) \ \ do_klass(Thread_klass, java_lang_Thread, Pre ) \ do_klass(ThreadGroup_klass, java_lang_ThreadGroup, Pre ) \ diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/classfile/vmSymbols.hpp --- a/src/hotspot/share/classfile/vmSymbols.hpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/classfile/vmSymbols.hpp Tue May 15 10:13:52 2018 -0700 @@ -86,7 +86,6 @@ template(java_lang_ref_FinalReference, "java/lang/ref/FinalReference") \ template(java_lang_ref_PhantomReference, "java/lang/ref/PhantomReference") \ template(java_lang_ref_Finalizer, "java/lang/ref/Finalizer") \ - template(java_lang_ref_ReferenceQueue, "java/lang/ref/ReferenceQueue") \ template(java_lang_reflect_AccessibleObject, "java/lang/reflect/AccessibleObject") \ 
template(java_lang_reflect_Method, "java/lang/reflect/Method") \ template(java_lang_reflect_Constructor, "java/lang/reflect/Constructor") \ @@ -439,8 +438,6 @@ template(module_entry_name, "module_entry") \ template(resolved_references_name, "") \ template(init_lock_name, "") \ - template(referencequeue_null_name, "NULL") \ - template(referencequeue_enqueued_name, "ENQUEUED") \ \ /* name symbols needed by intrinsics */ \ VM_INTRINSICS_DO(VM_INTRINSIC_IGNORE, VM_SYMBOL_IGNORE, template, VM_SYMBOL_IGNORE, VM_ALIAS_IGNORE) \ @@ -534,8 +531,6 @@ template(string_signature, "Ljava/lang/String;") \ template(string_array_signature, "[Ljava/lang/String;") \ template(reference_signature, "Ljava/lang/ref/Reference;") \ - template(referencequeue_signature, "Ljava/lang/ref/ReferenceQueue;") \ - template(sun_misc_Cleaner_signature, "Lsun/misc/Cleaner;") \ template(executable_signature, "Ljava/lang/reflect/Executable;") \ template(module_signature, "Ljava/lang/Module;") \ template(concurrenthashmap_signature, "Ljava/util/concurrent/ConcurrentHashMap;") \ diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp --- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp Tue May 15 10:13:52 2018 -0700 @@ -5062,22 +5062,6 @@ assert(_collector->_overflow_list == NULL, "non-empty _overflow_list"); } -class CMSRefEnqueueTaskProxy: public AbstractGangTask { - typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask; - EnqueueTask& _task; - -public: - CMSRefEnqueueTaskProxy(EnqueueTask& task) - : AbstractGangTask("Enqueue reference objects in parallel"), - _task(task) - { } - - virtual void work(uint worker_id) - { - _task.work(worker_id); - } -}; - CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector, MemRegion span, CMSBitMap* bit_map, OopTaskQueue* work_queue): _span(span), @@ -5147,16 +5131,6 @@ workers->run_task(&rp_task); } 
-void CMSRefProcTaskExecutor::execute(EnqueueTask& task) -{ - - CMSHeap* heap = CMSHeap::heap(); - WorkGang* workers = heap->workers(); - assert(workers != NULL, "Need parallel worker threads."); - CMSRefEnqueueTaskProxy enq_task(task); - workers->run_task(&enq_task); -} - void CMSCollector::refProcessingWork() { ResourceMark rm; HandleMark hm; @@ -7149,7 +7123,7 @@ // coalesced chunk to the appropriate free list. if (inFreeRange()) { assert(freeFinger() >= _sp->bottom() && freeFinger() < _limit, - "freeFinger() " PTR_FORMAT " is out-of-bounds", p2i(freeFinger())); + "freeFinger() " PTR_FORMAT " is out of bounds", p2i(freeFinger())); flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger())); log_develop_trace(gc, sweep)("Sweep: last chunk: put_free_blk " PTR_FORMAT " (" SIZE_FORMAT ") [coalesced:%d]", diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.hpp --- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.hpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.hpp Tue May 15 10:13:52 2018 -0700 @@ -487,7 +487,6 @@ // Executes a task using worker threads. 
virtual void execute(ProcessTask& task); - virtual void execute(EnqueueTask& task); private: CMSCollector& _collector; }; diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/gc/cms/parNewGeneration.cpp --- a/src/hotspot/share/gc/cms/parNewGeneration.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/gc/cms/parNewGeneration.cpp Tue May 15 10:13:52 2018 -0700 @@ -789,21 +789,6 @@ par_scan_state.evacuate_followers_closure()); } -class ParNewRefEnqueueTaskProxy: public AbstractGangTask { - typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask; - EnqueueTask& _task; - -public: - ParNewRefEnqueueTaskProxy(EnqueueTask& task) - : AbstractGangTask("ParNewGeneration parallel reference enqueue"), - _task(task) - { } - - virtual void work(uint worker_id) { - _task.work(worker_id); - } -}; - void ParNewRefProcTaskExecutor::execute(ProcessTask& task) { CMSHeap* gch = CMSHeap::heap(); WorkGang* workers = gch->workers(); @@ -816,14 +801,6 @@ _young_gen.promotion_failed()); } -void ParNewRefProcTaskExecutor::execute(EnqueueTask& task) { - CMSHeap* gch = CMSHeap::heap(); - WorkGang* workers = gch->workers(); - assert(workers != NULL, "Need parallel worker threads."); - ParNewRefEnqueueTaskProxy enq_task(task); - workers->run_task(&enq_task); -} - void ParNewRefProcTaskExecutor::set_single_threaded_mode() { _state_set.flush(); CMSHeap* heap = CMSHeap::heap(); diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/gc/cms/parNewGeneration.hpp --- a/src/hotspot/share/gc/cms/parNewGeneration.hpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/gc/cms/parNewGeneration.hpp Tue May 15 10:13:52 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -297,7 +297,6 @@ // Executes a task using worker threads. virtual void execute(ProcessTask& task); - virtual void execute(EnqueueTask& task); // Switch to single threaded mode. virtual void set_single_threaded_mode(); }; diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/gc/g1/g1Allocator.inline.hpp --- a/src/hotspot/share/gc/g1/g1Allocator.inline.hpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/gc/g1/g1Allocator.inline.hpp Tue May 15 10:13:52 2018 -0700 @@ -64,7 +64,7 @@ inline PLAB* G1PLABAllocator::alloc_buffer(InCSetState dest) { assert(dest.is_valid(), - "Allocation buffer index out-of-bounds: " CSETSTATE_FORMAT, dest.value()); + "Allocation buffer index out of bounds: " CSETSTATE_FORMAT, dest.value()); assert(_alloc_buffers[dest.value()] != NULL, "Allocation buffer is NULL: " CSETSTATE_FORMAT, dest.value()); return _alloc_buffers[dest.value()]; diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/gc/g1/g1CollectedHeap.cpp --- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp Tue May 15 10:13:52 2018 -0700 @@ -2164,6 +2164,14 @@ return ret_val; } +void G1CollectedHeap::deduplicate_string(oop str) { + assert(java_lang_String::is_instance(str), "invariant"); + + if (G1StringDedup::is_enabled()) { + G1StringDedup::deduplicate(str); + } +} + void G1CollectedHeap::prepare_for_verify() { _verifier->prepare_for_verify(); } @@ -3783,7 +3791,6 @@ // Executes the given task using concurrent marking worker threads. virtual void execute(ProcessTask& task); - virtual void execute(EnqueueTask& task); }; // Gang task for possibly parallel reference processing @@ -3848,35 +3855,6 @@ _workers->run_task(&proc_task_proxy); } -// Gang task for parallel reference enqueueing. 
- -class G1STWRefEnqueueTaskProxy: public AbstractGangTask { - typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask; - EnqueueTask& _enq_task; - -public: - G1STWRefEnqueueTaskProxy(EnqueueTask& enq_task) : - AbstractGangTask("Enqueue reference objects in parallel"), - _enq_task(enq_task) - { } - - virtual void work(uint worker_id) { - _enq_task.work(worker_id); - } -}; - -// Driver routine for parallel reference enqueueing. -// Creates an instance of the ref enqueueing gang -// task and has the worker threads execute it. - -void G1STWRefProcTaskExecutor::execute(EnqueueTask& enq_task) { - assert(_workers != NULL, "Need parallel worker threads."); - - G1STWRefEnqueueTaskProxy enq_task_proxy(enq_task); - - _workers->run_task(&enq_task_proxy); -} - // End of weak reference support closures void G1CollectedHeap::process_discovered_references(G1ParScanThreadStateSet* per_thread_states) { diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/gc/g1/g1CollectedHeap.hpp --- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp Tue May 15 10:13:52 2018 -0700 @@ -1338,6 +1338,9 @@ void redirty_logged_cards(); // Verification + // Deduplicate the string + virtual void deduplicate_string(oop str); + // Perform any cleanup actions necessary before allowing a verification. virtual void prepare_for_verify(); @@ -1363,6 +1366,8 @@ virtual const char* const* concurrent_phases() const; virtual bool request_concurrent_phase(const char* phase); + virtual WorkGang* get_safepoint_workers() { return _workers; } + // The methods below are here for convenience and dispatch the // appropriate method depending on value of the given VerifyOption // parameter. 
The values for that parameter, and their meanings, diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/gc/g1/g1ConcurrentMark.cpp --- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp Tue May 15 10:13:52 2018 -0700 @@ -1520,7 +1520,6 @@ // Executes the given task using concurrent marking worker threads. virtual void execute(ProcessTask& task); - virtual void execute(EnqueueTask& task); }; class G1CMRefProcTaskProxy : public AbstractGangTask { @@ -1565,36 +1564,6 @@ _workers->run_task(&proc_task_proxy); } -class G1CMRefEnqueueTaskProxy : public AbstractGangTask { - typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask; - EnqueueTask& _enq_task; - -public: - G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) : - AbstractGangTask("Enqueue reference objects in parallel"), - _enq_task(enq_task) { } - - virtual void work(uint worker_id) { - _enq_task.work(worker_id); - } -}; - -void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) { - assert(_workers != NULL, "Need parallel worker threads."); - assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT"); - - G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task); - - // Not strictly necessary but... - // - // We need to reset the concurrency level before each - // proxy task execution, so that the termination protocol - // and overflow handling in G1CMTask::do_marking_step() knows - // how many workers to wait for. 
- _cm->set_concurrency(_active_workers); - _workers->run_task(&enq_task_proxy); -} - void G1ConcurrentMark::weak_refs_work(bool clear_all_soft_refs) { ResourceMark rm; HandleMark hm; @@ -1706,6 +1675,44 @@ } } +class G1PrecleanYieldClosure : public YieldClosure { + G1ConcurrentMark* _cm; + +public: + G1PrecleanYieldClosure(G1ConcurrentMark* cm) : _cm(cm) { } + + virtual bool should_return() { + return _cm->has_aborted(); + } + + virtual bool should_return_fine_grain() { + _cm->do_yield_check(); + return _cm->has_aborted(); + } +}; + +void G1ConcurrentMark::preclean() { + assert(G1UseReferencePrecleaning, "Precleaning must be enabled."); + + SuspendibleThreadSetJoiner joiner; + + G1CMKeepAliveAndDrainClosure keep_alive(this, task(0), true /* is_serial */); + G1CMDrainMarkingStackClosure drain_mark_stack(this, task(0), true /* is_serial */); + + set_concurrency_and_phase(1, true); + + G1PrecleanYieldClosure yield_cl(this); + + ReferenceProcessor* rp = _g1h->ref_processor_cm(); + // Precleaning is single threaded. Temporarily disable MT discovery. + ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false); + rp->preclean_discovered_references(rp->is_alive_non_header(), + &keep_alive, + &drain_mark_stack, + &yield_cl, + _gc_timer_cm); +} + // When sampling object counts, we already swapped the mark bitmaps, so we need to use // the prev bitmap determining liveness. class G1ObjectCountIsAliveClosure: public BoolObjectClosure { diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/gc/g1/g1ConcurrentMark.hpp --- a/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp Tue May 15 10:13:52 2018 -0700 @@ -563,6 +563,9 @@ // Do concurrent phase of marking, to a tentative transitive closure. void mark_from_roots(); + // Do concurrent preclean work. 
+ void preclean(); + void remark(); void cleanup(); diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/gc/g1/g1ConcurrentMarkThread.cpp --- a/src/hotspot/share/gc/g1/g1ConcurrentMarkThread.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/gc/g1/g1ConcurrentMarkThread.cpp Tue May 15 10:13:52 2018 -0700 @@ -57,6 +57,7 @@ expander(SCAN_ROOT_REGIONS,, "Concurrent Scan Root Regions") \ expander(CONCURRENT_MARK,, "Concurrent Mark") \ expander(MARK_FROM_ROOTS,, "Concurrent Mark From Roots") \ + expander(PRECLEAN,, "Concurrent Preclean") \ expander(BEFORE_REMARK,, NULL) \ expander(REMARK,, NULL) \ expander(REBUILD_REMEMBERED_SETS,, "Concurrent Rebuild Remembered Sets") \ @@ -309,7 +310,12 @@ break; } - // Provide a control point after mark_from_roots. + if (G1UseReferencePrecleaning) { + G1ConcPhase p(G1ConcurrentPhase::PRECLEAN, this); + _cm->preclean(); + } + + // Provide a control point before remark. { G1ConcPhaseManager p(G1ConcurrentPhase::BEFORE_REMARK, this); } diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/gc/g1/g1FullGCReferenceProcessorExecutor.cpp --- a/src/hotspot/share/gc/g1/g1FullGCReferenceProcessorExecutor.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/gc/g1/g1FullGCReferenceProcessorExecutor.cpp Tue May 15 10:13:52 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -63,14 +63,6 @@ *marker->stack_closure()); } -G1FullGCReferenceProcessingExecutor::G1RefEnqueueTaskProxy::G1RefEnqueueTaskProxy(EnqueueTask& enq_task) : - AbstractGangTask("G1 reference enqueue task"), - _enq_task(enq_task) { } - -void G1FullGCReferenceProcessingExecutor::G1RefEnqueueTaskProxy::work(uint worker_id) { - _enq_task.work(worker_id); -} - void G1FullGCReferenceProcessingExecutor::run_task(AbstractGangTask* task) { G1CollectedHeap::heap()->workers()->run_task(task, _collector->workers()); } @@ -80,12 +72,6 @@ run_task(&proc_task_proxy); } -// Driver routine for parallel reference processing. -void G1FullGCReferenceProcessingExecutor::execute(EnqueueTask& enq_task) { - G1RefEnqueueTaskProxy enq_task_proxy(enq_task); - run_task(&enq_task_proxy); -} - void G1FullGCReferenceProcessingExecutor::execute(STWGCTimer* timer, G1FullGCTracer* tracer) { GCTraceTime(Debug, gc, phases) debug("Phase 1: Reference Processing", timer); // Process reference objects found during marking. diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/gc/g1/g1FullGCReferenceProcessorExecutor.hpp --- a/src/hotspot/share/gc/g1/g1FullGCReferenceProcessorExecutor.hpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/gc/g1/g1FullGCReferenceProcessorExecutor.hpp Tue May 15 10:13:52 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -51,7 +51,6 @@ // Executes the given task using concurrent marking worker threads. 
virtual void execute(ProcessTask& task); - virtual void execute(EnqueueTask& task); private: void run_task(AbstractGangTask* task); @@ -68,15 +67,6 @@ virtual void work(uint worker_id); }; - - class G1RefEnqueueTaskProxy: public AbstractGangTask { - typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask; - EnqueueTask& _enq_task; - - public: - G1RefEnqueueTaskProxy(EnqueueTask& enq_task); - virtual void work(uint worker_id); - }; }; #endif // SHARE_GC_G1_G1FULLGCREFERENCEPROCESSOREXECUTOR_HPP diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/gc/g1/g1_globals.hpp --- a/src/hotspot/share/gc/g1/g1_globals.hpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/gc/g1/g1_globals.hpp Tue May 15 10:13:52 2018 -0700 @@ -79,6 +79,10 @@ "draining concurrent marking work queues.") \ range(1, INT_MAX) \ \ + experimental(bool, G1UseReferencePrecleaning, true, \ + "Concurrently preclean java.lang.ref.references instances " \ + "before the Remark pause.") \ + \ experimental(double, G1LastPLABAverageOccupancy, 50.0, \ "The expected average occupancy of the last PLAB in " \ "percent.") \ diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/gc/parallel/pcTasks.cpp --- a/src/hotspot/share/gc/parallel/pcTasks.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/gc/parallel/pcTasks.cpp Tue May 15 10:13:52 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -60,15 +60,7 @@ ParCompactionManager::MarkAndPushClosure mark_and_push_closure(cm); MarkingCodeBlobClosure mark_and_push_in_blobs(&mark_and_push_closure, !CodeBlobToOopClosure::FixRelocations); - if (_java_thread != NULL) - _java_thread->oops_do( - &mark_and_push_closure, - &mark_and_push_in_blobs); - - if (_vm_thread != NULL) - _vm_thread->oops_do( - &mark_and_push_closure, - &mark_and_push_in_blobs); + _thread->oops_do(&mark_and_push_closure, &mark_and_push_in_blobs); // Do the real work cm->follow_marking_stacks(); @@ -175,17 +167,6 @@ PSParallelCompact::gc_task_manager()->execute_and_wait(q); } -void RefProcTaskExecutor::execute(EnqueueTask& task) -{ - ParallelScavengeHeap* heap = ParallelScavengeHeap::heap(); - uint parallel_gc_threads = heap->gc_task_manager()->workers(); - GCTaskQueue* q = GCTaskQueue::create(); - for(uint i=0; ienqueue(new RefEnqueueTaskProxy(task, i)); - } - PSParallelCompact::gc_task_manager()->execute_and_wait(q); -} - // // StealMarkingTask // diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/gc/parallel/pcTasks.hpp --- a/src/hotspot/share/gc/parallel/pcTasks.hpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/gc/parallel/pcTasks.hpp Tue May 15 10:13:52 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -67,11 +67,10 @@ class ThreadRootsMarkingTask : public GCTask { private: - JavaThread* _java_thread; - VMThread* _vm_thread; + Thread* _thread; + public: - ThreadRootsMarkingTask(JavaThread* root) : _java_thread(root), _vm_thread(NULL) {} - ThreadRootsMarkingTask(VMThread* root) : _java_thread(NULL), _vm_thread(root) {} + ThreadRootsMarkingTask(Thread* root) : _thread(root) {} char* name() { return (char *)"thread-roots-marking-task"; } @@ -133,32 +132,6 @@ }; - -// -// RefEnqueueTaskProxy -// -// This task is used as a proxy to parallel reference processing tasks . -// - -class RefEnqueueTaskProxy: public GCTask { - typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask; - EnqueueTask& _enq_task; - uint _work_id; - -public: - RefEnqueueTaskProxy(EnqueueTask& enq_task, uint work_id) - : _enq_task(enq_task), - _work_id(work_id) - { } - - virtual char* name() { return (char *)"Enqueue reference objects in parallel"; } - virtual void do_it(GCTaskManager* manager, uint which) - { - _enq_task.work(_work_id); - } -}; - - // // RefProcTaskExecutor // @@ -168,7 +141,6 @@ class RefProcTaskExecutor: public AbstractRefProcTaskExecutor { virtual void execute(ProcessTask& task); - virtual void execute(EnqueueTask& task); }; diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/gc/parallel/psMarkSweep.cpp --- a/src/hotspot/share/gc/parallel/psMarkSweep.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/gc/parallel/psMarkSweep.cpp Tue May 15 10:13:52 2018 -0700 @@ -71,6 +71,7 @@ _span_based_discoverer.set_span(ParallelScavengeHeap::heap()->reserved_region()); set_ref_processor(new ReferenceProcessor(&_span_based_discoverer)); // a vanilla ref proc _counters = new CollectorCounters("PSMarkSweep", 1); + MarkSweep::initialize(); } // This method contains all heap specific policy for invoking mark sweep. 
@@ -292,7 +293,7 @@ assert(young_gen->max_size() > young_gen->from_space()->capacity_in_bytes() + young_gen->to_space()->capacity_in_bytes(), - "Sizes of space in young gen are out-of-bounds"); + "Sizes of space in young gen are out of bounds"); size_t young_live = young_gen->used_in_bytes(); size_t eden_live = young_gen->eden_space()->used_in_bytes(); diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/gc/parallel/psParallelCompact.cpp --- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp Tue May 15 10:13:52 2018 -0700 @@ -2049,6 +2049,17 @@ return ParallelScavengeHeap::gc_task_manager(); } +class PCAddThreadRootsMarkingTaskClosure : public ThreadClosure { +private: + GCTaskQueue* _q; + +public: + PCAddThreadRootsMarkingTaskClosure(GCTaskQueue* q) : _q(q) { } + void do_thread(Thread* t) { + _q->enqueue(new ThreadRootsMarkingTask(t)); + } +}; + void PSParallelCompact::marking_phase(ParCompactionManager* cm, bool maximum_heap_compaction, ParallelOldTracer *gc_tracer) { @@ -2077,7 +2088,8 @@ q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::universe)); q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jni_handles)); // We scan the thread roots in parallel - Threads::create_thread_roots_marking_tasks(q); + PCAddThreadRootsMarkingTaskClosure cl(q); + Threads::java_threads_and_vm_thread_do(&cl); q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::object_synchronizer)); q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::management)); q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::system_dictionary)); diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/gc/parallel/psScavenge.cpp --- a/src/hotspot/share/gc/parallel/psScavenge.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/gc/parallel/psScavenge.cpp Tue May 15 10:13:52 2018 -0700 @@ -148,27 +148,8 @@ _rp_task.work(_work_id, is_alive, keep_alive, evac_followers); } -class PSRefEnqueueTaskProxy: public 
GCTask { - typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask; - EnqueueTask& _enq_task; - uint _work_id; - -public: - PSRefEnqueueTaskProxy(EnqueueTask& enq_task, uint work_id) - : _enq_task(enq_task), - _work_id(work_id) - { } - - virtual char* name() { return (char *)"Enqueue reference objects in parallel"; } - virtual void do_it(GCTaskManager* manager, uint which) - { - _enq_task.work(_work_id); - } -}; - class PSRefProcTaskExecutor: public AbstractRefProcTaskExecutor { virtual void execute(ProcessTask& task); - virtual void execute(EnqueueTask& task); }; void PSRefProcTaskExecutor::execute(ProcessTask& task) @@ -188,17 +169,6 @@ manager->execute_and_wait(q); } - -void PSRefProcTaskExecutor::execute(EnqueueTask& task) -{ - GCTaskQueue* q = GCTaskQueue::create(); - GCTaskManager* manager = ParallelScavengeHeap::gc_task_manager(); - for(uint i=0; i < manager->active_workers(); i++) { - q->enqueue(new PSRefEnqueueTaskProxy(task, i)); - } - manager->execute_and_wait(q); -} - // This method contains all heap specific policy for invoking scavenge. // PSScavenge::invoke_no_policy() will do nothing but attempt to // scavenge. It will not clean up after failed promotions, bail out if @@ -242,6 +212,17 @@ return full_gc_done; } +class PSAddThreadRootsTaskClosure : public ThreadClosure { +private: + GCTaskQueue* _q; + +public: + PSAddThreadRootsTaskClosure(GCTaskQueue* q) : _q(q) { } + void do_thread(Thread* t) { + _q->enqueue(new ThreadRootsTask(t)); + } +}; + // This method contains no policy. You should probably // be calling invoke() instead. 
bool PSScavenge::invoke_no_policy() { @@ -382,7 +363,8 @@ q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::universe)); q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jni_handles)); // We scan the thread roots in parallel - Threads::create_thread_roots_tasks(q); + PSAddThreadRootsTaskClosure cl(q); + Threads::java_threads_and_vm_thread_do(&cl); q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::object_synchronizer)); q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::management)); q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::system_dictionary)); diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/gc/parallel/psTasks.cpp --- a/src/hotspot/share/gc/parallel/psTasks.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/gc/parallel/psTasks.cpp Tue May 15 10:13:52 2018 -0700 @@ -119,11 +119,7 @@ PSScavengeRootsClosure roots_closure(pm); MarkingCodeBlobClosure roots_in_blobs(&roots_closure, CodeBlobToOopClosure::FixRelocations); - if (_java_thread != NULL) - _java_thread->oops_do(&roots_closure, &roots_in_blobs); - - if (_vm_thread != NULL) - _vm_thread->oops_do(&roots_closure, &roots_in_blobs); + _thread->oops_do(&roots_closure, &roots_in_blobs); // Do the real work pm->drain_stacks(false); diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/gc/parallel/psTasks.hpp --- a/src/hotspot/share/gc/parallel/psTasks.hpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/gc/parallel/psTasks.hpp Tue May 15 10:13:52 2018 -0700 @@ -81,11 +81,10 @@ class ThreadRootsTask : public GCTask { private: - JavaThread* _java_thread; - VMThread* _vm_thread; + Thread* _thread; + public: - ThreadRootsTask(JavaThread* root) : _java_thread(root), _vm_thread(NULL) {} - ThreadRootsTask(VMThread* root) : _java_thread(NULL), _vm_thread(root) {} + ThreadRootsTask(Thread* root) : _thread(root) {} char* name() { return (char *)"thread-roots-task"; } diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/gc/serial/markSweep.cpp --- 
a/src/hotspot/share/gc/serial/markSweep.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/gc/serial/markSweep.cpp Tue May 15 10:13:52 2018 -0700 @@ -265,7 +265,7 @@ void MarkSweep::KeepAliveClosure::do_oop(oop* p) { MarkSweep::KeepAliveClosure::do_oop_work(p); } void MarkSweep::KeepAliveClosure::do_oop(narrowOop* p) { MarkSweep::KeepAliveClosure::do_oop_work(p); } -void marksweep_init() { +void MarkSweep::initialize() { MarkSweep::_gc_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer(); MarkSweep::_gc_tracer = new (ResourceObj::C_HEAP, mtGC) SerialOldTracer(); } diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/gc/serial/markSweep.hpp --- a/src/hotspot/share/gc/serial/markSweep.hpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/gc/serial/markSweep.hpp Tue May 15 10:13:52 2018 -0700 @@ -87,7 +87,6 @@ friend class AdjustPointerClosure; friend class KeepAliveClosure; friend class VM_MarkSweep; - friend void marksweep_init(); // // Vars @@ -117,6 +116,8 @@ static KeepAliveClosure keep_alive; public: + static void initialize(); + // Public closures static IsAliveClosure is_alive; static FollowRootClosure follow_root_closure; diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/gc/shared/collectedHeap.cpp --- a/src/hotspot/share/gc/shared/collectedHeap.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/gc/shared/collectedHeap.cpp Tue May 15 10:13:52 2018 -0700 @@ -652,3 +652,7 @@ void CollectedHeap::unpin_object(JavaThread* thread, oop obj) { ShouldNotReachHere(); } + +void CollectedHeap::deduplicate_string(oop str) { + // Do nothing, unless overridden in subclass. 
+} diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/gc/shared/collectedHeap.hpp --- a/src/hotspot/share/gc/shared/collectedHeap.hpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/gc/shared/collectedHeap.hpp Tue May 15 10:13:52 2018 -0700 @@ -597,6 +597,9 @@ virtual oop pin_object(JavaThread* thread, oop obj); virtual void unpin_object(JavaThread* thread, oop obj); + // Deduplicate the string, iff the GC supports string deduplication. + virtual void deduplicate_string(oop str); + virtual bool is_oop(oop object) const; // Non product verification and debugging. diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/gc/shared/genCollectedHeap.cpp --- a/src/hotspot/share/gc/shared/genCollectedHeap.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/gc/shared/genCollectedHeap.cpp Tue May 15 10:13:52 2018 -0700 @@ -182,6 +182,8 @@ initialize_size_policy(def_new_gen->eden()->capacity(), _old_gen->capacity(), def_new_gen->from()->capacity()); + + MarkSweep::initialize(); } void GenCollectedHeap::ref_processing_init() { diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/gc/shared/referenceProcessor.cpp --- a/src/hotspot/share/gc/shared/referenceProcessor.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/gc/shared/referenceProcessor.cpp Tue May 15 10:13:52 2018 -0700 @@ -594,19 +594,33 @@ bool _clear_referent; }; +void ReferenceProcessor::log_reflist(const char* prefix, DiscoveredList list[], uint num_active_queues) { + LogTarget(Trace, gc, ref) lt; + + if (!lt.is_enabled()) { + return; + } + + size_t total = 0; + + LogStream ls(lt); + ls.print("%s", prefix); + for (uint i = 0; i < num_active_queues; i++) { + ls.print(SIZE_FORMAT " ", list[i].length()); + total += list[i].length(); + } + ls.print_cr("(" SIZE_FORMAT ")", total); +} + #ifndef PRODUCT -void ReferenceProcessor::log_reflist_counts(DiscoveredList ref_lists[], uint active_length, size_t total_refs) { +void ReferenceProcessor::log_reflist_counts(DiscoveredList ref_lists[], uint 
num_active_queues) { if (!log_is_enabled(Trace, gc, ref)) { return; } - stringStream st; - for (uint i = 0; i < active_length; ++i) { - st.print(SIZE_FORMAT " ", ref_lists[i].length()); - } - log_develop_trace(gc, ref)("%s= " SIZE_FORMAT, st.as_string(), total_refs); + log_reflist("", ref_lists, num_active_queues); #ifdef ASSERT - for (uint i = active_length; i < _max_num_queues; i++) { + for (uint i = num_active_queues; i < _max_num_queues; i++) { assert(ref_lists[i].length() == 0, SIZE_FORMAT " unexpected References in %u", ref_lists[i].length(), i); } @@ -629,10 +643,11 @@ size_t total_refs = 0; log_develop_trace(gc, ref)("Balance ref_lists "); + log_reflist_counts(ref_lists, _max_num_queues); + for (uint i = 0; i < _max_num_queues; ++i) { total_refs += ref_lists[i].length(); } - log_reflist_counts(ref_lists, _max_num_queues, total_refs); size_t avg_refs = total_refs / _num_queues + 1; uint to_idx = 0; for (uint from_idx = 0; from_idx < _max_num_queues; from_idx++) { @@ -693,11 +708,11 @@ } } #ifdef ASSERT + log_reflist_counts(ref_lists, _num_queues); size_t balanced_total_refs = 0; for (uint i = 0; i < _num_queues; ++i) { balanced_total_refs += ref_lists[i].length(); } - log_reflist_counts(ref_lists, _num_queues, balanced_total_refs); assert(total_refs == balanced_total_refs, "Balancing was incomplete"); #endif } @@ -790,7 +805,7 @@ id = next_id(); } } - assert(id < _max_num_queues, "Id is out-of-bounds id %u and max id %u)", id, _max_num_queues); + assert(id < _max_num_queues, "Id is out of bounds id %u and max id %u)", id, _max_num_queues); // Get the discovered queue to which we will add DiscoveredList* list = NULL; @@ -1011,63 +1026,79 @@ return false; } -// Preclean the discovered references by removing those -// whose referents are alive, and by marking from those that -// are not active. These lists can be handled here -// in any order and, indeed, concurrently. 
-void ReferenceProcessor::preclean_discovered_references( - BoolObjectClosure* is_alive, - OopClosure* keep_alive, - VoidClosure* complete_gc, - YieldClosure* yield, - GCTimer* gc_timer) { +void ReferenceProcessor::preclean_discovered_references(BoolObjectClosure* is_alive, + OopClosure* keep_alive, + VoidClosure* complete_gc, + YieldClosure* yield, + GCTimer* gc_timer) { + // These lists can be handled here in any order and, indeed, concurrently. // Soft references { GCTraceTime(Debug, gc, ref) tm("Preclean SoftReferences", gc_timer); + log_reflist("SoftRef before: ", _discoveredSoftRefs, _max_num_queues); for (uint i = 0; i < _max_num_queues; i++) { if (yield->should_return()) { return; } - preclean_discovered_reflist(_discoveredSoftRefs[i], is_alive, - keep_alive, complete_gc, yield); + if (preclean_discovered_reflist(_discoveredSoftRefs[i], is_alive, + keep_alive, complete_gc, yield)) { + log_reflist("SoftRef abort: ", _discoveredSoftRefs, _max_num_queues); + return; + } } + log_reflist("SoftRef after: ", _discoveredSoftRefs, _max_num_queues); } // Weak references { GCTraceTime(Debug, gc, ref) tm("Preclean WeakReferences", gc_timer); + log_reflist("WeakRef before: ", _discoveredWeakRefs, _max_num_queues); for (uint i = 0; i < _max_num_queues; i++) { if (yield->should_return()) { return; } - preclean_discovered_reflist(_discoveredWeakRefs[i], is_alive, - keep_alive, complete_gc, yield); + if (preclean_discovered_reflist(_discoveredWeakRefs[i], is_alive, + keep_alive, complete_gc, yield)) { + log_reflist("WeakRef abort: ", _discoveredWeakRefs, _max_num_queues); + return; + } } + log_reflist("WeakRef after: ", _discoveredWeakRefs, _max_num_queues); } // Final references { GCTraceTime(Debug, gc, ref) tm("Preclean FinalReferences", gc_timer); + log_reflist("FinalRef before: ", _discoveredFinalRefs, _max_num_queues); for (uint i = 0; i < _max_num_queues; i++) { if (yield->should_return()) { return; } - preclean_discovered_reflist(_discoveredFinalRefs[i], is_alive, - 
keep_alive, complete_gc, yield); + if (preclean_discovered_reflist(_discoveredFinalRefs[i], is_alive, + keep_alive, complete_gc, yield)) { + log_reflist("FinalRef abort: ", _discoveredFinalRefs, _max_num_queues); + return; + } } + log_reflist("FinalRef after: ", _discoveredFinalRefs, _max_num_queues); } // Phantom references { GCTraceTime(Debug, gc, ref) tm("Preclean PhantomReferences", gc_timer); + log_reflist("PhantomRef before: ", _discoveredPhantomRefs, _max_num_queues); for (uint i = 0; i < _max_num_queues; i++) { if (yield->should_return()) { return; } - preclean_discovered_reflist(_discoveredPhantomRefs[i], is_alive, - keep_alive, complete_gc, yield); + if (preclean_discovered_reflist(_discoveredPhantomRefs[i], is_alive, + keep_alive, complete_gc, yield)) { + log_reflist("PhantomRef abort: ", _discoveredPhantomRefs, _max_num_queues); + return; + } } + log_reflist("PhantomRef after: ", _discoveredPhantomRefs, _max_num_queues); } } @@ -1079,19 +1110,20 @@ // java.lang.Reference. As a result, we need to be careful below // that ref removal steps interleave safely with ref discovery steps // (in this thread). 
-void -ReferenceProcessor::preclean_discovered_reflist(DiscoveredList& refs_list, - BoolObjectClosure* is_alive, - OopClosure* keep_alive, - VoidClosure* complete_gc, - YieldClosure* yield) { +bool ReferenceProcessor::preclean_discovered_reflist(DiscoveredList& refs_list, + BoolObjectClosure* is_alive, + OopClosure* keep_alive, + VoidClosure* complete_gc, + YieldClosure* yield) { DiscoveredListIterator iter(refs_list, keep_alive, is_alive); while (iter.has_next()) { + if (yield->should_return_fine_grain()) { + return true; + } iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */)); oop obj = iter.obj(); oop next = java_lang_ref_Reference::next(obj); - if (iter.referent() == NULL || iter.is_referent_alive() || - next != NULL) { + if (iter.referent() == NULL || iter.is_referent_alive() || next != NULL) { // The referent has been cleared, or is alive, or the Reference is not // active; we need to trace and mark its cohort. log_develop_trace(gc, ref)("Precleaning Reference (" INTPTR_FORMAT ": %s)", @@ -1121,6 +1153,7 @@ iter.removed(), iter.processed(), p2i(&refs_list)); } ) + return false; } const char* ReferenceProcessor::list_name(uint i) { diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/gc/shared/referenceProcessor.hpp --- a/src/hotspot/share/gc/shared/referenceProcessor.hpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/gc/shared/referenceProcessor.hpp Tue May 15 10:13:52 2018 -0700 @@ -279,15 +279,15 @@ OopClosure* keep_alive, VoidClosure* complete_gc); - // "Preclean" all the discovered reference lists - // by removing references with strongly reachable referents. + // "Preclean" all the discovered reference lists by removing references that + // are active (e.g. due to the mutator calling enqueue()) or with NULL or + // strongly reachable referents. 
// The first argument is a predicate on an oop that indicates - // its (strong) reachability and the second is a closure that + // its (strong) reachability and the fourth is a closure that // may be used to incrementalize or abort the precleaning process. // The caller is responsible for taking care of potential // interference with concurrent operations on these lists - // (or predicates involved) by other threads. Currently - // only used by the CMS collector. + // (or predicates involved) by other threads. void preclean_discovered_references(BoolObjectClosure* is_alive, OopClosure* keep_alive, VoidClosure* complete_gc, @@ -298,15 +298,17 @@ // occupying the i / _num_queues slot. const char* list_name(uint i); - // "Preclean" the given discovered reference list - // by removing references with strongly reachable referents. - // Currently used in support of CMS only. - void preclean_discovered_reflist(DiscoveredList& refs_list, +private: + // "Preclean" the given discovered reference list by removing references with + // the attributes mentioned in preclean_discovered_references(). + // Supports both normal and fine grain yielding. + // Returns whether the operation should be aborted. + bool preclean_discovered_reflist(DiscoveredList& refs_list, BoolObjectClosure* is_alive, OopClosure* keep_alive, VoidClosure* complete_gc, YieldClosure* yield); -private: + // round-robin mod _num_queues (not: _not_ mod _max_num_queues) uint next_id() { uint id = _next_id; @@ -323,7 +325,8 @@ void clear_discovered_references(DiscoveredList& refs_list); - void log_reflist_counts(DiscoveredList ref_lists[], uint active_length, size_t total_count) PRODUCT_RETURN; + void log_reflist(const char* prefix, DiscoveredList list[], uint num_active_queues); + void log_reflist_counts(DiscoveredList ref_lists[], uint num_active_queues) PRODUCT_RETURN; // Balances reference queues. void balance_queues(DiscoveredList ref_lists[]); @@ -589,11 +592,9 @@ // Abstract tasks to execute. 
class ProcessTask; - class EnqueueTask; // Executes a task using worker threads. virtual void execute(ProcessTask& task) = 0; - virtual void execute(EnqueueTask& task) = 0; // Switch to single threaded mode. virtual void set_single_threaded_mode() { }; @@ -628,27 +629,4 @@ const bool _marks_oops_alive; }; -// Abstract reference processing task to execute. -class AbstractRefProcTaskExecutor::EnqueueTask { -protected: - EnqueueTask(ReferenceProcessor& ref_processor, - DiscoveredList refs_lists[], - int n_queues, - ReferenceProcessorPhaseTimes* phase_times) - : _ref_processor(ref_processor), - _refs_lists(refs_lists), - _n_queues(n_queues), - _phase_times(phase_times) - { } - -public: - virtual void work(unsigned int work_id) = 0; - -protected: - ReferenceProcessor& _ref_processor; - DiscoveredList* _refs_lists; - ReferenceProcessorPhaseTimes* _phase_times; - int _n_queues; -}; - #endif // SHARE_VM_GC_SHARED_REFERENCEPROCESSOR_HPP diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/gc/shared/threadLocalAllocBuffer.hpp --- a/src/hotspot/share/gc/shared/threadLocalAllocBuffer.hpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/gc/shared/threadLocalAllocBuffer.hpp Tue May 15 10:13:52 2018 -0700 @@ -79,6 +79,8 @@ size_t remaining() const { return end() == NULL ? 0 : pointer_delta(hard_end(), top()); } + bool is_last_allocation(HeapWord* obj, size_t size) { return pointer_delta(top(), obj) == size; } + // Make parsable and release it. void reset(); @@ -129,6 +131,9 @@ // Allocate size HeapWords. The memory is NOT initialized to zero. inline HeapWord* allocate(size_t size); + // Undo last allocation. 
+ inline bool undo_allocate(HeapWord* obj, size_t size); + // Reserve space at the end of TLAB static size_t end_reserve() { int reserve_size = typeArrayOopDesc::header_size(T_INT); diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/gc/shared/threadLocalAllocBuffer.inline.hpp --- a/src/hotspot/share/gc/shared/threadLocalAllocBuffer.inline.hpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/gc/shared/threadLocalAllocBuffer.inline.hpp Tue May 15 10:13:52 2018 -0700 @@ -53,6 +53,19 @@ return NULL; } +inline bool ThreadLocalAllocBuffer::undo_allocate(HeapWord* obj, size_t size) { + invariants(); + + if (!is_last_allocation(obj, size)) { + return false; + } + + set_top(obj); + + invariants(); + return true; +} + inline size_t ThreadLocalAllocBuffer::compute_size(size_t obj_size) { // Compute the size for the new TLAB. // The "last" tlab may be smaller to reduce fragmentation. @@ -76,7 +89,7 @@ inline size_t ThreadLocalAllocBuffer::compute_min_size(size_t obj_size) { const size_t aligned_obj_size = align_object_size(obj_size); const size_t size_with_reserve = aligned_obj_size + alignment_reserve(); - return MAX2(size_with_reserve, MinTLABSize); + return MAX2(size_with_reserve, heap_word_size(MinTLABSize)); } void ThreadLocalAllocBuffer::record_slow_allocation(size_t obj_size) { diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/interpreter/interpreterRuntime.cpp --- a/src/hotspot/share/interpreter/interpreterRuntime.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/interpreter/interpreterRuntime.cpp Tue May 15 10:13:52 2018 -0700 @@ -58,6 +58,7 @@ #include "runtime/icache.hpp" #include "runtime/interfaceSupport.inline.hpp" #include "runtime/java.hpp" +#include "runtime/javaCalls.hpp" #include "runtime/jfieldIDWorkaround.hpp" #include "runtime/osThread.hpp" #include "runtime/sharedRuntime.hpp" @@ -446,17 +447,16 @@ thread->set_vm_result(exception()); IRT_END - -IRT_ENTRY(void, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException(JavaThread* 
thread, char* name, jint index)) - char message[jintAsStringSize]; - // lookup exception klass - TempNewSymbol s = SymbolTable::new_symbol(name, CHECK); +IRT_ENTRY(void, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException(JavaThread* thread, arrayOopDesc* a, jint index)) if (ProfileTraps) { note_trap(thread, Deoptimization::Reason_range_check, CHECK); } - // create exception - sprintf(message, "%d", index); - THROW_MSG(s, message); + + ResourceMark rm(thread); + stringStream ss; + ss.print("Index %d out of bounds for length %d", index, a->length()); + + THROW_MSG(vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), ss.as_string()); IRT_END IRT_ENTRY(void, InterpreterRuntime::throw_ClassCastException( diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/interpreter/interpreterRuntime.hpp --- a/src/hotspot/share/interpreter/interpreterRuntime.hpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/interpreter/interpreterRuntime.hpp Tue May 15 10:13:52 2018 -0700 @@ -83,7 +83,7 @@ Klass* interfaceKlass); static void throw_StackOverflowError(JavaThread* thread); static void throw_delayed_StackOverflowError(JavaThread* thread); - static void throw_ArrayIndexOutOfBoundsException(JavaThread* thread, char* name, jint index); + static void throw_ArrayIndexOutOfBoundsException(JavaThread* thread, arrayOopDesc* a, jint index); static void throw_ClassCastException(JavaThread* thread, oopDesc* obj); static void create_exception(JavaThread* thread, char* name, char* message); static void create_klass_exception(JavaThread* thread, char* name, oopDesc* obj); diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/interpreter/templateInterpreterGenerator.cpp --- a/src/hotspot/share/interpreter/templateInterpreterGenerator.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/interpreter/templateInterpreterGenerator.cpp Tue May 15 10:13:52 2018 -0700 @@ -173,11 +173,11 @@ } { CodeletMark cm(_masm, "throw exception entrypoints"); - 
Interpreter::_throw_ArrayIndexOutOfBoundsException_entry = generate_ArrayIndexOutOfBounds_handler("java/lang/ArrayIndexOutOfBoundsException"); - Interpreter::_throw_ArrayStoreException_entry = generate_klass_exception_handler("java/lang/ArrayStoreException" ); - Interpreter::_throw_ArithmeticException_entry = generate_exception_handler("java/lang/ArithmeticException" , "/ by zero"); + Interpreter::_throw_ArrayIndexOutOfBoundsException_entry = generate_ArrayIndexOutOfBounds_handler(); + Interpreter::_throw_ArrayStoreException_entry = generate_klass_exception_handler("java/lang/ArrayStoreException"); + Interpreter::_throw_ArithmeticException_entry = generate_exception_handler("java/lang/ArithmeticException", "/ by zero"); Interpreter::_throw_ClassCastException_entry = generate_ClassCastException_handler(); - Interpreter::_throw_NullPointerException_entry = generate_exception_handler("java/lang/NullPointerException" , NULL ); + Interpreter::_throw_NullPointerException_entry = generate_exception_handler("java/lang/NullPointerException", NULL); Interpreter::_throw_StackOverflowError_entry = generate_StackOverflowError_handler(); } diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/interpreter/templateInterpreterGenerator.hpp --- a/src/hotspot/share/interpreter/templateInterpreterGenerator.hpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/interpreter/templateInterpreterGenerator.hpp Tue May 15 10:13:52 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -51,7 +51,7 @@ } address generate_exception_handler_common(const char* name, const char* message, bool pass_oop); address generate_ClassCastException_handler(); - address generate_ArrayIndexOutOfBounds_handler(const char* name); + address generate_ArrayIndexOutOfBounds_handler(); address generate_return_entry_for(TosState state, int step, size_t index_size); address generate_earlyret_entry_for(TosState state); address generate_deopt_entry_for(TosState state, int step, address continuation = NULL); diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/jvmci/jvmciCodeInstaller.cpp --- a/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp Tue May 15 10:13:52 2018 -0700 @@ -591,6 +591,9 @@ // Get instructions and constants CodeSections early because we need it. _instructions = buffer.insts(); _constants = buffer.consts(); +#if INCLUDE_AOT + buffer.set_immutable_PIC(_immutable_pic_compilation); +#endif initialize_fields(target(), JNIHandles::resolve(compiled_code_obj), CHECK_OK); JVMCIEnv::CodeInstallResult result = initialize_buffer(buffer, false, CHECK_OK); @@ -624,6 +627,9 @@ // Get instructions and constants CodeSections early because we need it. _instructions = buffer.insts(); _constants = buffer.consts(); +#if INCLUDE_AOT + buffer.set_immutable_PIC(_immutable_pic_compilation); +#endif initialize_fields(target(), JNIHandles::resolve(compiled_code_obj), CHECK_OK); JVMCIEnv::CodeInstallResult result = initialize_buffer(buffer, true, CHECK_OK); diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/memory/iterator.hpp --- a/src/hotspot/share/memory/iterator.hpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/memory/iterator.hpp Tue May 15 10:13:52 2018 -0700 @@ -318,8 +318,11 @@ // by means of checking the return value from the polling // call. 
class YieldClosure : public StackObj { - public: - virtual bool should_return() = 0; +public: + virtual bool should_return() = 0; + + // Yield on a fine-grain level. The check in case of not yielding should be very fast. + virtual bool should_return_fine_grain() { return false; } }; // Abstract closure for serializing data (read or write). diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/memory/metachunk.cpp --- a/src/hotspot/share/memory/metachunk.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/memory/metachunk.cpp Tue May 15 10:13:52 2018 -0700 @@ -107,7 +107,7 @@ Copy::fill_to_words(start, size, word_value); } -void Metachunk::verify() { +void Metachunk::verify() const { assert(is_valid_sentinel(), "Chunk " PTR_FORMAT ": sentinel invalid", p2i(this)); const ChunkIndex chunk_type = get_chunk_type(); assert(is_valid_chunktype(chunk_type), "Chunk " PTR_FORMAT ": Invalid chunk type.", p2i(this)); diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/memory/metachunk.hpp --- a/src/hotspot/share/memory/metachunk.hpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/memory/metachunk.hpp Tue May 15 10:13:52 2018 -0700 @@ -228,7 +228,7 @@ bool is_class() const { return _is_class; } DEBUG_ONLY(void mangle(juint word_value);) - DEBUG_ONLY(void verify();) + DEBUG_ONLY(void verify() const;) }; diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/memory/metaspace.cpp --- a/src/hotspot/share/memory/metaspace.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/memory/metaspace.cpp Tue May 15 10:13:52 2018 -0700 @@ -299,11 +299,11 @@ void remove_chunk(Metachunk* chunk); // Return a single chunk of type index to the ChunkManager. - void return_single_chunk(ChunkIndex index, Metachunk* chunk); + void return_single_chunk(Metachunk* chunk); // Add the simple linked list of chunks to the freelist of chunks // of type index. 
- void return_chunk_list(ChunkIndex index, Metachunk* chunk); + void return_chunk_list(Metachunk* chunks); // Total of the space in the free chunks list size_t free_chunks_total_words(); @@ -1281,7 +1281,7 @@ // List of chunks in use by this SpaceManager. Allocations // are done from the current chunk. The list is used for deallocating // chunks when the SpaceManager is freed. - Metachunk* _chunks_in_use[NumberOfInUseLists]; + Metachunk* _chunk_list; Metachunk* _current_chunk; // Maximum number of small chunks to allocate to a SpaceManager @@ -1298,6 +1298,7 @@ size_t _overhead_words; size_t _capacity_words; size_t _used_words; + uintx _num_chunks_by_type[NumberOfInUseLists]; // Free lists of blocks are per SpaceManager since they // are assumed to be in chunks in use by the SpaceManager @@ -1307,10 +1308,7 @@ private: // Accessors - Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; } - void set_chunks_in_use(ChunkIndex index, Metachunk* v) { - _chunks_in_use[index] = v; - } + Metachunk* chunk_list() const { return _chunk_list; } BlockFreelist* block_freelists() const { return _block_freelists; } @@ -1338,9 +1336,6 @@ // Verify internal counters against the current state. Expects to be locked with lock(). DEBUG_ONLY(void verify_metrics_locked() const;) - protected: - void initialize(); - public: SpaceManager(Metaspace::MetadataType mdtype, Metaspace::MetaspaceType space_type, @@ -1393,7 +1388,7 @@ size_t get_initial_chunk_size(Metaspace::MetaspaceType type) const; // Todo: remove this once we have counters by chunk type. - size_t sum_count_in_chunks_in_use(ChunkIndex i); + uintx num_chunks_by_type(ChunkIndex chunk_type) const { return _num_chunks_by_type[chunk_type]; } Metachunk* get_new_chunk(size_t chunk_word_size); @@ -1619,7 +1614,7 @@ // Return Chunk to freelist. 
inc_container_count(); - chunk_manager->return_single_chunk(padding_chunk_type, padding_chunk); + chunk_manager->return_single_chunk(padding_chunk); // Please note: at this point, ChunkManager::return_single_chunk() // may already have merged the padding chunk with neighboring chunks, so // it may have vanished at this point. Do not reference the padding @@ -2122,7 +2117,7 @@ if (chunk == NULL) { break; } - chunk_manager->return_single_chunk(index, chunk); + chunk_manager->return_single_chunk(chunk); } DEBUG_ONLY(verify_container_count();) } @@ -3026,10 +3021,10 @@ return chunk; } -void ChunkManager::return_single_chunk(ChunkIndex index, Metachunk* chunk) { +void ChunkManager::return_single_chunk(Metachunk* chunk) { + const ChunkIndex index = chunk->get_chunk_type(); assert_lock_strong(MetaspaceExpand_lock); DEBUG_ONLY(do_verify_chunk(chunk);) - assert(chunk->get_chunk_type() == index, "Chunk does not match expected index."); assert(chunk != NULL, "Expected chunk."); assert(chunk->container() != NULL, "Container should have been set."); assert(chunk->is_tagged_free() == false, "Chunk should be in use."); @@ -3077,14 +3072,13 @@ } -void ChunkManager::return_chunk_list(ChunkIndex index, Metachunk* chunks) { - index_bounds_check(index); +void ChunkManager::return_chunk_list(Metachunk* chunks) { if (chunks == NULL) { return; } LogTarget(Trace, gc, metaspace, freelist) log; if (log.is_enabled()) { // tracing - log.print("returning list of %s chunks...", chunk_size_name(index)); + log.print("returning list of chunks..."); } unsigned num_chunks_returned = 0; size_t size_chunks_returned = 0; @@ -3097,17 +3091,12 @@ num_chunks_returned ++; size_chunks_returned += cur->word_size(); } - return_single_chunk(index, cur); + return_single_chunk(cur); cur = next; } if (log.is_enabled()) { // tracing - log.print("returned %u %s chunks to freelist, total word size " SIZE_FORMAT ".", - num_chunks_returned, chunk_size_name(index), size_chunks_returned); - if (index != HumongousIndex) 
{ - log.print("updated freelist count: " SIZE_FORMAT ".", free_chunks(index)->size()); - } else { - log.print("updated dictionary count " SIZE_FORMAT ".", _humongous_dictionary.total_count()); - } + log.print("returned %u chunks to freelist, total word size " SIZE_FORMAT ".", + num_chunks_returned, size_chunks_returned); } } @@ -3170,28 +3159,11 @@ return adjusted; } -size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) { - size_t count = 0; - Metachunk* chunk = chunks_in_use(i); - while (chunk != NULL) { - count++; - chunk = chunk->next(); - } - return count; -} - void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const { for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { - Metachunk* chunk = chunks_in_use(i); - st->print("SpaceManager: %s " PTR_FORMAT, - chunk_size_name(i), p2i(chunk)); - if (chunk != NULL) { - st->print_cr(" free " SIZE_FORMAT, - chunk->free_word_size()); - } else { - st->cr(); - } + st->print("SpaceManager: " UINTX_FORMAT " %s chunks.", + num_chunks_by_type(i), chunk_size_name(i)); } chunk_manager()->locked_print_free_chunks(st); @@ -3212,13 +3184,13 @@ // reduces space waste from 60+% to around 30%. 
if ((_space_type == Metaspace::AnonymousMetaspaceType || _space_type == Metaspace::ReflectionMetaspaceType) && _mdtype == Metaspace::NonClassType && - sum_count_in_chunks_in_use(SpecializedIndex) < _anon_and_delegating_metadata_specialize_chunk_limit && + num_chunks_by_type(SpecializedIndex) < _anon_and_delegating_metadata_specialize_chunk_limit && word_size + Metachunk::overhead() <= SpecializedChunk) { return SpecializedChunk; } - if (chunks_in_use(MediumIndex) == NULL && - sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) { + if (num_chunks_by_type(MediumIndex) == 0 && + num_chunks_by_type(SmallIndex) < _small_chunk_limit) { chunk_word_size = (size_t) small_chunk_size(); if (word_size + Metachunk::overhead() > small_chunk_size()) { chunk_word_size = medium_chunk_size(); @@ -3324,9 +3296,13 @@ _used_words(0), _overhead_words(0), _block_freelists(NULL), - _lock(lock) + _lock(lock), + _chunk_list(NULL), + _current_chunk(NULL) { - initialize(); + Metadebug::init_allocation_fail_alot_count(); + memset(_num_chunks_by_type, 0, sizeof(_num_chunks_by_type)); + log_trace(gc, metaspace, freelist)("SpaceManager(): " PTR_FORMAT, p2i(this)); } void SpaceManager::account_for_new_chunk(const Metachunk* new_chunk) { @@ -3335,6 +3311,8 @@ _capacity_words += new_chunk->word_size(); _overhead_words += Metachunk::overhead(); + DEBUG_ONLY(new_chunk->verify()); + _num_chunks_by_type[new_chunk->get_chunk_type()] ++; // Adjust global counters: MetaspaceUtils::inc_capacity(mdtype(), new_chunk->word_size()); @@ -3362,15 +3340,6 @@ MetaspaceUtils::dec_used(mdtype(), _used_words); } -void SpaceManager::initialize() { - Metadebug::init_allocation_fail_alot_count(); - for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { - _chunks_in_use[i] = NULL; - } - _current_chunk = NULL; - log_trace(gc, metaspace, freelist)("SpaceManager(): " PTR_FORMAT, p2i(this)); -} - SpaceManager::~SpaceManager() { // This call this->_lock which can't be done while holding 
MetaspaceExpand_lock @@ -3399,12 +3368,11 @@ // Follow each list of chunks-in-use and add them to the // free lists. Each list is NULL terminated. - - for (ChunkIndex i = ZeroIndex; i <= HumongousIndex; i = next_chunk_index(i)) { - Metachunk* chunks = chunks_in_use(i); - chunk_manager()->return_chunk_list(i, chunks); - set_chunks_in_use(i, NULL); - } + chunk_manager()->return_chunk_list(chunk_list()); +#ifdef ASSERT + _chunk_list = NULL; + _current_chunk = NULL; +#endif chunk_manager()->slow_locked_verify(); @@ -3446,8 +3414,8 @@ } // Add the new chunk at the head of its respective chunk list. - new_chunk->set_next(chunks_in_use(index)); - set_chunks_in_use(index, new_chunk); + new_chunk->set_next(_chunk_list); + _chunk_list = new_chunk; // Adjust counters. account_for_new_chunk(new_chunk); @@ -3540,21 +3508,17 @@ if (result != NULL) { account_for_allocation(word_size); - assert(result != (MetaWord*) chunks_in_use(MediumIndex), - "Head of the list is being allocated"); } return result; } void SpaceManager::verify() { - for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { - Metachunk* curr = chunks_in_use(i); - while (curr != NULL) { - DEBUG_ONLY(do_verify_chunk(curr);) - assert(curr->is_tagged_free() == false, "Chunk should be tagged as in use."); - curr = curr->next(); - } + Metachunk* curr = chunk_list(); + while (curr != NULL) { + DEBUG_ONLY(do_verify_chunk(curr);) + assert(curr->is_tagged_free() == false, "Chunk should be tagged as in use."); + curr = curr->next(); } } @@ -3569,21 +3533,19 @@ void SpaceManager::add_to_statistics_locked(SpaceManagerStatistics* out) const { assert_lock_strong(lock()); - for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { - UsedChunksStatistics& chunk_stat = out->chunk_stats(i); - Metachunk* chunk = chunks_in_use(i); - while (chunk != NULL) { - chunk_stat.add_num(1); - chunk_stat.add_cap(chunk->word_size()); - chunk_stat.add_overhead(Metachunk::overhead()); - 
chunk_stat.add_used(chunk->used_word_size() - Metachunk::overhead()); - if (chunk != current_chunk()) { - chunk_stat.add_waste(chunk->free_word_size()); - } else { - chunk_stat.add_free(chunk->free_word_size()); - } - chunk = chunk->next(); + Metachunk* chunk = chunk_list(); + while (chunk != NULL) { + UsedChunksStatistics& chunk_stat = out->chunk_stats(chunk->get_chunk_type()); + chunk_stat.add_num(1); + chunk_stat.add_cap(chunk->word_size()); + chunk_stat.add_overhead(Metachunk::overhead()); + chunk_stat.add_used(chunk->used_word_size() - Metachunk::overhead()); + if (chunk != current_chunk()) { + chunk_stat.add_waste(chunk->free_word_size()); + } else { + chunk_stat.add_free(chunk->free_word_size()); } + chunk = chunk->next(); } if (block_freelists() != NULL) { out->add_free_blocks_info(block_freelists()->num_blocks(), block_freelists()->total_size()); diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/memory/metaspaceShared.cpp --- a/src/hotspot/share/memory/metaspaceShared.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/memory/metaspaceShared.cpp Tue May 15 10:13:52 2018 -0700 @@ -1922,6 +1922,10 @@ } } +bool MetaspaceShared::is_archive_object(oop p) { + return (p == NULL) ? 
false : G1ArchiveAllocator::is_archive_object(p); +} + void MetaspaceShared::fixup_mapped_heap_regions() { FileMapInfo *mapinfo = FileMapInfo::current_info(); mapinfo->fixup_mapped_heap_regions(); diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/memory/metaspaceShared.hpp --- a/src/hotspot/share/memory/metaspaceShared.hpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/memory/metaspaceShared.hpp Tue May 15 10:13:52 2018 -0700 @@ -113,6 +113,9 @@ static oop archive_heap_object(oop obj, Thread* THREAD); static void archive_klass_objects(Thread* THREAD); #endif + + static bool is_archive_object(oop p) NOT_CDS_JAVA_HEAP_RETURN_(false); + static bool is_heap_object_archiving_allowed() { CDS_JAVA_HEAP_ONLY(return (UseG1GC && UseCompressedOops && UseCompressedClassPointers);) NOT_CDS_JAVA_HEAP(return false;) diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/oops/klass.cpp --- a/src/hotspot/share/oops/klass.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/oops/klass.cpp Tue May 15 10:13:52 2018 -0700 @@ -536,7 +536,7 @@ log_debug(cds, mirror)("Archived mirror is: " PTR_FORMAT, p2i(m)); if (m != NULL) { // mirror is archived, restore - assert(oopDesc::is_archive_object(m), "must be archived mirror object"); + assert(MetaspaceShared::is_archive_object(m), "must be archived mirror object"); Handle m_h(THREAD, m); java_lang_Class::restore_archived_mirror(this, m_h, loader, module_handle, protection_domain, CHECK); return; diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/oops/objArrayKlass.cpp --- a/src/hotspot/share/oops/objArrayKlass.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/oops/objArrayKlass.cpp Tue May 15 10:13:52 2018 -0700 @@ -251,12 +251,34 @@ // Check is all offsets and lengths are non negative if (src_pos < 0 || dst_pos < 0 || length < 0) { - THROW(vmSymbols::java_lang_ArrayIndexOutOfBoundsException()); + // Pass specific exception reason. 
+ ResourceMark rm; + stringStream ss; + if (src_pos < 0) { + ss.print("arraycopy: source index %d out of bounds for object array[%d]", + src_pos, s->length()); + } else if (dst_pos < 0) { + ss.print("arraycopy: destination index %d out of bounds for object array[%d]", + dst_pos, d->length()); + } else { + ss.print("arraycopy: length %d is negative", length); + } + THROW_MSG(vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), ss.as_string()); } // Check if the ranges are valid - if ( (((unsigned int) length + (unsigned int) src_pos) > (unsigned int) s->length()) - || (((unsigned int) length + (unsigned int) dst_pos) > (unsigned int) d->length()) ) { - THROW(vmSymbols::java_lang_ArrayIndexOutOfBoundsException()); + if ((((unsigned int) length + (unsigned int) src_pos) > (unsigned int) s->length()) || + (((unsigned int) length + (unsigned int) dst_pos) > (unsigned int) d->length())) { + // Pass specific exception reason. + ResourceMark rm; + stringStream ss; + if (((unsigned int) length + (unsigned int) src_pos) > (unsigned int) s->length()) { + ss.print("arraycopy: last source index %u out of bounds for object array[%d]", + (unsigned int) length + (unsigned int) src_pos, s->length()); + } else { + ss.print("arraycopy: last destination index %u out of bounds for object array[%d]", + (unsigned int) length + (unsigned int) dst_pos, d->length()); + } + THROW_MSG(vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), ss.as_string()); } // Special case. 
Boundary cases must be checked first diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/oops/oop.cpp --- a/src/hotspot/share/oops/oop.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/oops/oop.cpp Tue May 15 10:13:52 2018 -0700 @@ -32,9 +32,6 @@ #include "runtime/handles.inline.hpp" #include "runtime/thread.inline.hpp" #include "utilities/copy.hpp" -#if INCLUDE_G1GC -#include "gc/g1/g1Allocator.inline.hpp" -#endif bool always_do_update_barrier = false; @@ -214,9 +211,3 @@ jdouble oopDesc::double_field_acquire(int offset) const { return HeapAccess::load_at(as_oop(), offset); } void oopDesc::release_double_field_put(int offset, jdouble value) { HeapAccess::store_at(as_oop(), offset, value); } - -#if INCLUDE_CDS_JAVA_HEAP -bool oopDesc::is_archive_object(oop p) { - return (p == NULL) ? false : G1ArchiveAllocator::is_archive_object(p); -} -#endif diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/oops/oop.hpp --- a/src/hotspot/share/oops/oop.hpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/oops/oop.hpp Tue May 15 10:13:52 2018 -0700 @@ -339,8 +339,6 @@ assert(has_klass_gap(), "only applicable to compressed klass pointers"); return klass_offset_in_bytes() + sizeof(narrowKlass); } - - static bool is_archive_object(oop p) NOT_CDS_JAVA_HEAP_RETURN_(false); }; #endif // SHARE_VM_OOPS_OOP_HPP diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/oops/oop.inline.hpp --- a/src/hotspot/share/oops/oop.inline.hpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/oops/oop.inline.hpp Tue May 15 10:13:52 2018 -0700 @@ -26,6 +26,7 @@ #define SHARE_VM_OOPS_OOP_INLINE_HPP #include "gc/shared/collectedHeap.hpp" +#include "memory/metaspaceShared.hpp" #include "oops/access.inline.hpp" #include "oops/arrayKlass.hpp" #include "oops/arrayOop.hpp" @@ -329,8 +330,8 @@ "forwarding to something not aligned"); assert(Universe::heap()->is_in_reserved(p), "forwarding to something not in heap"); - assert(!is_archive_object(oop(this)) && - !is_archive_object(p), + 
assert(!MetaspaceShared::is_archive_object(oop(this)) && + !MetaspaceShared::is_archive_object(p), "forwarding archive object"); markOop m = markOopDesc::encode_pointer_as_mark(p); assert(m->decode_pointer() == p, "encoding must be reversable"); diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/oops/typeArrayKlass.cpp --- a/src/hotspot/share/oops/typeArrayKlass.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/oops/typeArrayKlass.cpp Tue May 15 10:13:52 2018 -0700 @@ -138,12 +138,36 @@ // Check is all offsets and lengths are non negative if (src_pos < 0 || dst_pos < 0 || length < 0) { - THROW(vmSymbols::java_lang_ArrayIndexOutOfBoundsException()); + // Pass specific exception reason. + ResourceMark rm; + stringStream ss; + if (src_pos < 0) { + ss.print("arraycopy: source index %d out of bounds for %s[%d]", + src_pos, type2name_tab[ArrayKlass::cast(s->klass())->element_type()], s->length()); + } else if (dst_pos < 0) { + ss.print("arraycopy: destination index %d out of bounds for %s[%d]", + dst_pos, type2name_tab[ArrayKlass::cast(d->klass())->element_type()], d->length()); + } else { + ss.print("arraycopy: length %d is negative", length); + } + THROW_MSG(vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), ss.as_string()); } // Check if the ranges are valid - if ( (((unsigned int) length + (unsigned int) src_pos) > (unsigned int) s->length()) - || (((unsigned int) length + (unsigned int) dst_pos) > (unsigned int) d->length()) ) { - THROW(vmSymbols::java_lang_ArrayIndexOutOfBoundsException()); + if ((((unsigned int) length + (unsigned int) src_pos) > (unsigned int) s->length()) || + (((unsigned int) length + (unsigned int) dst_pos) > (unsigned int) d->length())) { + // Pass specific exception reason. 
+ ResourceMark rm; + stringStream ss; + if (((unsigned int) length + (unsigned int) src_pos) > (unsigned int) s->length()) { + ss.print("arraycopy: last source index %u out of bounds for %s[%d]", + (unsigned int) length + (unsigned int) src_pos, + type2name_tab[ArrayKlass::cast(s->klass())->element_type()], s->length()); + } else { + ss.print("arraycopy: last destination index %u out of bounds for %s[%d]", + (unsigned int) length + (unsigned int) dst_pos, + type2name_tab[ArrayKlass::cast(d->klass())->element_type()], d->length()); + } + THROW_MSG(vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), ss.as_string()); } // Check zero copy if (length == 0) @@ -157,7 +181,6 @@ HeapAccess::arraycopy(s, d, src, dst, (size_t)length << l2es); } - // create a klass of array holding typeArrays Klass* TypeArrayKlass::array_klass_impl(bool or_null, int n, TRAPS) { int dim = dimension(); @@ -240,16 +263,11 @@ void TypeArrayKlass::print_value_on(outputStream* st) const { assert(is_klass(), "must be klass"); st->print("{type array "); - switch (element_type()) { - case T_BOOLEAN: st->print("bool"); break; - case T_CHAR: st->print("char"); break; - case T_FLOAT: st->print("float"); break; - case T_DOUBLE: st->print("double"); break; - case T_BYTE: st->print("byte"); break; - case T_SHORT: st->print("short"); break; - case T_INT: st->print("int"); break; - case T_LONG: st->print("long"); break; - default: ShouldNotReachHere(); + BasicType bt = element_type(); + if (bt == T_BOOLEAN) { + st->print("bool"); + } else { + st->print("%s", type2name_tab[bt]); } st->print("}"); } diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/opto/library_call.cpp --- a/src/hotspot/share/opto/library_call.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/opto/library_call.cpp Tue May 15 10:13:52 2018 -0700 @@ -52,6 +52,7 @@ #include "opto/subnode.hpp" #include "prims/nativeLookup.hpp" #include "prims/unsafe.hpp" +#include "runtime/objectMonitor.hpp" #include "runtime/sharedRuntime.hpp" 
#ifdef TRACE_HAVE_INTRINSICS #include "trace/traceMacros.hpp" @@ -545,7 +546,7 @@ case vmIntrinsics::_notify: case vmIntrinsics::_notifyAll: - if (InlineNotify) { + if (ObjectMonitor::Knob_InlineNotify) { return inline_notify(intrinsic_id()); } return false; diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/opto/loopnode.cpp --- a/src/hotspot/share/opto/loopnode.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/opto/loopnode.cpp Tue May 15 10:13:52 2018 -0700 @@ -413,11 +413,38 @@ Node* trunc1 = NULL; Node* trunc2 = NULL; const TypeInt* iv_trunc_t = NULL; + Node* orig_incr = incr; if (!(incr = CountedLoopNode::match_incr_with_optional_truncation(incr, &trunc1, &trunc2, &iv_trunc_t))) { return false; // Funny increment opcode } assert(incr->Opcode() == Op_AddI, "wrong increment code"); + const TypeInt* limit_t = gvn->type(limit)->is_int(); + if (trunc1 != NULL) { + // When there is a truncation, we must be sure that after the truncation + // the trip counter will end up higher than the limit, otherwise we are looking + // at an endless loop. Can happen with range checks. + + // Example: + // int i = 0; + // while (true) + // sum + = array[i]; + // i++; + // i = i && 0x7fff; + // } + // + // If the array is shorter than 0x8000 this exits through a AIOOB + // - Counted loop transformation is ok + // If the array is longer then this is an endless loop + // - No transformation can be done. 
+ + const TypeInt* incr_t = gvn->type(orig_incr)->is_int(); + if (limit_t->_hi > incr_t->_hi) { + // if the limit can have a higher value than the increment (before the phi) + return false; + } + } + // Get merge point Node *xphi = incr->in(1); Node *stride = incr->in(2); @@ -499,7 +526,6 @@ } const TypeInt* init_t = gvn->type(init_trip)->is_int(); - const TypeInt* limit_t = gvn->type(limit)->is_int(); if (stride_con > 0) { jlong init_p = (jlong)init_t->_lo + stride_con; @@ -3218,10 +3244,16 @@ void PhaseIdealLoop::recompute_dom_depth() { uint no_depth_marker = C->unique(); uint i; - // Initialize depth to "no depth yet" + // Initialize depth to "no depth yet" and realize all lazy updates for (i = 0; i < _idom_size; i++) { + // Only indices with a _dom_depth has a Node* or NULL (otherwise uninitalized). if (_dom_depth[i] > 0 && _idom[i] != NULL) { - _dom_depth[i] = no_depth_marker; + _dom_depth[i] = no_depth_marker; + + // heal _idom if it has a fwd mapping in _nodes + if (_idom[i]->in(0) == NULL) { + idom(i); + } } } if (_dom_stk == NULL) { diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/opto/loopnode.hpp --- a/src/hotspot/share/opto/loopnode.hpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/opto/loopnode.hpp Tue May 15 10:13:52 2018 -0700 @@ -852,27 +852,35 @@ // Array of immediate dominance info for each CFG node indexed by node idx private: uint _idom_size; - Node **_idom; // Array of immediate dominators - uint *_dom_depth; // Used for fast LCA test + Node **_idom; // Array of immediate dominators + uint *_dom_depth; // Used for fast LCA test GrowableArray* _dom_stk; // For recomputation of dom depth Node* idom_no_update(Node* d) const { - assert(d->_idx < _idom_size, "oob"); - Node* n = _idom[d->_idx]; + return idom_no_update(d->_idx); + } + + Node* idom_no_update(uint didx) const { + assert(didx < _idom_size, "oob"); + Node* n = _idom[didx]; assert(n != NULL,"Bad immediate dominator info."); - while (n->in(0) == NULL) { // Skip dead CFG nodes 
- //n = n->in(1); + while (n->in(0) == NULL) { // Skip dead CFG nodes n = (Node*)(((intptr_t)_nodes[n->_idx]) & ~1); assert(n != NULL,"Bad immediate dominator info."); } return n; } + Node *idom(Node* d) const { - uint didx = d->_idx; - Node *n = idom_no_update(d); - _idom[didx] = n; // Lazily remove dead CFG nodes from table. + return idom(d->_idx); + } + + Node *idom(uint didx) const { + Node *n = idom_no_update(didx); + _idom[didx] = n; // Lazily remove dead CFG nodes from table. return n; } + uint dom_depth(Node* d) const { guarantee(d != NULL, "Null dominator info."); guarantee(d->_idx < _idom_size, ""); diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/prims/jvm.cpp --- a/src/hotspot/share/prims/jvm.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/prims/jvm.cpp Tue May 15 10:13:52 2018 -0700 @@ -629,35 +629,6 @@ JVM_END -template -static void fixup_clone_referent(oop src, oop new_obj) { - typedef HeapAccess RefAccess; - const int ref_offset = java_lang_ref_Reference::referent_offset; - oop referent = RefAccess::oop_load_at(src, ref_offset); - RefAccess::oop_store_at(new_obj, ref_offset, referent); -} - -static void fixup_cloned_reference(ReferenceType ref_type, oop src, oop clone) { - // Kludge: After unbarriered clone, re-copy the referent with - // correct barriers. This works for current collectors, but won't - // work for ZGC and maybe other future collectors or variants of - // existing ones (like G1 with concurrent reference processing). - if (ref_type == REF_PHANTOM) { - fixup_clone_referent(src, clone); - } else { - fixup_clone_referent(src, clone); - } - if ((java_lang_ref_Reference::next(clone) != NULL) || - (java_lang_ref_Reference::queue(clone) == java_lang_ref_ReferenceQueue::ENQUEUED_queue())) { - // If the source has been enqueued or is being enqueued, don't - // register the clone with a queue. 
- java_lang_ref_Reference::set_queue(clone, java_lang_ref_ReferenceQueue::NULL_queue()); - } - // discovered and next are list links; the clone is not in those lists. - java_lang_ref_Reference::set_discovered(clone, NULL); - java_lang_ref_Reference::set_next(clone, NULL); -} - JVM_ENTRY(jobject, JVM_Clone(JNIEnv* env, jobject handle)) JVMWrapper("JVM_Clone"); Handle obj(THREAD, JNIHandles::resolve_non_null(handle)); @@ -676,35 +647,27 @@ #endif // Check if class of obj supports the Cloneable interface. - // All arrays are considered to be cloneable (See JLS 20.1.5) - if (!klass->is_cloneable()) { + // All arrays are considered to be cloneable (See JLS 20.1.5). + // All j.l.r.Reference classes are considered non-cloneable. + if (!klass->is_cloneable() || + (klass->is_instance_klass() && + InstanceKlass::cast(klass)->reference_type() != REF_NONE)) { ResourceMark rm(THREAD); THROW_MSG_0(vmSymbols::java_lang_CloneNotSupportedException(), klass->external_name()); } // Make shallow object copy - ReferenceType ref_type = REF_NONE; const int size = obj->size(); oop new_obj_oop = NULL; if (obj->is_array()) { const int length = ((arrayOop)obj())->length(); new_obj_oop = CollectedHeap::array_allocate(klass, size, length, CHECK_NULL); } else { - ref_type = InstanceKlass::cast(klass)->reference_type(); - assert((ref_type == REF_NONE) == - !klass->is_subclass_of(SystemDictionary::Reference_klass()), - "invariant"); new_obj_oop = CollectedHeap::obj_allocate(klass, size, CHECK_NULL); } HeapAccess<>::clone(obj(), new_obj_oop, size); - // If cloning a Reference, set Reference fields to a safe state. - // Fixup must be completed before any safepoint. - if (ref_type != REF_NONE) { - fixup_cloned_reference(ref_type, obj(), new_obj_oop); - } - Handle new_obj(THREAD, new_obj_oop); // Caution: this involves a java upcall, so the clone should be // "gc-robust" by this stage. 
diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/prims/whitebox.cpp --- a/src/hotspot/share/prims/whitebox.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/prims/whitebox.cpp Tue May 15 10:13:52 2018 -0700 @@ -1713,7 +1713,7 @@ WB_ENTRY(jboolean, WB_IsShared(JNIEnv* env, jobject wb, jobject obj)) oop obj_oop = JNIHandles::resolve(obj); - return oopDesc::is_archive_object(obj_oop); + return MetaspaceShared::is_archive_object(obj_oop); WB_END WB_ENTRY(jboolean, WB_IsSharedClass(JNIEnv* env, jobject wb, jclass clazz)) diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/runtime/advancedThresholdPolicy.cpp --- a/src/hotspot/share/runtime/advancedThresholdPolicy.cpp Tue May 15 18:03:31 2018 +0530 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,667 +0,0 @@ -/* - * Copyright (c) 2010, 2018, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- * - */ - -#include "precompiled.hpp" -#include "code/codeCache.hpp" -#include "runtime/advancedThresholdPolicy.hpp" -#include "runtime/handles.inline.hpp" -#include "runtime/simpleThresholdPolicy.inline.hpp" -#if INCLUDE_JVMCI -#include "jvmci/jvmciRuntime.hpp" -#endif - -#ifdef TIERED -// Print an event. -void AdvancedThresholdPolicy::print_specific(EventType type, const methodHandle& mh, const methodHandle& imh, - int bci, CompLevel level) { - tty->print(" rate="); - if (mh->prev_time() == 0) tty->print("n/a"); - else tty->print("%f", mh->rate()); - - tty->print(" k=%.2lf,%.2lf", threshold_scale(CompLevel_full_profile, Tier3LoadFeedback), - threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback)); - -} - -void AdvancedThresholdPolicy::initialize() { - int count = CICompilerCount; -#ifdef _LP64 - // Turn on ergonomic compiler count selection - if (FLAG_IS_DEFAULT(CICompilerCountPerCPU) && FLAG_IS_DEFAULT(CICompilerCount)) { - FLAG_SET_DEFAULT(CICompilerCountPerCPU, true); - } - if (CICompilerCountPerCPU) { - // Simple log n seems to grow too slowly for tiered, try something faster: log n * log log n - int log_cpu = log2_intptr(os::active_processor_count()); - int loglog_cpu = log2_intptr(MAX2(log_cpu, 1)); - count = MAX2(log_cpu * loglog_cpu * 3 / 2, 2); - FLAG_SET_ERGO(intx, CICompilerCount, count); - } -#else - // On 32-bit systems, the number of compiler threads is limited to 3. - // On these systems, the virtual address space available to the JVM - // is usually limited to 2-4 GB (the exact value depends on the platform). - // As the compilers (especially C2) can consume a large amount of - // memory, scaling the number of compiler threads with the number of - // available cores can result in the exhaustion of the address space - /// available to the VM and thus cause the VM to crash. 
- if (FLAG_IS_DEFAULT(CICompilerCount)) { - count = 3; - FLAG_SET_ERGO(intx, CICompilerCount, count); - } -#endif - - if (TieredStopAtLevel < CompLevel_full_optimization) { - // No C2 compiler thread required - set_c1_count(count); - } else { - set_c1_count(MAX2(count / 3, 1)); - set_c2_count(MAX2(count - c1_count(), 1)); - } - assert(count == c1_count() + c2_count(), "inconsistent compiler thread count"); - - // Some inlining tuning -#ifdef X86 - if (FLAG_IS_DEFAULT(InlineSmallCode)) { - FLAG_SET_DEFAULT(InlineSmallCode, 2000); - } -#endif - -#if defined SPARC || defined AARCH64 - if (FLAG_IS_DEFAULT(InlineSmallCode)) { - FLAG_SET_DEFAULT(InlineSmallCode, 2500); - } -#endif - - set_increase_threshold_at_ratio(); - set_start_time(os::javaTimeMillis()); -} - -// update_rate() is called from select_task() while holding a compile queue lock. -void AdvancedThresholdPolicy::update_rate(jlong t, Method* m) { - // Skip update if counters are absent. - // Can't allocate them since we are holding compile queue lock. - if (m->method_counters() == NULL) return; - - if (is_old(m)) { - // We don't remove old methods from the queue, - // so we can just zero the rate. - m->set_rate(0); - return; - } - - // We don't update the rate if we've just came out of a safepoint. - // delta_s is the time since last safepoint in milliseconds. - jlong delta_s = t - SafepointSynchronize::end_of_last_safepoint(); - jlong delta_t = t - (m->prev_time() != 0 ? m->prev_time() : start_time()); // milliseconds since the last measurement - // How many events were there since the last time? - int event_count = m->invocation_count() + m->backedge_count(); - int delta_e = event_count - m->prev_event_count(); - - // We should be running for at least 1ms. - if (delta_s >= TieredRateUpdateMinTime) { - // And we must've taken the previous point at least 1ms before. 
- if (delta_t >= TieredRateUpdateMinTime && delta_e > 0) { - m->set_prev_time(t); - m->set_prev_event_count(event_count); - m->set_rate((float)delta_e / (float)delta_t); // Rate is events per millisecond - } else { - if (delta_t > TieredRateUpdateMaxTime && delta_e == 0) { - // If nothing happened for 25ms, zero the rate. Don't modify prev values. - m->set_rate(0); - } - } - } -} - -// Check if this method has been stale from a given number of milliseconds. -// See select_task(). -bool AdvancedThresholdPolicy::is_stale(jlong t, jlong timeout, Method* m) { - jlong delta_s = t - SafepointSynchronize::end_of_last_safepoint(); - jlong delta_t = t - m->prev_time(); - if (delta_t > timeout && delta_s > timeout) { - int event_count = m->invocation_count() + m->backedge_count(); - int delta_e = event_count - m->prev_event_count(); - // Return true if there were no events. - return delta_e == 0; - } - return false; -} - -// We don't remove old methods from the compile queue even if they have -// very low activity. See select_task(). -bool AdvancedThresholdPolicy::is_old(Method* method) { - return method->invocation_count() > 50000 || method->backedge_count() > 500000; -} - -double AdvancedThresholdPolicy::weight(Method* method) { - return (double)(method->rate() + 1) * - (method->invocation_count() + 1) * (method->backedge_count() + 1); -} - -// Apply heuristics and return true if x should be compiled before y -bool AdvancedThresholdPolicy::compare_methods(Method* x, Method* y) { - if (x->highest_comp_level() > y->highest_comp_level()) { - // recompilation after deopt - return true; - } else - if (x->highest_comp_level() == y->highest_comp_level()) { - if (weight(x) > weight(y)) { - return true; - } - } - return false; -} - -// Is method profiled enough? 
-bool AdvancedThresholdPolicy::is_method_profiled(Method* method) { - MethodData* mdo = method->method_data(); - if (mdo != NULL) { - int i = mdo->invocation_count_delta(); - int b = mdo->backedge_count_delta(); - return call_predicate_helper(i, b, 1, method); - } - return false; -} - -// Called with the queue locked and with at least one element -CompileTask* AdvancedThresholdPolicy::select_task(CompileQueue* compile_queue) { - CompileTask *max_blocking_task = NULL; - CompileTask *max_task = NULL; - Method* max_method = NULL; - jlong t = os::javaTimeMillis(); - // Iterate through the queue and find a method with a maximum rate. - for (CompileTask* task = compile_queue->first(); task != NULL;) { - CompileTask* next_task = task->next(); - Method* method = task->method(); - update_rate(t, method); - if (max_task == NULL) { - max_task = task; - max_method = method; - } else { - // If a method has been stale for some time, remove it from the queue. - // Blocking tasks and tasks submitted from whitebox API don't become stale - if (task->can_become_stale() && is_stale(t, TieredCompileTaskTimeout, method) && !is_old(method)) { - if (PrintTieredEvents) { - print_event(REMOVE_FROM_QUEUE, method, method, task->osr_bci(), (CompLevel)task->comp_level()); - } - compile_queue->remove_and_mark_stale(task); - method->clear_queued_for_compilation(); - task = next_task; - continue; - } - - // Select a method with a higher rate - if (compare_methods(method, max_method)) { - max_task = task; - max_method = method; - } - } - - if (task->is_blocking()) { - if (max_blocking_task == NULL || compare_methods(method, max_blocking_task->method())) { - max_blocking_task = task; - } - } - - task = next_task; - } - - if (max_blocking_task != NULL) { - // In blocking compilation mode, the CompileBroker will make - // compilations submitted by a JVMCI compiler thread non-blocking. 
These - // compilations should be scheduled after all blocking compilations - // to service non-compiler related compilations sooner and reduce the - // chance of such compilations timing out. - max_task = max_blocking_task; - max_method = max_task->method(); - } - - if (max_task->comp_level() == CompLevel_full_profile && TieredStopAtLevel > CompLevel_full_profile - && is_method_profiled(max_method)) { - max_task->set_comp_level(CompLevel_limited_profile); - if (PrintTieredEvents) { - print_event(UPDATE_IN_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level()); - } - } - - return max_task; -} - -double AdvancedThresholdPolicy::threshold_scale(CompLevel level, int feedback_k) { - double queue_size = CompileBroker::queue_size(level); - int comp_count = compiler_count(level); - double k = queue_size / (feedback_k * comp_count) + 1; - - // Increase C1 compile threshold when the code cache is filled more - // than specified by IncreaseFirstTierCompileThresholdAt percentage. - // The main intention is to keep enough free space for C2 compiled code - // to achieve peak performance if the code cache is under stress. - if ((TieredStopAtLevel == CompLevel_full_optimization) && (level != CompLevel_full_optimization)) { - double current_reverse_free_ratio = CodeCache::reverse_free_ratio(CodeCache::get_code_blob_type(level)); - if (current_reverse_free_ratio > _increase_threshold_at_ratio) { - k *= exp(current_reverse_free_ratio - _increase_threshold_at_ratio); - } - } - return k; -} - -// Call and loop predicates determine whether a transition to a higher -// compilation level should be performed (pointers to predicate functions -// are passed to common()). -// Tier?LoadFeedback is basically a coefficient that determines of -// how many methods per compiler thread can be in the queue before -// the threshold values double. 
-bool AdvancedThresholdPolicy::loop_predicate(int i, int b, CompLevel cur_level, Method* method) { - switch(cur_level) { - case CompLevel_aot: { - double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback); - return loop_predicate_helper(i, b, k, method); - } - case CompLevel_none: - case CompLevel_limited_profile: { - double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback); - return loop_predicate_helper(i, b, k, method); - } - case CompLevel_full_profile: { - double k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback); - return loop_predicate_helper(i, b, k, method); - } - default: - return true; - } -} - -bool AdvancedThresholdPolicy::call_predicate(int i, int b, CompLevel cur_level, Method* method) { - switch(cur_level) { - case CompLevel_aot: { - double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback); - return call_predicate_helper(i, b, k, method); - } - case CompLevel_none: - case CompLevel_limited_profile: { - double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback); - return call_predicate_helper(i, b, k, method); - } - case CompLevel_full_profile: { - double k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback); - return call_predicate_helper(i, b, k, method); - } - default: - return true; - } -} - -// If a method is old enough and is still in the interpreter we would want to -// start profiling without waiting for the compiled method to arrive. -// We also take the load on compilers into the account. 
-bool AdvancedThresholdPolicy::should_create_mdo(Method* method, CompLevel cur_level) { - if (cur_level == CompLevel_none && - CompileBroker::queue_size(CompLevel_full_optimization) <= - Tier3DelayOn * compiler_count(CompLevel_full_optimization)) { - int i = method->invocation_count(); - int b = method->backedge_count(); - double k = Tier0ProfilingStartPercentage / 100.0; - return call_predicate_helper(i, b, k, method) || loop_predicate_helper(i, b, k, method); - } - return false; -} - -// Inlining control: if we're compiling a profiled method with C1 and the callee -// is known to have OSRed in a C2 version, don't inline it. -bool AdvancedThresholdPolicy::should_not_inline(ciEnv* env, ciMethod* callee) { - CompLevel comp_level = (CompLevel)env->comp_level(); - if (comp_level == CompLevel_full_profile || - comp_level == CompLevel_limited_profile) { - return callee->highest_osr_comp_level() == CompLevel_full_optimization; - } - return false; -} - -// Create MDO if necessary. -void AdvancedThresholdPolicy::create_mdo(const methodHandle& mh, JavaThread* THREAD) { - if (mh->is_native() || - mh->is_abstract() || - mh->is_accessor() || - mh->is_constant_getter()) { - return; - } - if (mh->method_data() == NULL) { - Method::build_interpreter_method_data(mh, CHECK_AND_CLEAR); - } -} - - -/* - * Method states: - * 0 - interpreter (CompLevel_none) - * 1 - pure C1 (CompLevel_simple) - * 2 - C1 with invocation and backedge counting (CompLevel_limited_profile) - * 3 - C1 with full profiling (CompLevel_full_profile) - * 4 - C2 (CompLevel_full_optimization) - * - * Common state transition patterns: - * a. 0 -> 3 -> 4. - * The most common path. But note that even in this straightforward case - * profiling can start at level 0 and finish at level 3. - * - * b. 0 -> 2 -> 3 -> 4. - * This case occurs when the load on C2 is deemed too high. 
So, instead of transitioning - * into state 3 directly and over-profiling while a method is in the C2 queue we transition to - * level 2 and wait until the load on C2 decreases. This path is disabled for OSRs. - * - * c. 0 -> (3->2) -> 4. - * In this case we enqueue a method for compilation at level 3, but the C1 queue is long enough - * to enable the profiling to fully occur at level 0. In this case we change the compilation level - * of the method to 2 while the request is still in-queue, because it'll allow it to run much faster - * without full profiling while c2 is compiling. - * - * d. 0 -> 3 -> 1 or 0 -> 2 -> 1. - * After a method was once compiled with C1 it can be identified as trivial and be compiled to - * level 1. These transition can also occur if a method can't be compiled with C2 but can with C1. - * - * e. 0 -> 4. - * This can happen if a method fails C1 compilation (it will still be profiled in the interpreter) - * or because of a deopt that didn't require reprofiling (compilation won't happen in this case because - * the compiled version already exists). - * - * Note that since state 0 can be reached from any other state via deoptimization different loops - * are possible. - * - */ - -// Common transition function. Given a predicate determines if a method should transition to another level. -CompLevel AdvancedThresholdPolicy::common(Predicate p, Method* method, CompLevel cur_level, bool disable_feedback) { - CompLevel next_level = cur_level; - int i = method->invocation_count(); - int b = method->backedge_count(); - - if (is_trivial(method)) { - next_level = CompLevel_simple; - } else { - switch(cur_level) { - default: break; - case CompLevel_aot: { - // If we were at full profile level, would we switch to full opt? 
- if (common(p, method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) { - next_level = CompLevel_full_optimization; - } else if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <= - Tier3DelayOff * compiler_count(CompLevel_full_optimization) && - (this->*p)(i, b, cur_level, method))) { - next_level = CompLevel_full_profile; - } - } - break; - case CompLevel_none: - // If we were at full profile level, would we switch to full opt? - if (common(p, method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) { - next_level = CompLevel_full_optimization; - } else if ((this->*p)(i, b, cur_level, method)) { -#if INCLUDE_JVMCI - if (EnableJVMCI && UseJVMCICompiler) { - // Since JVMCI takes a while to warm up, its queue inevitably backs up during - // early VM execution. As of 2014-06-13, JVMCI's inliner assumes that the root - // compilation method and all potential inlinees have mature profiles (which - // includes type profiling). If it sees immature profiles, JVMCI's inliner - // can perform pathologically bad (e.g., causing OutOfMemoryErrors due to - // exploring/inlining too many graphs). Since a rewrite of the inliner is - // in progress, we simply disable the dialing back heuristic for now and will - // revisit this decision once the new inliner is completed. - next_level = CompLevel_full_profile; - } else -#endif - { - // C1-generated fully profiled code is about 30% slower than the limited profile - // code that has only invocation and backedge counters. The observation is that - // if C2 queue is large enough we can spend too much time in the fully profiled code - // while waiting for C2 to pick the method from the queue. To alleviate this problem - // we introduce a feedback on the C2 queue size. If the C2 queue is sufficiently long - // we choose to compile a limited profiled version and then recompile with full profiling - // when the load on C2 goes down. 
- if (!disable_feedback && CompileBroker::queue_size(CompLevel_full_optimization) > - Tier3DelayOn * compiler_count(CompLevel_full_optimization)) { - next_level = CompLevel_limited_profile; - } else { - next_level = CompLevel_full_profile; - } - } - } - break; - case CompLevel_limited_profile: - if (is_method_profiled(method)) { - // Special case: we got here because this method was fully profiled in the interpreter. - next_level = CompLevel_full_optimization; - } else { - MethodData* mdo = method->method_data(); - if (mdo != NULL) { - if (mdo->would_profile()) { - if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <= - Tier3DelayOff * compiler_count(CompLevel_full_optimization) && - (this->*p)(i, b, cur_level, method))) { - next_level = CompLevel_full_profile; - } - } else { - next_level = CompLevel_full_optimization; - } - } else { - // If there is no MDO we need to profile - if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <= - Tier3DelayOff * compiler_count(CompLevel_full_optimization) && - (this->*p)(i, b, cur_level, method))) { - next_level = CompLevel_full_profile; - } - } - } - break; - case CompLevel_full_profile: - { - MethodData* mdo = method->method_data(); - if (mdo != NULL) { - if (mdo->would_profile()) { - int mdo_i = mdo->invocation_count_delta(); - int mdo_b = mdo->backedge_count_delta(); - if ((this->*p)(mdo_i, mdo_b, cur_level, method)) { - next_level = CompLevel_full_optimization; - } - } else { - next_level = CompLevel_full_optimization; - } - } - } - break; - } - } - return MIN2(next_level, (CompLevel)TieredStopAtLevel); -} - -// Determine if a method should be compiled with a normal entry point at a different level. 
-CompLevel AdvancedThresholdPolicy::call_event(Method* method, CompLevel cur_level, JavaThread * thread) { - CompLevel osr_level = MIN2((CompLevel) method->highest_osr_comp_level(), - common(&AdvancedThresholdPolicy::loop_predicate, method, cur_level, true)); - CompLevel next_level = common(&AdvancedThresholdPolicy::call_predicate, method, cur_level); - - // If OSR method level is greater than the regular method level, the levels should be - // equalized by raising the regular method level in order to avoid OSRs during each - // invocation of the method. - if (osr_level == CompLevel_full_optimization && cur_level == CompLevel_full_profile) { - MethodData* mdo = method->method_data(); - guarantee(mdo != NULL, "MDO should not be NULL"); - if (mdo->invocation_count() >= 1) { - next_level = CompLevel_full_optimization; - } - } else { - next_level = MAX2(osr_level, next_level); - } -#if INCLUDE_JVMCI - if (UseJVMCICompiler) { - next_level = JVMCIRuntime::adjust_comp_level(method, false, next_level, thread); - } -#endif - return next_level; -} - -// Determine if we should do an OSR compilation of a given method. -CompLevel AdvancedThresholdPolicy::loop_event(Method* method, CompLevel cur_level, JavaThread * thread) { - CompLevel next_level = common(&AdvancedThresholdPolicy::loop_predicate, method, cur_level, true); - if (cur_level == CompLevel_none) { - // If there is a live OSR method that means that we deopted to the interpreter - // for the transition. - CompLevel osr_level = MIN2((CompLevel)method->highest_osr_comp_level(), next_level); - if (osr_level > CompLevel_none) { - return osr_level; - } - } -#if INCLUDE_JVMCI - if (UseJVMCICompiler) { - next_level = JVMCIRuntime::adjust_comp_level(method, true, next_level, thread); - } -#endif - return next_level; -} - -// Update the rate and submit compile -void AdvancedThresholdPolicy::submit_compile(const methodHandle& mh, int bci, CompLevel level, JavaThread* thread) { - int hot_count = (bci == InvocationEntryBci) ? 
mh->invocation_count() : mh->backedge_count(); - update_rate(os::javaTimeMillis(), mh()); - CompileBroker::compile_method(mh, bci, level, mh, hot_count, CompileTask::Reason_Tiered, thread); -} - -bool AdvancedThresholdPolicy::maybe_switch_to_aot(const methodHandle& mh, CompLevel cur_level, CompLevel next_level, JavaThread* thread) { - if (UseAOT && !delay_compilation_during_startup()) { - if (cur_level == CompLevel_full_profile || cur_level == CompLevel_none) { - // If the current level is full profile or interpreter and we're switching to any other level, - // activate the AOT code back first so that we won't waste time overprofiling. - compile(mh, InvocationEntryBci, CompLevel_aot, thread); - // Fall through for JIT compilation. - } - if (next_level == CompLevel_limited_profile && cur_level != CompLevel_aot && mh->has_aot_code()) { - // If the next level is limited profile, use the aot code (if there is any), - // since it's essentially the same thing. - compile(mh, InvocationEntryBci, CompLevel_aot, thread); - // Not need to JIT, we're done. - return true; - } - } - return false; -} - - -// Handle the invocation event. -void AdvancedThresholdPolicy::method_invocation_event(const methodHandle& mh, const methodHandle& imh, - CompLevel level, CompiledMethod* nm, JavaThread* thread) { - if (should_create_mdo(mh(), level)) { - create_mdo(mh, thread); - } - CompLevel next_level = call_event(mh(), level, thread); - if (next_level != level) { - if (maybe_switch_to_aot(mh, level, next_level, thread)) { - // No JITting necessary - return; - } - if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) { - compile(mh, InvocationEntryBci, next_level, thread); - } - } -} - -// Handle the back branch event. Notice that we can compile the method -// with a regular entry from here. 
-void AdvancedThresholdPolicy::method_back_branch_event(const methodHandle& mh, const methodHandle& imh, - int bci, CompLevel level, CompiledMethod* nm, JavaThread* thread) { - if (should_create_mdo(mh(), level)) { - create_mdo(mh, thread); - } - // Check if MDO should be created for the inlined method - if (should_create_mdo(imh(), level)) { - create_mdo(imh, thread); - } - - if (is_compilation_enabled()) { - CompLevel next_osr_level = loop_event(imh(), level, thread); - CompLevel max_osr_level = (CompLevel)imh->highest_osr_comp_level(); - // At the very least compile the OSR version - if (!CompileBroker::compilation_is_in_queue(imh) && (next_osr_level != level)) { - compile(imh, bci, next_osr_level, thread); - } - - // Use loop event as an opportunity to also check if there's been - // enough calls. - CompLevel cur_level, next_level; - if (mh() != imh()) { // If there is an enclosing method - if (level == CompLevel_aot) { - // Recompile the enclosing method to prevent infinite OSRs. Stay at AOT level while it's compiling. 
- if (max_osr_level != CompLevel_none && !CompileBroker::compilation_is_in_queue(mh)) { - compile(mh, InvocationEntryBci, MIN2((CompLevel)TieredStopAtLevel, CompLevel_full_profile), thread); - } - } else { - // Current loop event level is not AOT - guarantee(nm != NULL, "Should have nmethod here"); - cur_level = comp_level(mh()); - next_level = call_event(mh(), cur_level, thread); - - if (max_osr_level == CompLevel_full_optimization) { - // The inlinee OSRed to full opt, we need to modify the enclosing method to avoid deopts - bool make_not_entrant = false; - if (nm->is_osr_method()) { - // This is an osr method, just make it not entrant and recompile later if needed - make_not_entrant = true; - } else { - if (next_level != CompLevel_full_optimization) { - // next_level is not full opt, so we need to recompile the - // enclosing method without the inlinee - cur_level = CompLevel_none; - make_not_entrant = true; - } - } - if (make_not_entrant) { - if (PrintTieredEvents) { - int osr_bci = nm->is_osr_method() ? 
nm->osr_entry_bci() : InvocationEntryBci; - print_event(MAKE_NOT_ENTRANT, mh(), mh(), osr_bci, level); - } - nm->make_not_entrant(); - } - } - // Fix up next_level if necessary to avoid deopts - if (next_level == CompLevel_limited_profile && max_osr_level == CompLevel_full_profile) { - next_level = CompLevel_full_profile; - } - if (cur_level != next_level) { - if (!maybe_switch_to_aot(mh, cur_level, next_level, thread) && !CompileBroker::compilation_is_in_queue(mh)) { - compile(mh, InvocationEntryBci, next_level, thread); - } - } - } - } else { - cur_level = comp_level(mh()); - next_level = call_event(mh(), cur_level, thread); - if (next_level != cur_level) { - if (!maybe_switch_to_aot(mh, cur_level, next_level, thread) && !CompileBroker::compilation_is_in_queue(mh)) { - compile(mh, InvocationEntryBci, next_level, thread); - } - } - } - } -} - -#endif // TIERED diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/runtime/advancedThresholdPolicy.hpp --- a/src/hotspot/share/runtime/advancedThresholdPolicy.hpp Tue May 15 18:03:31 2018 +0530 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,235 +0,0 @@ -/* - * Copyright (c) 2010, 2017, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). 
- * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#ifndef SHARE_VM_RUNTIME_ADVANCEDTHRESHOLDPOLICY_HPP -#define SHARE_VM_RUNTIME_ADVANCEDTHRESHOLDPOLICY_HPP - -#include "runtime/simpleThresholdPolicy.hpp" - -#ifdef TIERED -class CompileTask; -class CompileQueue; - -/* - * The system supports 5 execution levels: - * * level 0 - interpreter - * * level 1 - C1 with full optimization (no profiling) - * * level 2 - C1 with invocation and backedge counters - * * level 3 - C1 with full profiling (level 2 + MDO) - * * level 4 - C2 - * - * Levels 0, 2 and 3 periodically notify the runtime about the current value of the counters - * (invocation counters and backedge counters). The frequency of these notifications is - * different at each level. These notifications are used by the policy to decide what transition - * to make. - * - * Execution starts at level 0 (interpreter), then the policy can decide either to compile the - * method at level 3 or level 2. The decision is based on the following factors: - * 1. The length of the C2 queue determines the next level. The observation is that level 2 - * is generally faster than level 3 by about 30%, therefore we would want to minimize the time - * a method spends at level 3. We should only spend the time at level 3 that is necessary to get - * adequate profiling. So, if the C2 queue is long enough it is more beneficial to go first to - * level 2, because if we transitioned to level 3 we would be stuck there until our C2 compile - * request makes its way through the long queue. 
When the load on C2 recedes we are going to - * recompile at level 3 and start gathering profiling information. - * 2. The length of C1 queue is used to dynamically adjust the thresholds, so as to introduce - * additional filtering if the compiler is overloaded. The rationale is that by the time a - * method gets compiled it can become unused, so it doesn't make sense to put too much onto the - * queue. - * - * After profiling is completed at level 3 the transition is made to level 4. Again, the length - * of the C2 queue is used as a feedback to adjust the thresholds. - * - * After the first C1 compile some basic information is determined about the code like the number - * of the blocks and the number of the loops. Based on that it can be decided that a method - * is trivial and compiling it with C1 will yield the same code. In this case the method is - * compiled at level 1 instead of 4. - * - * We also support profiling at level 0. If C1 is slow enough to produce the level 3 version of - * the code and the C2 queue is sufficiently small we can decide to start profiling in the - * interpreter (and continue profiling in the compiled code once the level 3 version arrives). - * If the profiling at level 0 is fully completed before level 3 version is produced, a level 2 - * version is compiled instead in order to run faster waiting for a level 4 version. - * - * Compile queues are implemented as priority queues - for each method in the queue we compute - * the event rate (the number of invocation and backedge counter increments per unit of time). - * When getting an element off the queue we pick the one with the largest rate. Maintaining the - * rate also allows us to remove stale methods (the ones that got on the queue but stopped - * being used shortly after that). -*/ - -/* Command line options: - * - Tier?InvokeNotifyFreqLog and Tier?BackedgeNotifyFreqLog control the frequency of method - * invocation and backedge notifications. 
Basically every n-th invocation or backedge a mutator thread - * makes a call into the runtime. - * - * - Tier?InvocationThreshold, Tier?CompileThreshold, Tier?BackEdgeThreshold, Tier?MinInvocationThreshold control - * compilation thresholds. - * Level 2 thresholds are not used and are provided for option-compatibility and potential future use. - * Other thresholds work as follows: - * - * Transition from interpreter (level 0) to C1 with full profiling (level 3) happens when - * the following predicate is true (X is the level): - * - * i > TierXInvocationThreshold * s || (i > TierXMinInvocationThreshold * s && i + b > TierXCompileThreshold * s), - * - * where $i$ is the number of method invocations, $b$ number of backedges and $s$ is the scaling - * coefficient that will be discussed further. - * The intuition is to equalize the time that is spend profiling each method. - * The same predicate is used to control the transition from level 3 to level 4 (C2). It should be - * noted though that the thresholds are relative. Moreover i and b for the 0->3 transition come - * from Method* and for 3->4 transition they come from MDO (since profiled invocations are - * counted separately). Finally, if a method does not contain anything worth profiling, a transition - * from level 3 to level 4 occurs without considering thresholds (e.g., with fewer invocations than - * what is specified by Tier4InvocationThreshold). - * - * OSR transitions are controlled simply with b > TierXBackEdgeThreshold * s predicates. - * - * - Tier?LoadFeedback options are used to automatically scale the predicates described above depending - * on the compiler load. The scaling coefficients are computed as follows: - * - * s = queue_size_X / (TierXLoadFeedback * compiler_count_X) + 1, - * - * where queue_size_X is the current size of the compiler queue of level X, and compiler_count_X - * is the number of level X compiler threads. 
- * - * Basically these parameters describe how many methods should be in the compile queue - * per compiler thread before the scaling coefficient increases by one. - * - * This feedback provides the mechanism to automatically control the flow of compilation requests - * depending on the machine speed, mutator load and other external factors. - * - * - Tier3DelayOn and Tier3DelayOff parameters control another important feedback loop. - * Consider the following observation: a method compiled with full profiling (level 3) - * is about 30% slower than a method at level 2 (just invocation and backedge counters, no MDO). - * Normally, the following transitions will occur: 0->3->4. The problem arises when the C2 queue - * gets congested and the 3->4 transition is delayed. While the method is the C2 queue it continues - * executing at level 3 for much longer time than is required by the predicate and at suboptimal speed. - * The idea is to dynamically change the behavior of the system in such a way that if a substantial - * load on C2 is detected we would first do the 0->2 transition allowing a method to run faster. - * And then when the load decreases to allow 2->3 transitions. - * - * Tier3Delay* parameters control this switching mechanism. - * Tier3DelayOn is the number of methods in the C2 queue per compiler thread after which the policy - * no longer does 0->3 transitions but does 0->2 transitions instead. - * Tier3DelayOff switches the original behavior back when the number of methods in the C2 queue - * per compiler thread falls below the specified amount. - * The hysteresis is necessary to avoid jitter. - * - * - TieredCompileTaskTimeout is the amount of time an idle method can spend in the compile queue. - * Basically, since we use the event rate d(i + b)/dt as a value of priority when selecting a method to - * compile from the compile queue, we also can detect stale methods for which the rate has been - * 0 for some time in the same iteration. 
Stale methods can appear in the queue when an application - * abruptly changes its behavior. - * - * - TieredStopAtLevel, is used mostly for testing. It allows to bypass the policy logic and stick - * to a given level. For example it's useful to set TieredStopAtLevel = 1 in order to compile everything - * with pure c1. - * - * - Tier0ProfilingStartPercentage allows the interpreter to start profiling when the inequalities in the - * 0->3 predicate are already exceeded by the given percentage but the level 3 version of the - * method is still not ready. We can even go directly from level 0 to 4 if c1 doesn't produce a compiled - * version in time. This reduces the overall transition to level 4 and decreases the startup time. - * Note that this behavior is also guarded by the Tier3Delay mechanism: when the c2 queue is too long - * these is not reason to start profiling prematurely. - * - * - TieredRateUpdateMinTime and TieredRateUpdateMaxTime are parameters of the rate computation. - * Basically, the rate is not computed more frequently than TieredRateUpdateMinTime and is considered - * to be zero if no events occurred in TieredRateUpdateMaxTime. - */ - - -class AdvancedThresholdPolicy : public SimpleThresholdPolicy { - jlong _start_time; - - // Call and loop predicates determine whether a transition to a higher compilation - // level should be performed (pointers to predicate functions are passed to common(). - // Predicates also take compiler load into account. - typedef bool (AdvancedThresholdPolicy::*Predicate)(int i, int b, CompLevel cur_level, Method* method); - bool call_predicate(int i, int b, CompLevel cur_level, Method* method); - bool loop_predicate(int i, int b, CompLevel cur_level, Method* method); - // Common transition function. Given a predicate determines if a method should transition to another level. - CompLevel common(Predicate p, Method* method, CompLevel cur_level, bool disable_feedback = false); - // Transition functions. 
- // call_event determines if a method should be compiled at a different - // level with a regular invocation entry. - CompLevel call_event(Method* method, CompLevel cur_level, JavaThread * thread); - // loop_event checks if a method should be OSR compiled at a different - // level. - CompLevel loop_event(Method* method, CompLevel cur_level, JavaThread * thread); - // Has a method been long around? - // We don't remove old methods from the compile queue even if they have - // very low activity (see select_task()). - inline bool is_old(Method* method); - // Was a given method inactive for a given number of milliseconds. - // If it is, we would remove it from the queue (see select_task()). - inline bool is_stale(jlong t, jlong timeout, Method* m); - // Compute the weight of the method for the compilation scheduling - inline double weight(Method* method); - // Apply heuristics and return true if x should be compiled before y - inline bool compare_methods(Method* x, Method* y); - // Compute event rate for a given method. The rate is the number of event (invocations + backedges) - // per millisecond. - inline void update_rate(jlong t, Method* m); - // Compute threshold scaling coefficient - inline double threshold_scale(CompLevel level, int feedback_k); - // If a method is old enough and is still in the interpreter we would want to - // start profiling without waiting for the compiled method to arrive. This function - // determines whether we should do that. - inline bool should_create_mdo(Method* method, CompLevel cur_level); - // Create MDO if necessary. - void create_mdo(const methodHandle& mh, JavaThread* thread); - // Is method profiled enough? 
- bool is_method_profiled(Method* method); - - double _increase_threshold_at_ratio; - - bool maybe_switch_to_aot(const methodHandle& mh, CompLevel cur_level, CompLevel next_level, JavaThread* thread); - -protected: - void print_specific(EventType type, const methodHandle& mh, const methodHandle& imh, int bci, CompLevel level); - - void set_increase_threshold_at_ratio() { _increase_threshold_at_ratio = 100 / (100 - (double)IncreaseFirstTierCompileThresholdAt); } - void set_start_time(jlong t) { _start_time = t; } - jlong start_time() const { return _start_time; } - - // Submit a given method for compilation (and update the rate). - virtual void submit_compile(const methodHandle& mh, int bci, CompLevel level, JavaThread* thread); - // event() from SimpleThresholdPolicy would call these. - virtual void method_invocation_event(const methodHandle& method, const methodHandle& inlinee, - CompLevel level, CompiledMethod* nm, JavaThread* thread); - virtual void method_back_branch_event(const methodHandle& method, const methodHandle& inlinee, - int bci, CompLevel level, CompiledMethod* nm, JavaThread* thread); -public: - AdvancedThresholdPolicy() : _start_time(0) { } - // Select task is called by CompileBroker. We should return a task or NULL. 
- virtual CompileTask* select_task(CompileQueue* compile_queue); - virtual void initialize(); - virtual bool should_not_inline(ciEnv* env, ciMethod* callee); - -}; - -#endif // TIERED - -#endif // SHARE_VM_RUNTIME_ADVANCEDTHRESHOLDPOLICY_HPP diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/runtime/arguments.cpp --- a/src/hotspot/share/runtime/arguments.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/runtime/arguments.cpp Tue May 15 10:13:52 2018 -0700 @@ -544,6 +544,7 @@ { "SharedMiscCodeSize", JDK_Version::undefined(), JDK_Version::jdk(10), JDK_Version::undefined() }, { "UseUTCFileTimestamp", JDK_Version::undefined(), JDK_Version::jdk(11), JDK_Version::jdk(12) }, { "UseAppCDS", JDK_Version::undefined(), JDK_Version::jdk(11), JDK_Version::jdk(12) }, + { "InlineNotify", JDK_Version::undefined(), JDK_Version::jdk(11), JDK_Version::jdk(12) }, #ifdef TEST_VERIFY_SPECIAL_JVM_FLAGS { "dep > obs", JDK_Version::jdk(9), JDK_Version::jdk(8), JDK_Version::undefined() }, @@ -1603,9 +1604,9 @@ } void Arguments::set_tiered_flags() { - // With tiered, set default policy to AdvancedThresholdPolicy, which is 3. + // With tiered, set default policy to SimpleThresholdPolicy, which is 2. 
if (FLAG_IS_DEFAULT(CompilationPolicyChoice)) { - FLAG_SET_DEFAULT(CompilationPolicyChoice, 3); + FLAG_SET_DEFAULT(CompilationPolicyChoice, 2); } if (CompilationPolicyChoice < 2) { vm_exit_during_initialization( diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/runtime/compilationPolicy.cpp --- a/src/hotspot/share/runtime/compilationPolicy.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/runtime/compilationPolicy.cpp Tue May 15 10:13:52 2018 -0700 @@ -33,7 +33,6 @@ #include "oops/method.inline.hpp" #include "oops/oop.inline.hpp" #include "prims/nativeLookup.hpp" -#include "runtime/advancedThresholdPolicy.hpp" #include "runtime/compilationPolicy.hpp" #include "runtime/frame.hpp" #include "runtime/handles.inline.hpp" @@ -74,15 +73,8 @@ Unimplemented(); #endif break; - case 3: -#ifdef TIERED - CompilationPolicy::set_policy(new AdvancedThresholdPolicy()); -#else - Unimplemented(); -#endif - break; default: - fatal("CompilationPolicyChoice must be in the range: [0-3]"); + fatal("CompilationPolicyChoice must be in the range: [0-2]"); } CompilationPolicy::policy()->initialize(); } diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/runtime/globals.hpp --- a/src/hotspot/share/runtime/globals.hpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/runtime/globals.hpp Tue May 15 10:13:52 2018 -0700 @@ -847,8 +847,6 @@ \ experimental(intx, SyncVerbose, 0, "(Unstable)") \ \ - diagnostic(bool, InlineNotify, true, "intrinsify subset of notify") \ - \ experimental(intx, hashCode, 5, \ "(Unstable) select hashCode generation algorithm") \ \ @@ -1158,8 +1156,8 @@ "UseDynamicNumberOfCompilerThreads") \ \ product(intx, CompilationPolicyChoice, 0, \ - "which compilation policy (0-3)") \ - range(0, 3) \ + "which compilation policy (0-2)") \ + range(0, 2) \ \ develop(bool, UseStackBanging, true, \ "use stack banging for stack overflow checks (required for " \ diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/runtime/init.cpp --- 
a/src/hotspot/share/runtime/init.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/runtime/init.cpp Tue May 15 10:13:52 2018 -0700 @@ -62,9 +62,6 @@ void gc_barrier_stubs_init(); void interpreter_init(); // before any methods loaded void invocationCounter_init(); // before any methods loaded -#if INCLUDE_SERIALGC -void marksweep_init(); -#endif void accessFlags_init(); void templateTable_init(); void InterfaceSupport_init(); @@ -119,7 +116,6 @@ gc_barrier_stubs_init(); // depends on universe_init, must be before interpreter_init interpreter_init(); // before any methods loaded invocationCounter_init(); // before any methods loaded - SERIALGC_ONLY(marksweep_init()); accessFlags_init(); templateTable_init(); InterfaceSupport_init(); diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/runtime/objectMonitor.cpp --- a/src/hotspot/share/runtime/objectMonitor.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/runtime/objectMonitor.cpp Tue May 15 10:13:52 2018 -0700 @@ -98,13 +98,14 @@ // The knob* variables are effectively final. Once set they should // never be modified hence. Consider using __read_mostly with GCC. 
-int ObjectMonitor::Knob_ExitRelease = 0; -int ObjectMonitor::Knob_Verbose = 0; -int ObjectMonitor::Knob_VerifyInUse = 0; -int ObjectMonitor::Knob_VerifyMatch = 0; -int ObjectMonitor::Knob_SpinLimit = 5000; // derived by an external tool - +int ObjectMonitor::Knob_ExitRelease = 0; +int ObjectMonitor::Knob_InlineNotify = 1; +int ObjectMonitor::Knob_Verbose = 0; +int ObjectMonitor::Knob_VerifyInUse = 0; +int ObjectMonitor::Knob_VerifyMatch = 0; +int ObjectMonitor::Knob_SpinLimit = 5000; // derived by an external tool - + static int Knob_ReportSettings = 0; - static int Knob_SpinBase = 0; // Floor AKA SpinMin static int Knob_SpinBackOff = 0; // spin-loop backoff static int Knob_CASPenalty = -1; // Penalty for failed CAS @@ -2319,6 +2320,7 @@ #define SETKNOB(x) { Knob_##x = kvGetInt(knobs, #x, Knob_##x); } SETKNOB(ReportSettings); SETKNOB(ExitRelease); + SETKNOB(InlineNotify); SETKNOB(Verbose); SETKNOB(VerifyInUse); SETKNOB(VerifyMatch); diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/runtime/objectMonitor.hpp --- a/src/hotspot/share/runtime/objectMonitor.hpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/runtime/objectMonitor.hpp Tue May 15 10:13:52 2018 -0700 @@ -199,6 +199,7 @@ static PerfLongVariable * _sync_MonExtant; static int Knob_ExitRelease; + static int Knob_InlineNotify; static int Knob_Verbose; static int Knob_VerifyInUse; static int Knob_VerifyMatch; diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/runtime/perfData.cpp --- a/src/hotspot/share/runtime/perfData.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/runtime/perfData.cpp Tue May 15 10:13:52 2018 -0700 @@ -173,7 +173,7 @@ " units = %d, dsize = " SIZE_FORMAT ", vlen = " SIZE_FORMAT "," " pad_length = " SIZE_FORMAT ", size = " SIZE_FORMAT ", on_c_heap = %s," " address = " INTPTR_FORMAT "," - " data address = " INTPTR_FORMAT "\n", + " data address = " INTPTR_FORMAT, cname, dtype, variability(), units(), dsize, vlen, pad_length, size, is_on_c_heap() ? 
"TRUE":"FALSE", diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/runtime/perfMemory.cpp --- a/src/hotspot/share/runtime/perfMemory.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/runtime/perfMemory.cpp Tue May 15 10:13:52 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -98,7 +98,7 @@ log_debug(perf, memops)("PerfDataMemorySize = " SIZE_FORMAT "," " os::vm_allocation_granularity = %d," - " adjusted size = " SIZE_FORMAT "\n", + " adjusted size = " SIZE_FORMAT, PerfDataMemorySize, os::vm_allocation_granularity(), capacity); @@ -127,7 +127,7 @@ // the PerfMemory region was created as expected. log_debug(perf, memops)("PerfMemory created: address = " INTPTR_FORMAT "," - " size = " SIZE_FORMAT "\n", + " size = " SIZE_FORMAT, p2i(_start), _capacity); diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/runtime/simpleThresholdPolicy.cpp --- a/src/hotspot/share/runtime/simpleThresholdPolicy.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/runtime/simpleThresholdPolicy.cpp Tue May 15 10:13:52 2018 -0700 @@ -140,20 +140,33 @@ } void SimpleThresholdPolicy::initialize() { - if (FLAG_IS_DEFAULT(CICompilerCount)) { - FLAG_SET_DEFAULT(CICompilerCount, 3); - } int count = CICompilerCount; #ifdef _LP64 - // On 64-bit systems, scale the number of compiler threads with - // the number of cores available on the system. Scaling is not - // performed on 32-bit systems because it can lead to exhaustion - // of the virtual memory address space available to the JVM. 
+ // Turn on ergonomic compiler count selection + if (FLAG_IS_DEFAULT(CICompilerCountPerCPU) && FLAG_IS_DEFAULT(CICompilerCount)) { + FLAG_SET_DEFAULT(CICompilerCountPerCPU, true); + } if (CICompilerCountPerCPU) { - count = MAX2(log2_intptr(os::active_processor_count()) * 3 / 2, 2); + // Simple log n seems to grow too slowly for tiered, try something faster: log n * log log n + int log_cpu = log2_intptr(os::active_processor_count()); + int loglog_cpu = log2_intptr(MAX2(log_cpu, 1)); + count = MAX2(log_cpu * loglog_cpu * 3 / 2, 2); + FLAG_SET_ERGO(intx, CICompilerCount, count); + } +#else + // On 32-bit systems, the number of compiler threads is limited to 3. + // On these systems, the virtual address space available to the JVM + // is usually limited to 2-4 GB (the exact value depends on the platform). + // As the compilers (especially C2) can consume a large amount of + // memory, scaling the number of compiler threads with the number of + // available cores can result in the exhaustion of the address space + /// available to the VM and thus cause the VM to crash. 
+ if (FLAG_IS_DEFAULT(CICompilerCount)) { + count = 3; FLAG_SET_ERGO(intx, CICompilerCount, count); } #endif + if (TieredStopAtLevel < CompLevel_full_optimization) { // No C2 compiler thread required set_c1_count(count); @@ -162,6 +175,22 @@ set_c2_count(MAX2(count - c1_count(), 1)); } assert(count == c1_count() + c2_count(), "inconsistent compiler thread count"); + + // Some inlining tuning +#ifdef X86 + if (FLAG_IS_DEFAULT(InlineSmallCode)) { + FLAG_SET_DEFAULT(InlineSmallCode, 2000); + } +#endif + +#if defined SPARC || defined AARCH64 + if (FLAG_IS_DEFAULT(InlineSmallCode)) { + FLAG_SET_DEFAULT(InlineSmallCode, 2500); + } +#endif + + set_increase_threshold_at_ratio(); + set_start_time(os::javaTimeMillis()); } void SimpleThresholdPolicy::set_carry_if_necessary(InvocationCounter *counter) { @@ -186,7 +215,66 @@ // Called with the queue locked and with at least one element CompileTask* SimpleThresholdPolicy::select_task(CompileQueue* compile_queue) { - return select_task_helper(compile_queue); + CompileTask *max_blocking_task = NULL; + CompileTask *max_task = NULL; + Method* max_method = NULL; + jlong t = os::javaTimeMillis(); + // Iterate through the queue and find a method with a maximum rate. + for (CompileTask* task = compile_queue->first(); task != NULL;) { + CompileTask* next_task = task->next(); + Method* method = task->method(); + update_rate(t, method); + if (max_task == NULL) { + max_task = task; + max_method = method; + } else { + // If a method has been stale for some time, remove it from the queue. 
+ // Blocking tasks and tasks submitted from whitebox API don't become stale + if (task->can_become_stale() && is_stale(t, TieredCompileTaskTimeout, method) && !is_old(method)) { + if (PrintTieredEvents) { + print_event(REMOVE_FROM_QUEUE, method, method, task->osr_bci(), (CompLevel)task->comp_level()); + } + compile_queue->remove_and_mark_stale(task); + method->clear_queued_for_compilation(); + task = next_task; + continue; + } + + // Select a method with a higher rate + if (compare_methods(method, max_method)) { + max_task = task; + max_method = method; + } + } + + if (task->is_blocking()) { + if (max_blocking_task == NULL || compare_methods(method, max_blocking_task->method())) { + max_blocking_task = task; + } + } + + task = next_task; + } + + if (max_blocking_task != NULL) { + // In blocking compilation mode, the CompileBroker will make + // compilations submitted by a JVMCI compiler thread non-blocking. These + // compilations should be scheduled after all blocking compilations + // to service non-compiler related compilations sooner and reduce the + // chance of such compilations timing out. + max_task = max_blocking_task; + max_method = max_task->method(); + } + + if (max_task->comp_level() == CompLevel_full_profile && TieredStopAtLevel > CompLevel_full_profile + && is_method_profiled(max_method)) { + max_task->set_comp_level(CompLevel_limited_profile); + if (PrintTieredEvents) { + print_event(UPDATE_IN_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level()); + } + } + + return max_task; } void SimpleThresholdPolicy::reprofile(ScopeDesc* trap_scope, bool is_osr) { @@ -284,26 +372,150 @@ } } -// Tell the broker to compile the method +// Update the rate and submit compile void SimpleThresholdPolicy::submit_compile(const methodHandle& mh, int bci, CompLevel level, JavaThread* thread) { int hot_count = (bci == InvocationEntryBci) ? 
mh->invocation_count() : mh->backedge_count(); + update_rate(os::javaTimeMillis(), mh()); CompileBroker::compile_method(mh, bci, level, mh, hot_count, CompileTask::Reason_Tiered, thread); } +// Print an event. +void SimpleThresholdPolicy::print_specific(EventType type, const methodHandle& mh, const methodHandle& imh, + int bci, CompLevel level) { + tty->print(" rate="); + if (mh->prev_time() == 0) tty->print("n/a"); + else tty->print("%f", mh->rate()); + + tty->print(" k=%.2lf,%.2lf", threshold_scale(CompLevel_full_profile, Tier3LoadFeedback), + threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback)); + +} + +// update_rate() is called from select_task() while holding a compile queue lock. +void SimpleThresholdPolicy::update_rate(jlong t, Method* m) { + // Skip update if counters are absent. + // Can't allocate them since we are holding compile queue lock. + if (m->method_counters() == NULL) return; + + if (is_old(m)) { + // We don't remove old methods from the queue, + // so we can just zero the rate. + m->set_rate(0); + return; + } + + // We don't update the rate if we've just came out of a safepoint. + // delta_s is the time since last safepoint in milliseconds. + jlong delta_s = t - SafepointSynchronize::end_of_last_safepoint(); + jlong delta_t = t - (m->prev_time() != 0 ? m->prev_time() : start_time()); // milliseconds since the last measurement + // How many events were there since the last time? + int event_count = m->invocation_count() + m->backedge_count(); + int delta_e = event_count - m->prev_event_count(); + + // We should be running for at least 1ms. + if (delta_s >= TieredRateUpdateMinTime) { + // And we must've taken the previous point at least 1ms before. 
+ if (delta_t >= TieredRateUpdateMinTime && delta_e > 0) { + m->set_prev_time(t); + m->set_prev_event_count(event_count); + m->set_rate((float)delta_e / (float)delta_t); // Rate is events per millisecond + } else { + if (delta_t > TieredRateUpdateMaxTime && delta_e == 0) { + // If nothing happened for 25ms, zero the rate. Don't modify prev values. + m->set_rate(0); + } + } + } +} + +// Check if this method has been stale from a given number of milliseconds. +// See select_task(). +bool SimpleThresholdPolicy::is_stale(jlong t, jlong timeout, Method* m) { + jlong delta_s = t - SafepointSynchronize::end_of_last_safepoint(); + jlong delta_t = t - m->prev_time(); + if (delta_t > timeout && delta_s > timeout) { + int event_count = m->invocation_count() + m->backedge_count(); + int delta_e = event_count - m->prev_event_count(); + // Return true if there were no events. + return delta_e == 0; + } + return false; +} + +// We don't remove old methods from the compile queue even if they have +// very low activity. See select_task(). +bool SimpleThresholdPolicy::is_old(Method* method) { + return method->invocation_count() > 50000 || method->backedge_count() > 500000; +} + +double SimpleThresholdPolicy::weight(Method* method) { + return (double)(method->rate() + 1) * + (method->invocation_count() + 1) * (method->backedge_count() + 1); +} + +// Apply heuristics and return true if x should be compiled before y +bool SimpleThresholdPolicy::compare_methods(Method* x, Method* y) { + if (x->highest_comp_level() > y->highest_comp_level()) { + // recompilation after deopt + return true; + } else + if (x->highest_comp_level() == y->highest_comp_level()) { + if (weight(x) > weight(y)) { + return true; + } + } + return false; +} + +// Is method profiled enough? 
+bool SimpleThresholdPolicy::is_method_profiled(Method* method) { + MethodData* mdo = method->method_data(); + if (mdo != NULL) { + int i = mdo->invocation_count_delta(); + int b = mdo->backedge_count_delta(); + return call_predicate_helper(i, b, 1, method); + } + return false; +} + +double SimpleThresholdPolicy::threshold_scale(CompLevel level, int feedback_k) { + double queue_size = CompileBroker::queue_size(level); + int comp_count = compiler_count(level); + double k = queue_size / (feedback_k * comp_count) + 1; + + // Increase C1 compile threshold when the code cache is filled more + // than specified by IncreaseFirstTierCompileThresholdAt percentage. + // The main intention is to keep enough free space for C2 compiled code + // to achieve peak performance if the code cache is under stress. + if ((TieredStopAtLevel == CompLevel_full_optimization) && (level != CompLevel_full_optimization)) { + double current_reverse_free_ratio = CodeCache::reverse_free_ratio(CodeCache::get_code_blob_type(level)); + if (current_reverse_free_ratio > _increase_threshold_at_ratio) { + k *= exp(current_reverse_free_ratio - _increase_threshold_at_ratio); + } + } + return k; +} + // Call and loop predicates determine whether a transition to a higher // compilation level should be performed (pointers to predicate functions -// are passed to common() transition function). +// are passed to common()). +// Tier?LoadFeedback is basically a coefficient that determines of +// how many methods per compiler thread can be in the queue before +// the threshold values double. 
bool SimpleThresholdPolicy::loop_predicate(int i, int b, CompLevel cur_level, Method* method) { switch(cur_level) { case CompLevel_aot: { - return loop_predicate_helper(i, b, 1.0, method); + double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback); + return loop_predicate_helper(i, b, k, method); } case CompLevel_none: case CompLevel_limited_profile: { - return loop_predicate_helper(i, b, 1.0, method); + double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback); + return loop_predicate_helper(i, b, k, method); } case CompLevel_full_profile: { - return loop_predicate_helper(i, b, 1.0, method); + double k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback); + return loop_predicate_helper(i, b, k, method); } default: return true; @@ -313,14 +525,17 @@ bool SimpleThresholdPolicy::call_predicate(int i, int b, CompLevel cur_level, Method* method) { switch(cur_level) { case CompLevel_aot: { - return call_predicate_helper(i, b, 1.0, method); + double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback); + return call_predicate_helper(i, b, k, method); } case CompLevel_none: case CompLevel_limited_profile: { - return call_predicate_helper(i, b, 1.0, method); + double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback); + return call_predicate_helper(i, b, k, method); } case CompLevel_full_profile: { - return call_predicate_helper(i, b, 1.0, method); + double k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback); + return call_predicate_helper(i, b, k, method); } default: return true; @@ -341,31 +556,167 @@ return false; } +// If a method is old enough and is still in the interpreter we would want to +// start profiling without waiting for the compiled method to arrive. +// We also take the load on compilers into the account. 
+bool SimpleThresholdPolicy::should_create_mdo(Method* method, CompLevel cur_level) { + if (cur_level == CompLevel_none && + CompileBroker::queue_size(CompLevel_full_optimization) <= + Tier3DelayOn * compiler_count(CompLevel_full_optimization)) { + int i = method->invocation_count(); + int b = method->backedge_count(); + double k = Tier0ProfilingStartPercentage / 100.0; + return call_predicate_helper(i, b, k, method) || loop_predicate_helper(i, b, k, method); + } + return false; +} + +// Inlining control: if we're compiling a profiled method with C1 and the callee +// is known to have OSRed in a C2 version, don't inline it. +bool SimpleThresholdPolicy::should_not_inline(ciEnv* env, ciMethod* callee) { + CompLevel comp_level = (CompLevel)env->comp_level(); + if (comp_level == CompLevel_full_profile || + comp_level == CompLevel_limited_profile) { + return callee->highest_osr_comp_level() == CompLevel_full_optimization; + } + return false; +} + +// Create MDO if necessary. +void SimpleThresholdPolicy::create_mdo(const methodHandle& mh, JavaThread* THREAD) { + if (mh->is_native() || + mh->is_abstract() || + mh->is_accessor() || + mh->is_constant_getter()) { + return; + } + if (mh->method_data() == NULL) { + Method::build_interpreter_method_data(mh, CHECK_AND_CLEAR); + } +} + + +/* + * Method states: + * 0 - interpreter (CompLevel_none) + * 1 - pure C1 (CompLevel_simple) + * 2 - C1 with invocation and backedge counting (CompLevel_limited_profile) + * 3 - C1 with full profiling (CompLevel_full_profile) + * 4 - C2 (CompLevel_full_optimization) + * + * Common state transition patterns: + * a. 0 -> 3 -> 4. + * The most common path. But note that even in this straightforward case + * profiling can start at level 0 and finish at level 3. + * + * b. 0 -> 2 -> 3 -> 4. + * This case occurs when the load on C2 is deemed too high. 
So, instead of transitioning + * into state 3 directly and over-profiling while a method is in the C2 queue we transition to + * level 2 and wait until the load on C2 decreases. This path is disabled for OSRs. + * + * c. 0 -> (3->2) -> 4. + * In this case we enqueue a method for compilation at level 3, but the C1 queue is long enough + * to enable the profiling to fully occur at level 0. In this case we change the compilation level + * of the method to 2 while the request is still in-queue, because it'll allow it to run much faster + * without full profiling while c2 is compiling. + * + * d. 0 -> 3 -> 1 or 0 -> 2 -> 1. + * After a method was once compiled with C1 it can be identified as trivial and be compiled to + * level 1. These transition can also occur if a method can't be compiled with C2 but can with C1. + * + * e. 0 -> 4. + * This can happen if a method fails C1 compilation (it will still be profiled in the interpreter) + * or because of a deopt that didn't require reprofiling (compilation won't happen in this case because + * the compiled version already exists). + * + * Note that since state 0 can be reached from any other state via deoptimization different loops + * are possible. + * + */ + // Common transition function. Given a predicate determines if a method should transition to another level. -CompLevel SimpleThresholdPolicy::common(Predicate p, Method* method, CompLevel cur_level) { +CompLevel SimpleThresholdPolicy::common(Predicate p, Method* method, CompLevel cur_level, bool disable_feedback) { CompLevel next_level = cur_level; int i = method->invocation_count(); int b = method->backedge_count(); - if (is_trivial(method) && cur_level != CompLevel_aot) { + if (is_trivial(method)) { next_level = CompLevel_simple; } else { switch(cur_level) { - case CompLevel_aot: { - if ((this->*p)(i, b, cur_level, method)) { + default: break; + case CompLevel_aot: { + // If we were at full profile level, would we switch to full opt? 
+ if (common(p, method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) { + next_level = CompLevel_full_optimization; + } else if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <= + Tier3DelayOff * compiler_count(CompLevel_full_optimization) && + (this->*p)(i, b, cur_level, method))) { next_level = CompLevel_full_profile; } } break; case CompLevel_none: // If we were at full profile level, would we switch to full opt? - if (common(p, method, CompLevel_full_profile) == CompLevel_full_optimization) { + if (common(p, method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) { next_level = CompLevel_full_optimization; } else if ((this->*p)(i, b, cur_level, method)) { - next_level = CompLevel_full_profile; +#if INCLUDE_JVMCI + if (EnableJVMCI && UseJVMCICompiler) { + // Since JVMCI takes a while to warm up, its queue inevitably backs up during + // early VM execution. As of 2014-06-13, JVMCI's inliner assumes that the root + // compilation method and all potential inlinees have mature profiles (which + // includes type profiling). If it sees immature profiles, JVMCI's inliner + // can perform pathologically bad (e.g., causing OutOfMemoryErrors due to + // exploring/inlining too many graphs). Since a rewrite of the inliner is + // in progress, we simply disable the dialing back heuristic for now and will + // revisit this decision once the new inliner is completed. + next_level = CompLevel_full_profile; + } else +#endif + { + // C1-generated fully profiled code is about 30% slower than the limited profile + // code that has only invocation and backedge counters. The observation is that + // if C2 queue is large enough we can spend too much time in the fully profiled code + // while waiting for C2 to pick the method from the queue. To alleviate this problem + // we introduce a feedback on the C2 queue size. 
If the C2 queue is sufficiently long + // we choose to compile a limited profiled version and then recompile with full profiling + // when the load on C2 goes down. + if (!disable_feedback && CompileBroker::queue_size(CompLevel_full_optimization) > + Tier3DelayOn * compiler_count(CompLevel_full_optimization)) { + next_level = CompLevel_limited_profile; + } else { + next_level = CompLevel_full_profile; + } + } } break; case CompLevel_limited_profile: + if (is_method_profiled(method)) { + // Special case: we got here because this method was fully profiled in the interpreter. + next_level = CompLevel_full_optimization; + } else { + MethodData* mdo = method->method_data(); + if (mdo != NULL) { + if (mdo->would_profile()) { + if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <= + Tier3DelayOff * compiler_count(CompLevel_full_optimization) && + (this->*p)(i, b, cur_level, method))) { + next_level = CompLevel_full_profile; + } + } else { + next_level = CompLevel_full_optimization; + } + } else { + // If there is no MDO we need to profile + if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <= + Tier3DelayOff * compiler_count(CompLevel_full_optimization) && + (this->*p)(i, b, cur_level, method))) { + next_level = CompLevel_full_profile; + } + } + } + break; case CompLevel_full_profile: { MethodData* mdo = method->method_data(); @@ -382,17 +733,15 @@ } } break; - default: - break; } } return MIN2(next_level, (CompLevel)TieredStopAtLevel); } // Determine if a method should be compiled with a normal entry point at a different level. 
-CompLevel SimpleThresholdPolicy::call_event(Method* method, CompLevel cur_level, JavaThread* thread) { +CompLevel SimpleThresholdPolicy::call_event(Method* method, CompLevel cur_level, JavaThread * thread) { CompLevel osr_level = MIN2((CompLevel) method->highest_osr_comp_level(), - common(&SimpleThresholdPolicy::loop_predicate, method, cur_level)); + common(&SimpleThresholdPolicy::loop_predicate, method, cur_level, true)); CompLevel next_level = common(&SimpleThresholdPolicy::call_predicate, method, cur_level); // If OSR method level is greater than the regular method level, the levels should be @@ -417,7 +766,7 @@ // Determine if we should do an OSR compilation of a given method. CompLevel SimpleThresholdPolicy::loop_event(Method* method, CompLevel cur_level, JavaThread* thread) { - CompLevel next_level = common(&SimpleThresholdPolicy::loop_predicate, method, cur_level); + CompLevel next_level = common(&SimpleThresholdPolicy::loop_predicate, method, cur_level, true); if (cur_level == CompLevel_none) { // If there is a live OSR method that means that we deopted to the interpreter // for the transition. @@ -434,13 +783,39 @@ return next_level; } +bool SimpleThresholdPolicy::maybe_switch_to_aot(const methodHandle& mh, CompLevel cur_level, CompLevel next_level, JavaThread* thread) { + if (UseAOT && !delay_compilation_during_startup()) { + if (cur_level == CompLevel_full_profile || cur_level == CompLevel_none) { + // If the current level is full profile or interpreter and we're switching to any other level, + // activate the AOT code back first so that we won't waste time overprofiling. + compile(mh, InvocationEntryBci, CompLevel_aot, thread); + // Fall through for JIT compilation. + } + if (next_level == CompLevel_limited_profile && cur_level != CompLevel_aot && mh->has_aot_code()) { + // If the next level is limited profile, use the aot code (if there is any), + // since it's essentially the same thing. 
+ compile(mh, InvocationEntryBci, CompLevel_aot, thread); + // Not need to JIT, we're done. + return true; + } + } + return false; +} + // Handle the invocation event. void SimpleThresholdPolicy::method_invocation_event(const methodHandle& mh, const methodHandle& imh, - CompLevel level, CompiledMethod* nm, JavaThread* thread) { - if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) { - CompLevel next_level = call_event(mh(), level, thread); - if (next_level != level) { + CompLevel level, CompiledMethod* nm, JavaThread* thread) { + if (should_create_mdo(mh(), level)) { + create_mdo(mh, thread); + } + CompLevel next_level = call_event(mh(), level, thread); + if (next_level != level) { + if (maybe_switch_to_aot(mh, level, next_level, thread)) { + // No JITting necessary + return; + } + if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) { compile(mh, InvocationEntryBci, next_level, thread); } } @@ -450,25 +825,77 @@ // with a regular entry from here. void SimpleThresholdPolicy::method_back_branch_event(const methodHandle& mh, const methodHandle& imh, int bci, CompLevel level, CompiledMethod* nm, JavaThread* thread) { - // If the method is already compiling, quickly bail out. - if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) { - // Use loop event as an opportunity to also check there's been - // enough calls. - CompLevel cur_level = comp_level(mh()); - CompLevel next_level = call_event(mh(), cur_level, thread); - CompLevel next_osr_level = loop_event(mh(), level, thread); + if (should_create_mdo(mh(), level)) { + create_mdo(mh, thread); + } + // Check if MDO should be created for the inlined method + if (should_create_mdo(imh(), level)) { + create_mdo(imh, thread); + } - next_level = MAX2(next_level, - next_osr_level < CompLevel_full_optimization ? 
next_osr_level : cur_level); - bool is_compiling = false; - if (next_level != cur_level) { - compile(mh, InvocationEntryBci, next_level, thread); - is_compiling = true; + if (is_compilation_enabled()) { + CompLevel next_osr_level = loop_event(imh(), level, thread); + CompLevel max_osr_level = (CompLevel)imh->highest_osr_comp_level(); + // At the very least compile the OSR version + if (!CompileBroker::compilation_is_in_queue(imh) && (next_osr_level != level)) { + compile(imh, bci, next_osr_level, thread); } - // Do the OSR version - if (!is_compiling && next_osr_level != level) { - compile(mh, bci, next_osr_level, thread); + // Use loop event as an opportunity to also check if there's been + // enough calls. + CompLevel cur_level, next_level; + if (mh() != imh()) { // If there is an enclosing method + if (level == CompLevel_aot) { + // Recompile the enclosing method to prevent infinite OSRs. Stay at AOT level while it's compiling. + if (max_osr_level != CompLevel_none && !CompileBroker::compilation_is_in_queue(mh)) { + compile(mh, InvocationEntryBci, MIN2((CompLevel)TieredStopAtLevel, CompLevel_full_profile), thread); + } + } else { + // Current loop event level is not AOT + guarantee(nm != NULL, "Should have nmethod here"); + cur_level = comp_level(mh()); + next_level = call_event(mh(), cur_level, thread); + + if (max_osr_level == CompLevel_full_optimization) { + // The inlinee OSRed to full opt, we need to modify the enclosing method to avoid deopts + bool make_not_entrant = false; + if (nm->is_osr_method()) { + // This is an osr method, just make it not entrant and recompile later if needed + make_not_entrant = true; + } else { + if (next_level != CompLevel_full_optimization) { + // next_level is not full opt, so we need to recompile the + // enclosing method without the inlinee + cur_level = CompLevel_none; + make_not_entrant = true; + } + } + if (make_not_entrant) { + if (PrintTieredEvents) { + int osr_bci = nm->is_osr_method() ? 
nm->osr_entry_bci() : InvocationEntryBci; + print_event(MAKE_NOT_ENTRANT, mh(), mh(), osr_bci, level); + } + nm->make_not_entrant(); + } + } + // Fix up next_level if necessary to avoid deopts + if (next_level == CompLevel_limited_profile && max_osr_level == CompLevel_full_profile) { + next_level = CompLevel_full_profile; + } + if (cur_level != next_level) { + if (!maybe_switch_to_aot(mh, cur_level, next_level, thread) && !CompileBroker::compilation_is_in_queue(mh)) { + compile(mh, InvocationEntryBci, next_level, thread); + } + } + } + } else { + cur_level = comp_level(mh()); + next_level = call_event(mh(), cur_level, thread); + if (next_level != cur_level) { + if (!maybe_switch_to_aot(mh, cur_level, next_level, thread) && !CompileBroker::compilation_is_in_queue(mh)) { + compile(mh, InvocationEntryBci, next_level, thread); + } + } } } } diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/runtime/simpleThresholdPolicy.hpp --- a/src/hotspot/share/runtime/simpleThresholdPolicy.hpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/runtime/simpleThresholdPolicy.hpp Tue May 15 10:13:52 2018 -0700 @@ -34,8 +34,136 @@ class CompileTask; class CompileQueue; +/* + * The system supports 5 execution levels: + * * level 0 - interpreter + * * level 1 - C1 with full optimization (no profiling) + * * level 2 - C1 with invocation and backedge counters + * * level 3 - C1 with full profiling (level 2 + MDO) + * * level 4 - C2 + * + * Levels 0, 2 and 3 periodically notify the runtime about the current value of the counters + * (invocation counters and backedge counters). The frequency of these notifications is + * different at each level. These notifications are used by the policy to decide what transition + * to make. + * + * Execution starts at level 0 (interpreter), then the policy can decide either to compile the + * method at level 3 or level 2. The decision is based on the following factors: + * 1. The length of the C2 queue determines the next level. 
The observation is that level 2 + * is generally faster than level 3 by about 30%, therefore we would want to minimize the time + * a method spends at level 3. We should only spend the time at level 3 that is necessary to get + * adequate profiling. So, if the C2 queue is long enough it is more beneficial to go first to + * level 2, because if we transitioned to level 3 we would be stuck there until our C2 compile + * request makes its way through the long queue. When the load on C2 recedes we are going to + * recompile at level 3 and start gathering profiling information. + * 2. The length of C1 queue is used to dynamically adjust the thresholds, so as to introduce + * additional filtering if the compiler is overloaded. The rationale is that by the time a + * method gets compiled it can become unused, so it doesn't make sense to put too much onto the + * queue. + * + * After profiling is completed at level 3 the transition is made to level 4. Again, the length + * of the C2 queue is used as a feedback to adjust the thresholds. + * + * After the first C1 compile some basic information is determined about the code like the number + * of the blocks and the number of the loops. Based on that it can be decided that a method + * is trivial and compiling it with C1 will yield the same code. In this case the method is + * compiled at level 1 instead of 4. + * + * We also support profiling at level 0. If C1 is slow enough to produce the level 3 version of + * the code and the C2 queue is sufficiently small we can decide to start profiling in the + * interpreter (and continue profiling in the compiled code once the level 3 version arrives). + * If the profiling at level 0 is fully completed before level 3 version is produced, a level 2 + * version is compiled instead in order to run faster waiting for a level 4 version. 
+ * + * Compile queues are implemented as priority queues - for each method in the queue we compute + * the event rate (the number of invocation and backedge counter increments per unit of time). + * When getting an element off the queue we pick the one with the largest rate. Maintaining the + * rate also allows us to remove stale methods (the ones that got on the queue but stopped + * being used shortly after that). +*/ + +/* Command line options: + * - Tier?InvokeNotifyFreqLog and Tier?BackedgeNotifyFreqLog control the frequency of method + * invocation and backedge notifications. Basically every n-th invocation or backedge a mutator thread + * makes a call into the runtime. + * + * - Tier?InvocationThreshold, Tier?CompileThreshold, Tier?BackEdgeThreshold, Tier?MinInvocationThreshold control + * compilation thresholds. + * Level 2 thresholds are not used and are provided for option-compatibility and potential future use. + * Other thresholds work as follows: + * + * Transition from interpreter (level 0) to C1 with full profiling (level 3) happens when + * the following predicate is true (X is the level): + * + * i > TierXInvocationThreshold * s || (i > TierXMinInvocationThreshold * s && i + b > TierXCompileThreshold * s), + * + * where $i$ is the number of method invocations, $b$ number of backedges and $s$ is the scaling + * coefficient that will be discussed further. + * The intuition is to equalize the time that is spend profiling each method. + * The same predicate is used to control the transition from level 3 to level 4 (C2). It should be + * noted though that the thresholds are relative. Moreover i and b for the 0->3 transition come + * from Method* and for 3->4 transition they come from MDO (since profiled invocations are + * counted separately). 
Finally, if a method does not contain anything worth profiling, a transition + * from level 3 to level 4 occurs without considering thresholds (e.g., with fewer invocations than + * what is specified by Tier4InvocationThreshold). + * + * OSR transitions are controlled simply with b > TierXBackEdgeThreshold * s predicates. + * + * - Tier?LoadFeedback options are used to automatically scale the predicates described above depending + * on the compiler load. The scaling coefficients are computed as follows: + * + * s = queue_size_X / (TierXLoadFeedback * compiler_count_X) + 1, + * + * where queue_size_X is the current size of the compiler queue of level X, and compiler_count_X + * is the number of level X compiler threads. + * + * Basically these parameters describe how many methods should be in the compile queue + * per compiler thread before the scaling coefficient increases by one. + * + * This feedback provides the mechanism to automatically control the flow of compilation requests + * depending on the machine speed, mutator load and other external factors. + * + * - Tier3DelayOn and Tier3DelayOff parameters control another important feedback loop. + * Consider the following observation: a method compiled with full profiling (level 3) + * is about 30% slower than a method at level 2 (just invocation and backedge counters, no MDO). + * Normally, the following transitions will occur: 0->3->4. The problem arises when the C2 queue + * gets congested and the 3->4 transition is delayed. While the method is the C2 queue it continues + * executing at level 3 for much longer time than is required by the predicate and at suboptimal speed. + * The idea is to dynamically change the behavior of the system in such a way that if a substantial + * load on C2 is detected we would first do the 0->2 transition allowing a method to run faster. + * And then when the load decreases to allow 2->3 transitions. + * + * Tier3Delay* parameters control this switching mechanism. 
+ * Tier3DelayOn is the number of methods in the C2 queue per compiler thread after which the policy + * no longer does 0->3 transitions but does 0->2 transitions instead. + * Tier3DelayOff switches the original behavior back when the number of methods in the C2 queue + * per compiler thread falls below the specified amount. + * The hysteresis is necessary to avoid jitter. + * + * - TieredCompileTaskTimeout is the amount of time an idle method can spend in the compile queue. + * Basically, since we use the event rate d(i + b)/dt as a value of priority when selecting a method to + * compile from the compile queue, we also can detect stale methods for which the rate has been + * 0 for some time in the same iteration. Stale methods can appear in the queue when an application + * abruptly changes its behavior. + * + * - TieredStopAtLevel, is used mostly for testing. It allows to bypass the policy logic and stick + * to a given level. For example it's useful to set TieredStopAtLevel = 1 in order to compile everything + * with pure c1. + * + * - Tier0ProfilingStartPercentage allows the interpreter to start profiling when the inequalities in the + * 0->3 predicate are already exceeded by the given percentage but the level 3 version of the + * method is still not ready. We can even go directly from level 0 to 4 if c1 doesn't produce a compiled + * version in time. This reduces the overall transition to level 4 and decreases the startup time. + * Note that this behavior is also guarded by the Tier3Delay mechanism: when the c2 queue is too long + * these is not reason to start profiling prematurely. + * + * - TieredRateUpdateMinTime and TieredRateUpdateMaxTime are parameters of the rate computation. + * Basically, the rate is not computed more frequently than TieredRateUpdateMinTime and is considered + * to be zero if no events occurred in TieredRateUpdateMaxTime. 
+ */ class SimpleThresholdPolicy : public CompilationPolicy { + jlong _start_time; int _c1_count, _c2_count; // Check if the counter is big enough and set carry (effectively infinity). @@ -49,7 +177,7 @@ bool call_predicate(int i, int b, CompLevel cur_level, Method* method); bool loop_predicate(int i, int b, CompLevel cur_level, Method* method); // Common transition function. Given a predicate determines if a method should transition to another level. - CompLevel common(Predicate p, Method* method, CompLevel cur_level); + CompLevel common(Predicate p, Method* method, CompLevel cur_level, bool disable_feedback = false); // Transition functions. // call_event determines if a method should be compiled at a different // level with a regular invocation entry. @@ -58,6 +186,35 @@ // level. CompLevel loop_event(Method* method, CompLevel cur_level, JavaThread* thread); void print_counters(const char* prefix, const methodHandle& mh); + // Has a method been long around? + // We don't remove old methods from the compile queue even if they have + // very low activity (see select_task()). + inline bool is_old(Method* method); + // Was a given method inactive for a given number of milliseconds. + // If it is, we would remove it from the queue (see select_task()). + inline bool is_stale(jlong t, jlong timeout, Method* m); + // Compute the weight of the method for the compilation scheduling + inline double weight(Method* method); + // Apply heuristics and return true if x should be compiled before y + inline bool compare_methods(Method* x, Method* y); + // Compute event rate for a given method. The rate is the number of event (invocations + backedges) + // per millisecond. + inline void update_rate(jlong t, Method* m); + // Compute threshold scaling coefficient + inline double threshold_scale(CompLevel level, int feedback_k); + // If a method is old enough and is still in the interpreter we would want to + // start profiling without waiting for the compiled method to arrive. 
This function + // determines whether we should do that. + inline bool should_create_mdo(Method* method, CompLevel cur_level); + // Create MDO if necessary. + void create_mdo(const methodHandle& mh, JavaThread* thread); + // Is method profiled enough? + bool is_method_profiled(Method* method); + + double _increase_threshold_at_ratio; + + bool maybe_switch_to_aot(const methodHandle& mh, CompLevel cur_level, CompLevel next_level, JavaThread* thread); + protected: int c1_count() const { return _c1_count; } int c2_count() const { return _c2_count; } @@ -67,7 +224,7 @@ enum EventType { CALL, LOOP, COMPILE, REMOVE_FROM_QUEUE, UPDATE_IN_QUEUE, REPROFILE, MAKE_NOT_ENTRANT }; void print_event(EventType type, const methodHandle& mh, const methodHandle& imh, int bci, CompLevel level); // Print policy-specific information if necessary - virtual void print_specific(EventType type, const methodHandle& mh, const methodHandle& imh, int bci, CompLevel level) { } + virtual void print_specific(EventType type, const methodHandle& mh, const methodHandle& imh, int bci, CompLevel level); // Check if the method can be compiled, change level if necessary void compile(const methodHandle& mh, int bci, CompLevel level, JavaThread* thread); // Submit a given method for compilation @@ -87,8 +244,13 @@ CompLevel level, CompiledMethod* nm, JavaThread* thread); virtual void method_back_branch_event(const methodHandle& method, const methodHandle& inlinee, int bci, CompLevel level, CompiledMethod* nm, JavaThread* thread); + + void set_increase_threshold_at_ratio() { _increase_threshold_at_ratio = 100 / (100 - (double)IncreaseFirstTierCompileThresholdAt); } + void set_start_time(jlong t) { _start_time = t; } + jlong start_time() const { return _start_time; } + public: - SimpleThresholdPolicy() : _c1_count(0), _c2_count(0) { } + SimpleThresholdPolicy() : _start_time(0), _c1_count(0), _c2_count(0) { } virtual int compiler_count(CompLevel comp_level) { if (is_c1_compile(comp_level)) return c1_count(); 
if (is_c2_compile(comp_level)) return c2_count(); @@ -107,11 +269,7 @@ virtual bool is_mature(Method* method); // Initialize: set compiler thread count virtual void initialize(); - virtual bool should_not_inline(ciEnv* env, ciMethod* callee) { - return (env->comp_level() == CompLevel_limited_profile || - env->comp_level() == CompLevel_full_profile) && - callee->has_loops(); - } + virtual bool should_not_inline(ciEnv* env, ciMethod* callee); }; #endif // TIERED diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/runtime/thread.cpp --- a/src/hotspot/share/runtime/thread.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/runtime/thread.cpp Tue May 15 10:13:52 2018 -0700 @@ -114,9 +114,6 @@ #include "utilities/macros.hpp" #include "utilities/preserveException.hpp" #include "utilities/vmError.hpp" -#if INCLUDE_PARALLELGC -#include "gc/parallel/pcTasks.hpp" -#endif #if INCLUDE_JVMCI #include "jvmci/jvmciCompiler.hpp" #include "jvmci/jvmciRuntime.hpp" @@ -1470,7 +1467,6 @@ void JavaThread::collect_counters(typeArrayOop array) { if (JVMCICounterSize > 0) { - MutexLocker tl(Threads_lock); JavaThreadIteratorWithHandle jtiwh; for (int i = 0; i < array->length(); i++) { array->long_at_put(i, _jvmci_old_thread_counters[i]); @@ -3436,13 +3432,25 @@ // If CompilerThreads ever become non-JavaThreads, add them here } -// All JavaThreads + all non-JavaThreads (i.e., every thread in the system). -void Threads::threads_do(ThreadClosure* tc) { +// All JavaThreads +void Threads::java_threads_do(ThreadClosure* tc) { assert_locked_or_safepoint(Threads_lock); // ALL_JAVA_THREADS iterates through all JavaThreads. ALL_JAVA_THREADS(p) { tc->do_thread(p); } +} + +void Threads::java_threads_and_vm_thread_do(ThreadClosure* tc) { + assert_locked_or_safepoint(Threads_lock); + java_threads_do(tc); + tc->do_thread(VMThread::vm_thread()); +} + +// All JavaThreads + all non-JavaThreads (i.e., every thread in the system). 
+void Threads::threads_do(ThreadClosure* tc) { + assert_locked_or_safepoint(Threads_lock); + java_threads_do(tc); non_java_threads_do(tc); } @@ -4465,24 +4473,6 @@ possibly_parallel_threads_do(is_par, &tc); } -#if INCLUDE_PARALLELGC -// Used by ParallelScavenge -void Threads::create_thread_roots_tasks(GCTaskQueue* q) { - ALL_JAVA_THREADS(p) { - q->enqueue(new ThreadRootsTask(p)); - } - q->enqueue(new ThreadRootsTask(VMThread::vm_thread())); -} - -// Used by Parallel Old -void Threads::create_thread_roots_marking_tasks(GCTaskQueue* q) { - ALL_JAVA_THREADS(p) { - q->enqueue(new ThreadRootsMarkingTask(p)); - } - q->enqueue(new ThreadRootsMarkingTask(VMThread::vm_thread())); -} -#endif // INCLUDE_PARALLELGC - void Threads::nmethods_do(CodeBlobClosure* cf) { ALL_JAVA_THREADS(p) { // This is used by the code cache sweeper to mark nmethods that are active diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/runtime/thread.hpp --- a/src/hotspot/share/runtime/thread.hpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/runtime/thread.hpp Tue May 15 10:13:52 2018 -0700 @@ -2104,6 +2104,8 @@ static void add(JavaThread* p, bool force_daemon = false); static void remove(JavaThread* p); static void non_java_threads_do(ThreadClosure* tc); + static void java_threads_do(ThreadClosure* tc); + static void java_threads_and_vm_thread_do(ThreadClosure* tc); static void threads_do(ThreadClosure* tc); static void possibly_parallel_threads_do(bool is_par, ThreadClosure* tc); @@ -2142,10 +2144,6 @@ static void oops_do(OopClosure* f, CodeBlobClosure* cf); // This version may be called by sequential or parallel code. static void possibly_parallel_oops_do(bool is_par, OopClosure* f, CodeBlobClosure* cf); - // This creates a list of GCTasks, one per thread. - static void create_thread_roots_tasks(GCTaskQueue* q); - // This creates a list of GCTasks, one per thread, for marking objects. 
- static void create_thread_roots_marking_tasks(GCTaskQueue* q); // Apply "f->do_oop" to roots in all threads that // are part of compiled frames diff -r 9822dd521c15 -r d93ae85b18c1 src/hotspot/share/services/threadService.cpp --- a/src/hotspot/share/services/threadService.cpp Tue May 15 18:03:31 2018 +0530 +++ b/src/hotspot/share/services/threadService.cpp Tue May 15 10:13:52 2018 -0700 @@ -122,6 +122,9 @@ void ThreadService::remove_thread(JavaThread* thread, bool daemon) { Atomic::dec(&_exiting_threads_count); + if (daemon) { + Atomic::dec(&_exiting_daemon_threads_count); + } if (thread->is_hidden_from_external_view() || thread->is_jvmti_agent_thread()) { @@ -129,10 +132,8 @@ } _live_threads_count->set_value(_live_threads_count->get_value() - 1); - if (daemon) { _daemon_threads_count->set_value(_daemon_threads_count->get_value() - 1); - Atomic::dec(&_exiting_daemon_threads_count); } } diff -r 9822dd521c15 -r d93ae85b18c1 src/java.base/share/classes/java/lang/String.java --- a/src/java.base/share/classes/java/lang/String.java Tue May 15 18:03:31 2018 +0530 +++ b/src/java.base/share/classes/java/lang/String.java Tue May 15 10:13:52 2018 -0700 @@ -2602,7 +2602,7 @@ * Returns a string whose value is this string, with all leading * and trailing space removed, where space is defined * as any character whose codepoint is less than or equal to - * {@code '\u005Cu0020'} (the space character). + * {@code 'U+0020'} (the space character). *

* If this {@code String} object represents an empty character * sequence, or the first and last characters of character sequence @@ -2637,6 +2637,98 @@ } /** + * Returns a string whose value is this string, with all leading + * and trailing {@link Character#isWhitespace(int) white space} + * removed. + *

+ * If this {@code String} object represents an empty string, + * or if all code points in this string are + * {@link Character#isWhitespace(int) white space}, then an empty string + * is returned. + *

+ * Otherwise, returns a substring of this string beginning with the first + * code point that is not a {@link Character#isWhitespace(int) white space} + * up to and including the last code point that is not a + * {@link Character#isWhitespace(int) white space}. + *

+ * This method may be used to strip + * {@link Character#isWhitespace(int) white space} from + * the beginning and end of a string. + * + * @return a string whose value is this string, with all leading + * and trailing white space removed + * + * @see Character#isWhitespace(int) + * + * @since 11 + */ + public String strip() { + String ret = isLatin1() ? StringLatin1.strip(value) + : StringUTF16.strip(value); + return ret == null ? this : ret; + } + + /** + * Returns a string whose value is this string, with all leading + * {@link Character#isWhitespace(int) white space} removed. + *

+ * If this {@code String} object represents an empty string, + * or if all code points in this string are + * {@link Character#isWhitespace(int) white space}, then an empty string + * is returned. + *

+ Otherwise, returns a substring of this string beginning with the first + code point that is not a {@link Character#isWhitespace(int) white space} + up to and including the last code point of this string. + *

+ * This method may be used to trim + * {@link Character#isWhitespace(int) white space} from + * the beginning of a string. + * + * @return a string whose value is this string, with all leading white + * space removed + * + * @see Character#isWhitespace(int) + * + * @since 11 + */ + public String stripLeading() { + String ret = isLatin1() ? StringLatin1.stripLeading(value) + : StringUTF16.stripLeading(value); + return ret == null ? this : ret; + } + + /** + * Returns a string whose value is this string, with all trailing + * {@link Character#isWhitespace(int) white space} removed. + *

+ * If this {@code String} object represents an empty string, + * or if all characters in this string are + * {@link Character#isWhitespace(int) white space}, then an empty string + * is returned. + *

+ * Otherwise, returns a substring of this string beginning with the first + * code point of this string up to and including the last code point + * that is not a {@link Character#isWhitespace(int) white space}. + *

+ * This method may be used to trim + * {@link Character#isWhitespace(int) white space} from + * the end of a string. + * + * @return a string whose value is this string, with all trailing white + * space removed + * + * @see Character#isWhitespace(int) + * + * @since 11 + */ + public String stripTrailing() { + String ret = isLatin1() ? StringLatin1.stripTrailing(value) + : StringUTF16.stripTrailing(value); + return ret == null ? this : ret; + } + + /** * This object (which is already a string!) is itself returned. * * @return the string itself. diff -r 9822dd521c15 -r d93ae85b18c1 src/java.base/share/classes/java/lang/StringLatin1.java --- a/src/java.base/share/classes/java/lang/StringLatin1.java Tue May 15 18:03:31 2018 +0530 +++ b/src/java.base/share/classes/java/lang/StringLatin1.java Tue May 15 10:13:52 2018 -0700 @@ -538,6 +538,57 @@ newString(value, st, len - st) : null; } + public static int indexOfNonWhitespace(byte[] value) { + int length = value.length; + int left = 0; + while (left < length) { + char ch = (char)(value[left] & 0xff); + if (ch != ' ' && ch != '\t' && !Character.isWhitespace(ch)) { + break; + } + left++; + } + return left; + } + + public static int lastIndexOfNonWhitespace(byte[] value) { + int length = value.length; + int right = length; + while (0 < right) { + char ch = (char)(value[right - 1] & 0xff); + if (ch != ' ' && ch != '\t' && !Character.isWhitespace(ch)) { + break; + } + right--; + } + return right; + } + + public static String strip(byte[] value) { + int left = indexOfNonWhitespace(value); + if (left == value.length) { + return ""; + } + int right = lastIndexOfNonWhitespace(value); + return ((left > 0) || (right < value.length)) ? newString(value, left, right - left) : null; + } + + public static String stripLeading(byte[] value) { + int left = indexOfNonWhitespace(value); + if (left == value.length) { + return ""; + } + return (left != 0) ? 
newString(value, left, value.length - left) : null; + } + + public static String stripTrailing(byte[] value) { + int right = lastIndexOfNonWhitespace(value); + if (right == 0) { + return ""; + } + return (right != value.length) ? newString(value, 0, right) : null; + } + public static void putChar(byte[] val, int index, int c) { //assert (canEncode(c)); val[index] = (byte)(c); diff -r 9822dd521c15 -r d93ae85b18c1 src/java.base/share/classes/java/lang/StringUTF16.java --- a/src/java.base/share/classes/java/lang/StringUTF16.java Tue May 15 18:03:31 2018 +0530 +++ b/src/java.base/share/classes/java/lang/StringUTF16.java Tue May 15 10:13:52 2018 -0700 @@ -856,6 +856,61 @@ null; } + + public static int indexOfNonWhitespace(byte[] value) { + int length = value.length >> 1; + int left = 0; + while (left < length) { + int codepoint = codePointAt(value, left, length); + if (codepoint != ' ' && codepoint != '\t' && !Character.isWhitespace(codepoint)) { + break; + } + left += Character.charCount(codepoint); + } + return left; + } + + public static int lastIndexOfNonWhitespace(byte[] value) { + int length = value.length >> 1; + int right = length; + while (0 < right) { + int codepoint = codePointBefore(value, right); + if (codepoint != ' ' && codepoint != '\t' && !Character.isWhitespace(codepoint)) { + break; + } + right -= Character.charCount(codepoint); + } + return right; + } + + public static String strip(byte[] value) { + int length = value.length >> 1; + int left = indexOfNonWhitespace(value); + if (left == length) { + return ""; + } + int right = lastIndexOfNonWhitespace(value); + return ((left > 0) || (right < length)) ? newString(value, left, right - left) : null; + } + + public static String stripLeading(byte[] value) { + int length = value.length >> 1; + int left = indexOfNonWhitespace(value); + if (left == length) { + return ""; + } + return (left != 0) ? 
newString(value, left, length - left) : null; + } + + public static String stripTrailing(byte[] value) { + int length = value.length >> 1; + int right = lastIndexOfNonWhitespace(value); + if (right == 0) { + return ""; + } + return (right != length) ? newString(value, 0, right) : null; + } + private static void putChars(byte[] val, int index, char[] str, int off, int end) { while (off < end) { putChar(val, index++, str[off++]); diff -r 9822dd521c15 -r d93ae85b18c1 src/java.base/share/classes/java/lang/reflect/AccessibleObject.java --- a/src/java.base/share/classes/java/lang/reflect/AccessibleObject.java Tue May 15 18:03:31 2018 +0530 +++ b/src/java.base/share/classes/java/lang/reflect/AccessibleObject.java Tue May 15 10:13:52 2018 -0700 @@ -564,7 +564,6 @@ throw new AssertionError("All subclasses should override this method"); } - // Shared access checking logic. // For non-public members or members in package-private classes, @@ -674,4 +673,13 @@ } return printStackWhenAccessFails; } + + /** + * Returns the root AccessibleObject; or null if this object is the root. + * + * All subclasses override this method. + */ + AccessibleObject getRoot() { + throw new InternalError(); + } } diff -r 9822dd521c15 -r d93ae85b18c1 src/java.base/share/classes/java/lang/reflect/Constructor.java --- a/src/java.base/share/classes/java/lang/reflect/Constructor.java Tue May 15 18:03:31 2018 +0530 +++ b/src/java.base/share/classes/java/lang/reflect/Constructor.java Tue May 15 10:13:52 2018 -0700 @@ -103,11 +103,8 @@ // occur in annotation code. private Constructor root; - /** - * Used by Excecutable for annotation sharing. 
- */ @Override - Executable getRoot() { + Constructor getRoot() { return root; } diff -r 9822dd521c15 -r d93ae85b18c1 src/java.base/share/classes/java/lang/reflect/Executable.java --- a/src/java.base/share/classes/java/lang/reflect/Executable.java Tue May 15 18:03:31 2018 +0530 +++ b/src/java.base/share/classes/java/lang/reflect/Executable.java Tue May 15 10:13:52 2018 -0700 @@ -56,11 +56,6 @@ abstract byte[] getAnnotationBytes(); /** - * Accessor method to allow code sharing - */ - abstract Executable getRoot(); - - /** * Does the Executable have generic information. */ abstract boolean hasGenericInformation(); @@ -602,7 +597,7 @@ if ((declAnnos = declaredAnnotations) == null) { synchronized (this) { if ((declAnnos = declaredAnnotations) == null) { - Executable root = getRoot(); + Executable root = (Executable)getRoot(); if (root != null) { declAnnos = root.declaredAnnotations(); } else { diff -r 9822dd521c15 -r d93ae85b18c1 src/java.base/share/classes/java/lang/reflect/Field.java --- a/src/java.base/share/classes/java/lang/reflect/Field.java Tue May 15 18:03:31 2018 +0530 +++ b/src/java.base/share/classes/java/lang/reflect/Field.java Tue May 15 10:13:52 2018 -0700 @@ -1128,6 +1128,11 @@ } } + @Override + Field getRoot() { + return root; + } + /** * @throws NullPointerException {@inheritDoc} * @since 1.5 diff -r 9822dd521c15 -r d93ae85b18c1 src/java.base/share/classes/java/lang/reflect/Method.java --- a/src/java.base/share/classes/java/lang/reflect/Method.java Tue May 15 18:03:31 2018 +0530 +++ b/src/java.base/share/classes/java/lang/reflect/Method.java Tue May 15 10:13:52 2018 -0700 @@ -198,11 +198,8 @@ checkCanSetAccessible(caller, clazz); } - /** - * Used by Excecutable for annotation sharing. 
- */ @Override - Executable getRoot() { + Method getRoot() { return root; } diff -r 9822dd521c15 -r d93ae85b18c1 src/java.base/share/classes/java/lang/reflect/ReflectAccess.java --- a/src/java.base/share/classes/java/lang/reflect/ReflectAccess.java Tue May 15 18:03:31 2018 +0530 +++ b/src/java.base/share/classes/java/lang/reflect/ReflectAccess.java Tue May 15 10:13:52 2018 -0700 @@ -154,4 +154,9 @@ public Constructor copyConstructor(Constructor arg) { return arg.copy(); } + + @SuppressWarnings("unchecked") + public T getRoot(T obj) { + return (T) obj.getRoot(); + } } diff -r 9822dd521c15 -r d93ae85b18c1 src/java.base/share/classes/java/math/BigInteger.java --- a/src/java.base/share/classes/java/math/BigInteger.java Tue May 15 18:03:31 2018 +0530 +++ b/src/java.base/share/classes/java/math/BigInteger.java Tue May 15 10:13:52 2018 -0700 @@ -52,19 +52,17 @@ * and a few other miscellaneous operations. * *

Semantics of arithmetic operations exactly mimic those of Java's integer - * arithmetic operators, as defined in The Java Language Specification. + * arithmetic operators, as defined in The Java™ Language Specification. * For example, division by zero throws an {@code ArithmeticException}, and * division of a negative by a positive yields a negative (or zero) remainder. - * All of the details in the Spec concerning overflow are ignored, as - * BigIntegers are made as large as necessary to accommodate the results of an - * operation. * *

Semantics of shift operations extend those of Java's shift operators * to allow for negative shift distances. A right-shift with a negative * shift distance results in a left shift, and vice-versa. The unsigned - * right shift operator ({@code >>>}) is omitted, as this operation makes - * little sense in combination with the "infinite word size" abstraction - * provided by this class. + * right shift operator ({@code >>>}) is omitted since this operation + * only makes sense for a fixed sized word and not for a + * representation conceptually having an infinite number of leading + * virtual sign bits. * *

Semantics of bitwise logical operations exactly mimic those of Java's * bitwise integer operators. The binary operators ({@code and}, @@ -84,8 +82,8 @@ * extended so that it contains the designated bit. None of the single-bit * operations can produce a BigInteger with a different sign from the * BigInteger being operated on, as they affect only a single bit, and the - * "infinite word size" abstraction provided by this class ensures that there - * are infinitely many "virtual sign bits" preceding each BigInteger. + * arbitrarily large abstraction provided by this class ensures that conceptually + * there are infinitely many "virtual sign bits" preceding each BigInteger. * *

For the sake of brevity and clarity, pseudo-code is used throughout the * descriptions of BigInteger methods. The pseudo-code expression @@ -105,13 +103,18 @@ * +2{@code Integer.MAX_VALUE} (exclusive) * and may support values outside of that range. * + * An {@code ArithmeticException} is thrown when a BigInteger + * constructor or method would generate a value outside of the + * supported range. + * * The range of probable prime values is limited and may be less than * the full supported positive range of {@code BigInteger}. * The range must be at least 1 to 2500000000. * * @implNote - * BigInteger constructors and operations throw {@code ArithmeticException} when - * the result is out of the supported range of + * In the reference implementation, BigInteger constructors and + * operations throw {@code ArithmeticException} when the result is out + * of the supported range of * -2{@code Integer.MAX_VALUE} (exclusive) to * +2{@code Integer.MAX_VALUE} (exclusive). * diff -r 9822dd521c15 -r d93ae85b18c1 src/java.base/share/classes/java/util/Objects.java --- a/src/java.base/share/classes/java/util/Objects.java Tue May 15 18:03:31 2018 +0530 +++ b/src/java.base/share/classes/java/util/Objects.java Tue May 15 10:13:52 2018 -0700 @@ -35,16 +35,16 @@ * on objects, or checking certain conditions before operation. These utilities * include {@code null}-safe or {@code null}-tolerant methods for computing the * hash code of an object, returning a string for an object, comparing two - * objects, and checking if indexes or sub-range values are out-of-bounds. + * objects, and checking if indexes or sub-range values are out of bounds. * * @apiNote * Static methods such as {@link Objects#checkIndex}, * {@link Objects#checkFromToIndex}, and {@link Objects#checkFromIndexSize} are * provided for the convenience of checking if values corresponding to indexes - * and sub-ranges are out-of-bounds. + * and sub-ranges are out of bounds. 
* Variations of these static methods support customization of the runtime * exception, and corresponding exception detail message, that is thrown when - * values are out-of-bounds. Such methods accept a functional interface + * values are out of bounds. Such methods accept a functional interface * argument, instances of {@code BiFunction}, that maps out-of-bound values to a * runtime exception. Care should be taken when using such methods in * combination with an argument that is a lambda expression, method reference or @@ -352,7 +352,7 @@ * Checks if the {@code index} is within the bounds of the range from * {@code 0} (inclusive) to {@code length} (exclusive). * - *

The {@code index} is defined to be out-of-bounds if any of the + *

The {@code index} is defined to be out of bounds if any of the * following inequalities is true: *

    *
  • {@code index < 0}
  • @@ -363,7 +363,7 @@ * @param index the index * @param length the upper-bound (exclusive) of the range * @return {@code index} if it is within bounds of the range - * @throws IndexOutOfBoundsException if the {@code index} is out-of-bounds + * @throws IndexOutOfBoundsException if the {@code index} is out of bounds * @since 9 */ @ForceInline @@ -377,7 +377,7 @@ * {@code toIndex} (exclusive) is within the bounds of range from {@code 0} * (inclusive) to {@code length} (exclusive). * - *

    The sub-range is defined to be out-of-bounds if any of the following + *

    The sub-range is defined to be out of bounds if any of the following * inequalities is true: *

      *
    • {@code fromIndex < 0}
    • @@ -390,7 +390,7 @@ * @param toIndex the upper-bound (exclusive) of the sub-range * @param length the upper-bound (exclusive) the range * @return {@code fromIndex} if the sub-range within bounds of the range - * @throws IndexOutOfBoundsException if the sub-range is out-of-bounds + * @throws IndexOutOfBoundsException if the sub-range is out of bounds * @since 9 */ public static @@ -403,7 +403,7 @@ * {@code fromIndex + size} (exclusive) is within the bounds of range from * {@code 0} (inclusive) to {@code length} (exclusive). * - *

      The sub-range is defined to be out-of-bounds if any of the following + *

      The sub-range is defined to be out of bounds if any of the following * inequalities is true: *

        *
      • {@code fromIndex < 0}
      • @@ -416,7 +416,7 @@ * @param size the size of the sub-range * @param length the upper-bound (exclusive) of the range * @return {@code fromIndex} if the sub-range within bounds of the range - * @throws IndexOutOfBoundsException if the sub-range is out-of-bounds + * @throws IndexOutOfBoundsException if the sub-range is out of bounds * @since 9 */ public static diff -r 9822dd521c15 -r d93ae85b18c1 src/java.base/share/classes/jdk/internal/reflect/LangReflectAccess.java --- a/src/java.base/share/classes/jdk/internal/reflect/LangReflectAccess.java Tue May 15 18:03:31 2018 +0530 +++ b/src/java.base/share/classes/jdk/internal/reflect/LangReflectAccess.java Tue May 15 10:13:52 2018 -0700 @@ -115,4 +115,7 @@ /** Makes a "child" copy of a Constructor */ public Constructor copyConstructor(Constructor arg); + + /** Gets the root of the given AccessibleObject object; null if arg is the root */ + public T getRoot(T obj); } diff -r 9822dd521c15 -r d93ae85b18c1 src/java.base/share/classes/jdk/internal/reflect/ReflectionFactory.java --- a/src/java.base/share/classes/jdk/internal/reflect/ReflectionFactory.java Tue May 15 18:03:31 2018 +0530 +++ b/src/java.base/share/classes/jdk/internal/reflect/ReflectionFactory.java Tue May 15 10:13:52 2018 -0700 @@ -39,7 +39,6 @@ import java.lang.reflect.Method; import java.lang.reflect.Constructor; import java.lang.reflect.Modifier; -import java.security.Permission; import java.security.PrivilegedAction; import java.util.Objects; import java.util.Properties; @@ -172,6 +171,15 @@ */ public FieldAccessor newFieldAccessor(Field field, boolean override) { checkInitted(); + + Field root = langReflectAccess.getRoot(field); + if (root != null) { + // FieldAccessor will use the root unless the modifiers have + // been overrridden + if (root.getModifiers() == field.getModifiers() || !override) { + field = root; + } + } return UnsafeFieldAccessorFactory.newFieldAccessor(field, override); } @@ -185,6 +193,12 @@ } } + // use the root Method that will 
not cache caller class + Method root = langReflectAccess.getRoot(method); + if (root != null) { + method = root; + } + if (noInflation && !ReflectUtil.isVMAnonymousClass(method.getDeclaringClass())) { return new MethodAccessorGenerator(). generateMethod(method.getDeclaringClass(), @@ -214,6 +228,13 @@ return new InstantiationExceptionConstructorAccessorImpl ("Can not instantiate java.lang.Class"); } + + // use the root Constructor that will not cache caller class + Constructor root = langReflectAccess.getRoot(c); + if (root != null) { + c = root; + } + // Bootstrapping issue: since we use Class.newInstance() in // the ConstructorAccessor generation process, we have to // break the cycle here. diff -r 9822dd521c15 -r d93ae85b18c1 src/java.base/share/classes/jdk/internal/util/Preconditions.java --- a/src/java.base/share/classes/jdk/internal/util/Preconditions.java Tue May 15 18:03:31 2018 +0530 +++ b/src/java.base/share/classes/jdk/internal/util/Preconditions.java Tue May 15 10:13:52 2018 -0700 @@ -185,13 +185,13 @@ // Switch to default if fewer or more arguments than required are supplied switch ((args.size() != argSize) ? "" : checkKind) { case "checkIndex": - return String.format("Index %d out-of-bounds for length %d", + return String.format("Index %d out of bounds for length %d", args.get(0), args.get(1)); case "checkFromToIndex": - return String.format("Range [%d, %d) out-of-bounds for length %d", + return String.format("Range [%d, %d) out of bounds for length %d", args.get(0), args.get(1), args.get(2)); case "checkFromIndexSize": - return String.format("Range [%d, %The {@code index} is defined to be out-of-bounds if any of the + *

        The {@code index} is defined to be out of bounds if any of the * following inequalities is true: *

          *
        • {@code index < 0}
        • @@ -210,14 +210,14 @@ *
        • {@code length < 0}, which is implied from the former inequalities
        • *
        * - *

        If the {@code index} is out-of-bounds, then a runtime exception is + *

        If the {@code index} is out of bounds, then a runtime exception is * thrown that is the result of applying the following arguments to the * exception formatter: the name of this method, {@code checkIndex}; * and an unmodifiable list integers whose values are, in order, the * out-of-bounds arguments {@code index} and {@code length}. * * @param the type of runtime exception to throw if the arguments are - * out-of-bounds + * out of bounds * @param index the index * @param length the upper-bound (exclusive) of the range * @param oobef the exception formatter that when applied with this @@ -228,9 +228,9 @@ * instead (though it may be more efficient). * Exceptions thrown by the formatter are relayed to the caller. * @return {@code index} if it is within bounds of the range - * @throws X if the {@code index} is out-of-bounds and the exception + * @throws X if the {@code index} is out of bounds and the exception * formatter is non-{@code null} - * @throws IndexOutOfBoundsException if the {@code index} is out-of-bounds + * @throws IndexOutOfBoundsException if the {@code index} is out of bounds * and the exception formatter is {@code null} * @since 9 * @@ -254,7 +254,7 @@ * {@code toIndex} (exclusive) is within the bounds of range from {@code 0} * (inclusive) to {@code length} (exclusive). * - *

        The sub-range is defined to be out-of-bounds if any of the following + *

        The sub-range is defined to be out of bounds if any of the following * inequalities is true: *

          *
        • {@code fromIndex < 0}
        • @@ -263,14 +263,14 @@ *
        • {@code length < 0}, which is implied from the former inequalities
        • *
        * - *

        If the sub-range is out-of-bounds, then a runtime exception is + *

        If the sub-range is out of bounds, then a runtime exception is * thrown that is the result of applying the following arguments to the * exception formatter: the name of this method, {@code checkFromToIndex}; * and an unmodifiable list integers whose values are, in order, the * out-of-bounds arguments {@code fromIndex}, {@code toIndex}, and {@code length}. * * @param the type of runtime exception to throw if the arguments are - * out-of-bounds + * out of bounds * @param fromIndex the lower-bound (inclusive) of the sub-range * @param toIndex the upper-bound (exclusive) of the sub-range * @param length the upper-bound (exclusive) the range @@ -282,9 +282,9 @@ * instead (though it may be more efficient). * Exceptions thrown by the formatter are relayed to the caller. * @return {@code fromIndex} if the sub-range within bounds of the range - * @throws X if the sub-range is out-of-bounds and the exception factory + * @throws X if the sub-range is out of bounds and the exception factory * function is non-{@code null} - * @throws IndexOutOfBoundsException if the sub-range is out-of-bounds and + * @throws IndexOutOfBoundsException if the sub-range is out of bounds and * the exception factory function is {@code null} * @since 9 */ @@ -301,7 +301,7 @@ * {@code fromIndex + size} (exclusive) is within the bounds of range from * {@code 0} (inclusive) to {@code length} (exclusive). * - *

        The sub-range is defined to be out-of-bounds if any of the following + *

        The sub-range is defined to be out of bounds if any of the following * inequalities is true: *

          *
        • {@code fromIndex < 0}
        • @@ -310,7 +310,7 @@ *
        • {@code length < 0}, which is implied from the former inequalities
        • *
        * - *

        If the sub-range is out-of-bounds, then a runtime exception is + *

        If the sub-range is out of bounds, then a runtime exception is * thrown that is the result of applying the following arguments to the * exception formatter: the name of this method, {@code checkFromIndexSize}; * and an unmodifiable list integers whose values are, in order, the @@ -318,7 +318,7 @@ * {@code length}. * * @param the type of runtime exception to throw if the arguments are - * out-of-bounds + * out of bounds * @param fromIndex the lower-bound (inclusive) of the sub-interval * @param size the size of the sub-range * @param length the upper-bound (exclusive) of the range @@ -330,9 +330,9 @@ * instead (though it may be more efficient). * Exceptions thrown by the formatter are relayed to the caller. * @return {@code fromIndex} if the sub-range within bounds of the range - * @throws X if the sub-range is out-of-bounds and the exception factory + * @throws X if the sub-range is out of bounds and the exception factory * function is non-{@code null} - * @throws IndexOutOfBoundsException if the sub-range is out-of-bounds and + * @throws IndexOutOfBoundsException if the sub-range is out of bounds and * the exception factory function is {@code null} * @since 9 */ diff -r 9822dd521c15 -r d93ae85b18c1 src/java.base/share/classes/sun/nio/ch/SocketChannelImpl.java --- a/src/java.base/share/classes/sun/nio/ch/SocketChannelImpl.java Tue May 15 18:03:31 2018 +0530 +++ b/src/java.base/share/classes/sun/nio/ch/SocketChannelImpl.java Tue May 15 10:13:52 2018 -0700 @@ -867,11 +867,22 @@ // set state to ST_KILLPENDING synchronized (stateLock) { assert state == ST_CLOSING; - // if connected, and the channel is registered with a Selector, we - // shutdown the output so that the peer reads EOF + // if connected and the channel is registered with a Selector then + // shutdown the output if possible so that the peer reads EOF. 
If + // SO_LINGER is enabled and set to a non-zero value then it needs to + // be disabled so that the Selector does not wait when it closes + // the socket. if (connected && isRegistered()) { try { - Net.shutdown(fd, Net.SHUT_WR); + SocketOption opt = StandardSocketOptions.SO_LINGER; + int interval = (int) Net.getSocketOption(fd, Net.UNSPEC, opt); + if (interval != 0) { + if (interval > 0) { + // disable SO_LINGER + Net.setSocketOption(fd, Net.UNSPEC, opt, -1); + } + Net.shutdown(fd, Net.SHUT_WR); + } } catch (IOException ignore) { } } state = ST_KILLPENDING; diff -r 9822dd521c15 -r d93ae85b18c1 src/java.desktop/share/classes/javax/imageio/ImageWriteParam.java --- a/src/java.desktop/share/classes/javax/imageio/ImageWriteParam.java Tue May 15 18:03:31 2018 +0530 +++ b/src/java.desktop/share/classes/javax/imageio/ImageWriteParam.java Tue May 15 10:13:52 2018 -0700 @@ -1243,7 +1243,7 @@ throw new IllegalStateException("No compression type set!"); } if (quality < 0.0F || quality > 1.0F) { - throw new IllegalArgumentException("Quality out-of-bounds!"); + throw new IllegalArgumentException("Quality out of bounds!"); } this.compressionQuality = quality; } @@ -1341,7 +1341,7 @@ throw new IllegalStateException("No compression type set!"); } if (quality < 0.0F || quality > 1.0F) { - throw new IllegalArgumentException("Quality out-of-bounds!"); + throw new IllegalArgumentException("Quality out of bounds!"); } return -1.0F; } diff -r 9822dd521c15 -r d93ae85b18c1 src/java.net.http/share/classes/java/net/http/HttpRequest.java --- a/src/java.net.http/share/classes/java/net/http/HttpRequest.java Tue May 15 18:03:31 2018 +0530 +++ b/src/java.net.http/share/classes/java/net/http/HttpRequest.java Tue May 15 10:13:52 2018 -0700 @@ -598,7 +598,7 @@ * @param length the number of bytes to use * @return a BodyPublisher * @throws IndexOutOfBoundsException if the sub-range is defined to be - * out-of-bounds + * out of bounds */ public static BodyPublisher ofByteArray(byte[] buf, int 
offset, int length) { Objects.checkFromIndexSize(offset, length, buf.length); diff -r 9822dd521c15 -r d93ae85b18c1 src/java.xml/share/classes/com/sun/org/apache/xerces/internal/impl/dtd/XMLDTDValidator.java --- a/src/java.xml/share/classes/com/sun/org/apache/xerces/internal/impl/dtd/XMLDTDValidator.java Tue May 15 18:03:31 2018 +0530 +++ b/src/java.xml/share/classes/com/sun/org/apache/xerces/internal/impl/dtd/XMLDTDValidator.java Tue May 15 10:13:52 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. */ /* * Licensed to the Apache Software Foundation (ASF) under one or more @@ -20,6 +20,8 @@ package com.sun.org.apache.xerces.internal.impl.dtd; +import java.util.Iterator; + import com.sun.org.apache.xerces.internal.impl.Constants; import com.sun.org.apache.xerces.internal.impl.RevalidationHandler; import com.sun.org.apache.xerces.internal.impl.XMLEntityManager; @@ -51,7 +53,6 @@ import com.sun.org.apache.xerces.internal.xni.parser.XMLConfigurationException; import com.sun.org.apache.xerces.internal.xni.parser.XMLDocumentFilter; import com.sun.org.apache.xerces.internal.xni.parser.XMLDocumentSource; -import java.util.Iterator; /** * The DTD validator. The validator implements a document @@ -84,7 +85,7 @@ * @author Jeffrey Rodriguez IBM * @author Neil Graham, IBM * - * @LastModified: Nov 2017 + * @LastModified: May 2018 */ public class XMLDTDValidator implements XMLComponent, XMLDocumentFilter, XMLDTDValidatorFilter, RevalidationHandler { @@ -93,11 +94,6 @@ // Constants // - /** Symbol: "<<datatypes>>". */ - - /** Top level scope (-1). */ - private static final int TOP_LEVEL_SCOPE = -1; - // feature identifiers /** Feature identifier: namespaces. 
*/ @@ -120,9 +116,8 @@ protected static final String WARN_ON_DUPLICATE_ATTDEF = Constants.XERCES_FEATURE_PREFIX + Constants.WARN_ON_DUPLICATE_ATTDEF_FEATURE; - protected static final String PARSER_SETTINGS = - Constants.XERCES_FEATURE_PREFIX + Constants.PARSER_SETTINGS; - + protected static final String PARSER_SETTINGS = + Constants.XERCES_FEATURE_PREFIX + Constants.PARSER_SETTINGS; // property identifiers @@ -348,7 +343,7 @@ private final QName fTempQName = new QName(); /** Temporary string buffers. */ - private final StringBuffer fBuffer = new StringBuffer(); + private final StringBuilder fBuffer = new StringBuilder(); // symbols: general @@ -492,11 +487,6 @@ * * @param featureId The feature identifier. * @param state The state of the feature. - * - * @throws SAXNotRecognizedException The component should not throw - * this exception. - * @throws SAXNotSupportedException The component should not throw - * this exception. */ public void setFeature(String featureId, boolean state) throws XMLConfigurationException { @@ -520,11 +510,6 @@ * * @param propertyId The property identifier. * @param value The value of the property. - * - * @throws SAXNotRecognizedException The component should not throw - * this exception. - * @throws SAXNotSupportedException The component should not throw - * this exception. 
*/ public void setProperty(String propertyId, Object value) throws XMLConfigurationException { @@ -1198,7 +1183,7 @@ // add attribute fTempQName.setValues(attPrefix, attLocalpart, attRawName, fTempAttDecl.name.uri); - int newAttr = attributes.addAttribute(fTempQName, attType, attValue); + attributes.addAttribute(fTempQName, attType, attValue); } } // get next att decl in the Grammar for this element @@ -1232,14 +1217,12 @@ } } } - int attDefIndex = -1; int position = fDTDGrammar.getFirstAttributeDeclIndex(elementIndex); while (position != -1) { fDTDGrammar.getAttributeDecl(position, fTempAttDecl); if (fTempAttDecl.name.rawname == attrRawName) { // found the match att decl, - attDefIndex = position; declared = true; break; } @@ -1385,7 +1368,7 @@ } if (!found) { - StringBuffer enumValueString = new StringBuffer(); + StringBuilder enumValueString = new StringBuilder(); if (enumVals != null) for (int i = 0; i < enumVals.length; i++) { enumValueString.append(enumVals[i]+" "); @@ -1509,7 +1492,6 @@ boolean spaceStart = false; boolean readingNonSpace = false; int count = 0; - int eaten = 0; String attrValue = attributes.getValue(index); char[] attValue = new char[attrValue.length()]; @@ -1530,33 +1512,7 @@ fBuffer.append(attValue[i]); count++; } - else { - if (leadingSpace || !spaceStart) { - eaten ++; - /*** BUG #3512 *** - int entityCount = attributes.getEntityCount(index); - for (int j = 0; j < entityCount; j++) { - int offset = attributes.getEntityOffset(index, j); - int length = attributes.getEntityLength(index, j); - if (offset <= i-eaten+1) { - if (offset+length >= i-eaten+1) { - if (length > 0) - length--; - } - } - else { - if (offset > 0) - offset--; - } - attributes.setEntityOffset(index, j, offset); - attributes.setEntityLength(index, j, length); - } - /***/ - } - } - - } - else { + } else { readingNonSpace = true; spaceStart = false; leadingSpace = false; @@ -1568,23 +1524,6 @@ // check if the last appended character is a space. 
if (count > 0 && fBuffer.charAt(count-1) == ' ') { fBuffer.setLength(count-1); - /*** BUG #3512 *** - int entityCount = attributes.getEntityCount(index); - for (int j=0; j < entityCount; j++) { - int offset = attributes.getEntityOffset(index, j); - int length = attributes.getEntityLength(index, j); - if (offset < count-1) { - if (offset+length == count) { - length--; - } - } - else { - offset--; - } - attributes.setEntityOffset(index, j, offset); - attributes.setEntityLength(index, j, length); - } - /***/ } String newValue = fBuffer.toString(); attributes.setValue(index, newValue); @@ -1645,9 +1584,6 @@ fDTDGrammar.getElementDecl(elementIndex, fTempElementDecl); - // Get the element name index from the element - final String elementType = fCurrentElement.rawname; - // Get out the content spec for this element final int contentType = fCurrentContentSpecType; @@ -1711,18 +1647,6 @@ } // checkContent(int,int,QName[]):int - /** Returns the content spec type for an element index. */ - private int getContentSpecType(int elementIndex) { - - int contentSpecType = -1; - if (elementIndex > -1) { - if (fDTDGrammar.getElementDecl(elementIndex,fTempElementDecl)) { - contentSpecType = fTempElementDecl.type; - } - } - return contentSpecType; - } - /** Character data in content. */ private void charDataInContent() { @@ -1754,7 +1678,11 @@ return attrDecl.simpleType.list ? 
XMLSymbols.fENTITIESSymbol : XMLSymbols.fENTITYSymbol; } case XMLSimpleType.TYPE_ENUMERATION: { - StringBuffer buffer = new StringBuffer(); + int totalLength = 2; + for (int i = 0; i < attrDecl.simpleType.enumeration.length; i++) { + totalLength += attrDecl.simpleType.enumeration[i].length() + 1; + } + StringBuilder buffer = new StringBuilder(totalLength); buffer.append('('); for (int i=0; i 0) { diff -r 9822dd521c15 -r d93ae85b18c1 src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/BinaryContainer.java --- a/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/BinaryContainer.java Tue May 15 18:03:31 2018 +0530 +++ b/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/BinaryContainer.java Tue May 15 10:13:52 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -528,7 +528,7 @@ switch (osName) { case "Linux": case "SunOS": - JELFRelocObject elfobj = new JELFRelocObject(this, outputFileName); + JELFRelocObject elfobj = JELFRelocObject.newInstance(this, outputFileName); elfobj.createELFRelocObject(relocationTable, symbolTable.values()); break; case "Mac OS X": @@ -576,7 +576,7 @@ * @param info relocation information to be added */ public void addRelocation(Relocation info) { - // System.out.println("# Relocation [" + symName + "] [" + info.getOffset() + "] [" + + // System.out.println("# Relocation [" + info.getSymbol() + "] [" + info.getOffset() + "] [" + // info.getSection().getContainerName() + "] [" + info.getSymbol().getName() + "] [" + // info.getSymbol().getOffset() + " @ " + info.getSymbol().getSection().getContainerName() + // "]"); diff -r 9822dd521c15 -r d93ae85b18c1 src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/elf/AArch64JELFRelocObject.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/elf/AArch64JELFRelocObject.java Tue May 15 10:13:52 2018 -0700 @@ -0,0 +1,134 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, Red Hat Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package jdk.tools.jaotc.binformat.elf; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; + +import jdk.tools.jaotc.binformat.BinaryContainer; +import jdk.tools.jaotc.binformat.ByteContainer; +import jdk.tools.jaotc.binformat.CodeContainer; +import jdk.tools.jaotc.binformat.ReadOnlyDataContainer; +import jdk.tools.jaotc.binformat.Relocation; +import jdk.tools.jaotc.binformat.Relocation.RelocType; +import jdk.tools.jaotc.binformat.Symbol; +import jdk.tools.jaotc.binformat.Symbol.Binding; +import jdk.tools.jaotc.binformat.Symbol.Kind; + +import jdk.tools.jaotc.binformat.elf.ElfSymbol; +import jdk.tools.jaotc.binformat.elf.ElfTargetInfo; +import jdk.tools.jaotc.binformat.elf.Elf.Elf64_Ehdr; +import jdk.tools.jaotc.binformat.elf.Elf.Elf64_Shdr; +import jdk.tools.jaotc.binformat.elf.Elf.Elf64_Sym; +import jdk.tools.jaotc.binformat.elf.Elf.Elf64_Rela; + + +public class AArch64JELFRelocObject extends JELFRelocObject { + + AArch64JELFRelocObject(BinaryContainer binContainer, String outputFileName) { + super(binContainer, outputFileName); + } + + void createRelocation(Symbol symbol, Relocation reloc, ElfRelocTable elfRelocTable) { + RelocType relocType = reloc.getType(); + + int elfRelocType = getELFRelocationType(relocType); + ElfSymbol sym = (ElfSymbol) symbol.getNativeSymbol(); + int symno = sym.getIndex(); + int sectindex = reloc.getSection().getSectionId(); + int offset = reloc.getOffset(); + int addend = 0; + + switch (relocType) { + case 
STUB_CALL_DIRECT: + case JAVA_CALL_DIRECT: { + break; + } + case EXTERNAL_PLT_TO_GOT: + offset -= 16; + elfRelocTable.createRelocationEntry(sectindex, offset, symno, Elf64_Rela.R_AARCH64_ADR_PREL_PG_HI21, addend); + elfRelocTable.createRelocationEntry(sectindex, offset + 4, symno, Elf64_Rela.R_AARCH64_ADD_ABS_LO12_NC, addend); + return; + + case FOREIGN_CALL_INDIRECT_GOT: { + break; + } + case METASPACE_GOT_REFERENCE: { + offset -= 4; + + elfRelocTable.createRelocationEntry(sectindex, offset, symno, Elf64_Rela.R_AARCH64_ADR_PREL_PG_HI21, addend); + elfRelocTable.createRelocationEntry(sectindex, offset + 4, symno, Elf64_Rela.R_AARCH64_ADD_ABS_LO12_NC, addend); + return; + } + // break; + case JAVA_CALL_INDIRECT: { + addend = -4; + offset = offset + addend; + break; + } + case EXTERNAL_GOT_TO_PLT: { + // this is load time relocations + break; + } + default: + throw new InternalError("Unhandled relocation type: " + relocType); + } + + elfRelocTable.createRelocationEntry(sectindex, offset, symno, elfRelocType, addend); + } + + int getELFRelocationType(RelocType relocType) { + int elfRelocType = 0; // R__NONE if #define'd to 0 for all values of ARCH + switch (ElfTargetInfo.getElfArch()) { + case Elf64_Ehdr.EM_AARCH64: + // Return R_X86_64_* entries based on relocType + if (relocType == RelocType.JAVA_CALL_DIRECT || + relocType == RelocType.FOREIGN_CALL_INDIRECT_GOT) { + elfRelocType = Elf64_Rela.R_AARCH64_CALL26; + } else if (relocType == RelocType.STUB_CALL_DIRECT) { + elfRelocType = Elf64_Rela.R_AARCH64_CALL26; + } else if (relocType == RelocType.JAVA_CALL_INDIRECT) { + elfRelocType = Elf64_Rela.R_AARCH64_CALL26; + } else if (relocType == RelocType.METASPACE_GOT_REFERENCE || + relocType == RelocType.EXTERNAL_PLT_TO_GOT) { + elfRelocType = Elf64_Rela.R_AARCH64_NONE; + } else if (relocType == RelocType.EXTERNAL_GOT_TO_PLT) { + elfRelocType = Elf64_Rela.R_AARCH64_ABS64; + } else { + assert false : "Unhandled relocation type: " + relocType; + } + break; + + default: + 
System.out.println("Relocation Type mapping: Unhandled architecture: " + + ElfTargetInfo.getElfArch()); + } + return elfRelocType; + } +} diff -r 9822dd521c15 -r d93ae85b18c1 src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/elf/AMD64JELFRelocObject.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/elf/AMD64JELFRelocObject.java Tue May 15 10:13:52 2018 -0700 @@ -0,0 +1,127 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +package jdk.tools.jaotc.binformat.elf; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; + +import jdk.tools.jaotc.binformat.BinaryContainer; +import jdk.tools.jaotc.binformat.ByteContainer; +import jdk.tools.jaotc.binformat.CodeContainer; +import jdk.tools.jaotc.binformat.ReadOnlyDataContainer; +import jdk.tools.jaotc.binformat.Relocation; +import jdk.tools.jaotc.binformat.Relocation.RelocType; +import jdk.tools.jaotc.binformat.Symbol; +import jdk.tools.jaotc.binformat.Symbol.Binding; +import jdk.tools.jaotc.binformat.Symbol.Kind; + +import jdk.tools.jaotc.binformat.elf.ElfSymbol; +import jdk.tools.jaotc.binformat.elf.ElfTargetInfo; +import jdk.tools.jaotc.binformat.elf.Elf.Elf64_Ehdr; +import jdk.tools.jaotc.binformat.elf.Elf.Elf64_Shdr; +import jdk.tools.jaotc.binformat.elf.Elf.Elf64_Sym; +import jdk.tools.jaotc.binformat.elf.Elf.Elf64_Rela; + + +public class AMD64JELFRelocObject extends JELFRelocObject { + + AMD64JELFRelocObject(BinaryContainer binContainer, String outputFileName) { + super(binContainer, outputFileName); + } + + protected void createRelocation(Symbol symbol, Relocation reloc, ElfRelocTable elfRelocTable) { + RelocType relocType = reloc.getType(); + + int elfRelocType = getELFRelocationType(relocType); + ElfSymbol sym = (ElfSymbol) symbol.getNativeSymbol(); + int symno = sym.getIndex(); + int sectindex = reloc.getSection().getSectionId(); + int offset = reloc.getOffset(); + int addend = 0; + + switch (relocType) { + case JAVA_CALL_DIRECT: + case STUB_CALL_DIRECT: + case FOREIGN_CALL_INDIRECT_GOT: { + // Create relocation entry + addend = -4; // Size in bytes of the patch location + // Relocation should be applied at the location after call operand + offset = offset + reloc.getSize() + addend; + break; + } + case JAVA_CALL_INDIRECT: + case METASPACE_GOT_REFERENCE: + case EXTERNAL_PLT_TO_GOT: { + addend = -4; // 
Size of 32-bit address of the GOT + /* + * Relocation should be applied before the test instruction to the move instruction. + * reloc.getOffset() points to the test instruction after the instruction that loads the address of + * polling page. So set the offset appropriately. + */ + offset = offset + addend; + break; + } + case EXTERNAL_GOT_TO_PLT: { + // this is load time relocations + break; + } + default: + throw new InternalError("Unhandled relocation type: " + relocType); + } + elfRelocTable.createRelocationEntry(sectindex, offset, symno, elfRelocType, addend); + } + + private int getELFRelocationType(RelocType relocType) { + int elfRelocType = 0; // R__NONE if #define'd to 0 for all values of ARCH + switch (ElfTargetInfo.getElfArch()) { + case Elf64_Ehdr.EM_X86_64: + // Return R_X86_64_* entries based on relocType + if (relocType == RelocType.JAVA_CALL_DIRECT || + relocType == RelocType.FOREIGN_CALL_INDIRECT_GOT) { + elfRelocType = Elf64_Rela.R_X86_64_PLT32; + } else if (relocType == RelocType.STUB_CALL_DIRECT) { + elfRelocType = Elf64_Rela.R_X86_64_PC32; + } else if (relocType == RelocType.JAVA_CALL_INDIRECT) { + elfRelocType = Elf64_Rela.R_X86_64_NONE; + } else if (relocType == RelocType.METASPACE_GOT_REFERENCE || + relocType == RelocType.EXTERNAL_PLT_TO_GOT) { + elfRelocType = Elf64_Rela.R_X86_64_PC32; + } else if (relocType == RelocType.EXTERNAL_GOT_TO_PLT) { + elfRelocType = Elf64_Rela.R_X86_64_64; + } else { + assert false : "Unhandled relocation type: " + relocType; + } + break; + + default: + System.out.println("Relocation Type mapping: Unhandled architecture: " + + ElfTargetInfo.getElfArch()); + } + return elfRelocType; + } +} diff -r 9822dd521c15 -r d93ae85b18c1 src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/elf/Elf.java --- a/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/elf/Elf.java Tue May 15 18:03:31 2018 +0530 +++ 
b/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/elf/Elf.java Tue May 15 10:13:52 2018 -0700 @@ -1,5 +1,6 @@ /* - * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -208,12 +209,26 @@ /** * Relocation types */ + static final int R_X86_64_NONE = 0x0; static final int R_X86_64_64 = 0x1; static final int R_X86_64_PC32 = 0x2; static final int R_X86_64_PLT32 = 0x4; static final int R_X86_64_GOTPCREL = 0x9; + static final int R_AARCH64_NONE = 256; + static final int R_AARCH64_ABS64 = 257; + static final int R_AARCH64_CALL26 = 283; + static final int R_AARCH64_ADR_GOT_PAGE = 311; + static final int R_AARCH64_LD64_GOT_LO12_NC = 312; + + static final int R_AARCH64_MOVW_UABS_G0_NC = 264; + static final int R_AARCH64_MOVW_UABS_G1_NC = 266; + static final int R_AARCH64_MOVW_UABS_G2_NC = 268; + + static final int R_AARCH64_ADR_PREL_PG_HI21 = 275; + static final int R_AARCH64_ADD_ABS_LO12_NC = 277; + static final int R_AARCH64_LDST64_ABS_LO12_NC = 286; } /** @@ -240,6 +255,20 @@ static final int R_X86_64_PLT32 = 0x4; static final int R_X86_64_GOTPCREL = 0x9; + static final int R_AARCH64_NONE = 256; + static final int R_AARCH64_ABS64 = 257; + static final int R_AARCH64_CALL26 = 283; + static final int R_AARCH64_ADR_GOT_PAGE = 311; + static final int R_AARCH64_LD64_GOT_LO12_NC = 312; + + static final int R_AARCH64_MOVW_UABS_G0_NC = 264; + static final int R_AARCH64_MOVW_UABS_G1_NC = 266; + static final int R_AARCH64_MOVW_UABS_G2_NC = 268; + + static final int R_AARCH64_ADR_PREL_PG_HI21 = 275; + static final int R_AARCH64_ADD_ABS_LO12_NC = 277; + static final int R_AARCH64_LDST64_ABS_LO12_NC = 286; + static long ELF64_R_INFO(int symidx, int type) 
{ return (((long)symidx << 32) + type); } diff -r 9822dd521c15 -r d93ae85b18c1 src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/elf/ElfTargetInfo.java --- a/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/elf/ElfTargetInfo.java Tue May 15 18:03:31 2018 +0530 +++ b/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/elf/ElfTargetInfo.java Tue May 15 10:13:52 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -55,6 +55,8 @@ if (archStr.equals("amd64") || archStr.equals("x86_64")) { arch = Elf64_Ehdr.EM_X86_64; + } else if (archStr.equals("aarch64")) { + arch = Elf64_Ehdr.EM_AARCH64; } else { System.out.println("Unsupported architecture " + archStr); arch = Elf64_Ehdr.EM_NONE; diff -r 9822dd521c15 -r d93ae85b18c1 src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/elf/JELFRelocObject.java --- a/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/elf/JELFRelocObject.java Tue May 15 18:03:31 2018 +0530 +++ b/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/elf/JELFRelocObject.java Tue May 15 10:13:52 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -46,7 +46,7 @@ import jdk.tools.jaotc.binformat.elf.Elf.Elf64_Sym; import jdk.tools.jaotc.binformat.elf.Elf.Elf64_Rela; -public class JELFRelocObject { +public abstract class JELFRelocObject { private final BinaryContainer binContainer; @@ -54,12 +54,22 @@ private final int segmentSize; - public JELFRelocObject(BinaryContainer binContainer, String outputFileName) { + protected JELFRelocObject(BinaryContainer binContainer, String outputFileName) { this.binContainer = binContainer; this.elfContainer = new ElfContainer(outputFileName); this.segmentSize = binContainer.getCodeSegmentSize(); } + public static JELFRelocObject newInstance(BinaryContainer binContainer, String outputFileName) { + String archStr = System.getProperty("os.arch").toLowerCase(); + if (archStr.equals("amd64") || archStr.equals("x86_64")) { + return new AMD64JELFRelocObject(binContainer, outputFileName); + } else if (archStr.equals("aarch64")) { + return new AArch64JELFRelocObject(binContainer, outputFileName); + } + throw new InternalError("Unsupported platform: " + archStr); + } + private static ElfSection createByteSection(ArrayList sections, String sectName, byte[] scnData, @@ -295,75 +305,6 @@ return (elfRelocTable); } - private static void createRelocation(Symbol symbol, Relocation reloc, ElfRelocTable elfRelocTable) { - RelocType relocType = reloc.getType(); - - int elfRelocType = getELFRelocationType(relocType); - ElfSymbol sym = (ElfSymbol) symbol.getNativeSymbol(); - int symno = sym.getIndex(); - int sectindex = reloc.getSection().getSectionId(); - int offset = reloc.getOffset(); - int addend = 0; - - switch (relocType) { - case JAVA_CALL_DIRECT: - case STUB_CALL_DIRECT: - case FOREIGN_CALL_INDIRECT_GOT: { - // Create relocation entry - addend = -4; // Size in bytes of the patch location - // Relocation should be applied at the location after call operand - offset = offset + reloc.getSize() + addend; - break; - } 
- case JAVA_CALL_INDIRECT: - case METASPACE_GOT_REFERENCE: - case EXTERNAL_PLT_TO_GOT: { - addend = -4; // Size of 32-bit address of the GOT - /* - * Relocation should be applied before the test instruction to the move instruction. - * reloc.getOffset() points to the test instruction after the instruction that loads the address of - * polling page. So set the offset appropriately. - */ - offset = offset + addend; - break; - } - case EXTERNAL_GOT_TO_PLT: { - // this is load time relocations - break; - } - default: - throw new InternalError("Unhandled relocation type: " + relocType); - } - elfRelocTable.createRelocationEntry(sectindex, offset, symno, elfRelocType, addend); - } - - private static int getELFRelocationType(RelocType relocType) { - int elfRelocType = 0; // R__NONE if #define'd to 0 for all values of ARCH - switch (ElfTargetInfo.getElfArch()) { - case Elf64_Ehdr.EM_X86_64: - // Return R_X86_64_* entries based on relocType - if (relocType == RelocType.JAVA_CALL_DIRECT || - relocType == RelocType.FOREIGN_CALL_INDIRECT_GOT) { - elfRelocType = Elf64_Rela.R_X86_64_PLT32; - } else if (relocType == RelocType.STUB_CALL_DIRECT) { - elfRelocType = Elf64_Rela.R_X86_64_PC32; - } else if (relocType == RelocType.JAVA_CALL_INDIRECT) { - elfRelocType = Elf64_Rela.R_X86_64_NONE; - } else if (relocType == RelocType.METASPACE_GOT_REFERENCE || - relocType == RelocType.EXTERNAL_PLT_TO_GOT) { - elfRelocType = Elf64_Rela.R_X86_64_PC32; - } else if (relocType == RelocType.EXTERNAL_GOT_TO_PLT) { - elfRelocType = Elf64_Rela.R_X86_64_64; - } else { - assert false : "Unhandled relocation type: " + relocType; - } - break; - default: - System.out.println("Relocation Type mapping: Unhandled architecture"); - } - return elfRelocType; - } - private static void createElfRelocSections(ArrayList sections, ElfRelocTable elfRelocTable, int symtabsectidx) { @@ -383,4 +324,7 @@ } } } + + abstract void createRelocation(Symbol symbol, Relocation reloc, ElfRelocTable elfRelocTable); + } diff -r 
9822dd521c15 -r d93ae85b18c1 src/jdk.aot/share/classes/jdk.tools.jaotc/src/jdk/tools/jaotc/CodeSectionProcessor.java --- a/src/jdk.aot/share/classes/jdk.tools.jaotc/src/jdk/tools/jaotc/CodeSectionProcessor.java Tue May 15 18:03:31 2018 +0530 +++ b/src/jdk.aot/share/classes/jdk.tools.jaotc/src/jdk/tools/jaotc/CodeSectionProcessor.java Tue May 15 10:13:52 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -32,6 +32,8 @@ import org.graalvm.compiler.code.CompilationResult; import org.graalvm.compiler.hotspot.HotSpotForeignCallLinkage; +import jdk.vm.ci.aarch64.AArch64; +import jdk.vm.ci.amd64.AMD64; import jdk.vm.ci.code.TargetDescription; import jdk.vm.ci.code.site.Call; import jdk.vm.ci.code.site.Infopoint; @@ -72,7 +74,8 @@ for (Infopoint infopoint : compResult.getInfopoints()) { if (infopoint.reason == InfopointReason.CALL) { final Call callInfopoint = (Call) infopoint; - if (callInfopoint.target instanceof HotSpotForeignCallLinkage) { + if (callInfopoint.target instanceof HotSpotForeignCallLinkage && + target.arch instanceof AMD64) { // TODO 4 is x86 size of relative displacement. // For SPARC need something different. int destOffset = infopoint.pcOffset + callInfopoint.size - 4; diff -r 9822dd521c15 -r d93ae85b18c1 src/jdk.aot/share/classes/jdk.tools.jaotc/src/jdk/tools/jaotc/CompiledMethodInfo.java --- a/src/jdk.aot/share/classes/jdk.tools.jaotc/src/jdk/tools/jaotc/CompiledMethodInfo.java Tue May 15 18:03:31 2018 +0530 +++ b/src/jdk.aot/share/classes/jdk.tools.jaotc/src/jdk/tools/jaotc/CompiledMethodInfo.java Tue May 15 10:13:52 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -39,6 +39,8 @@ final class CompiledMethodInfo { + static final String archStr = System.getProperty("os.arch").toLowerCase(); + private static final int UNINITIALIZED_OFFSET = -1; private static class AOTMethodOffsets { @@ -304,10 +306,17 @@ boolean hasMark(Site call, MarkId id) { for (Mark m : compilationResult.getMarks()) { - // TODO: X64-specific code. - // Call instructions are aligned to 8 - // bytes - 1 on x86 to patch address atomically, - int adjOffset = (m.pcOffset & (-8)) + 7; + int adjOffset = m.pcOffset; + if (archStr.equals("aarch64")) { + // The mark is at the end of a group of three instructions: + // adrp; add; ldr + adjOffset += 12; + } else { + // X64-specific code. + // Call instructions are aligned to 8 + // bytes - 1 on x86 to patch address atomically, + adjOffset = (adjOffset & (-8)) + 7; + } // Mark points before aligning nops. if ((call.pcOffset == adjOffset) && MarkId.getEnum((int) m.id) == id) { return true; diff -r 9822dd521c15 -r d93ae85b18c1 src/jdk.aot/share/classes/jdk.tools.jaotc/src/jdk/tools/jaotc/ELFMacroAssembler.java --- a/src/jdk.aot/share/classes/jdk.tools.jaotc/src/jdk/tools/jaotc/ELFMacroAssembler.java Tue May 15 18:03:31 2018 +0530 +++ b/src/jdk.aot/share/classes/jdk.tools.jaotc/src/jdk/tools/jaotc/ELFMacroAssembler.java Tue May 15 10:13:52 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -25,8 +25,10 @@ import jdk.tools.jaotc.StubInformation; import jdk.tools.jaotc.amd64.AMD64ELFMacroAssembler; +import jdk.tools.jaotc.aarch64.AArch64ELFMacroAssembler; import jdk.vm.ci.amd64.AMD64; +import jdk.vm.ci.aarch64.AArch64; import jdk.vm.ci.code.Architecture; import jdk.vm.ci.code.TargetDescription; @@ -36,6 +38,8 @@ Architecture architecture = target.arch; if (architecture instanceof AMD64) { return new AMD64ELFMacroAssembler(target); + } else if (architecture instanceof AArch64) { + return new AArch64ELFMacroAssembler(target); } else { throw new InternalError("Unsupported architecture " + architecture); } diff -r 9822dd521c15 -r d93ae85b18c1 src/jdk.aot/share/classes/jdk.tools.jaotc/src/jdk/tools/jaotc/InstructionDecoder.java --- a/src/jdk.aot/share/classes/jdk.tools.jaotc/src/jdk/tools/jaotc/InstructionDecoder.java Tue May 15 18:03:31 2018 +0530 +++ b/src/jdk.aot/share/classes/jdk.tools.jaotc/src/jdk/tools/jaotc/InstructionDecoder.java Tue May 15 10:13:52 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -24,8 +24,10 @@ package jdk.tools.jaotc; import jdk.tools.jaotc.amd64.AMD64InstructionDecoder; +import jdk.tools.jaotc.aarch64.AArch64InstructionDecoder; import jdk.vm.ci.amd64.AMD64; +import jdk.vm.ci.aarch64.AArch64; import jdk.vm.ci.code.Architecture; import jdk.vm.ci.code.TargetDescription; @@ -35,6 +37,8 @@ Architecture architecture = target.arch; if (architecture instanceof AMD64) { return new AMD64InstructionDecoder(target); + } else if (architecture instanceof AArch64) { + return new AArch64InstructionDecoder(target); } else { throw new InternalError("Unsupported architecture " + architecture); } diff -r 9822dd521c15 -r d93ae85b18c1 src/jdk.aot/share/classes/jdk.tools.jaotc/src/jdk/tools/jaotc/JavaCallSiteRelocationSymbol.java --- a/src/jdk.aot/share/classes/jdk.tools.jaotc/src/jdk/tools/jaotc/JavaCallSiteRelocationSymbol.java Tue May 15 18:03:31 2018 +0530 +++ b/src/jdk.aot/share/classes/jdk.tools.jaotc/src/jdk/tools/jaotc/JavaCallSiteRelocationSymbol.java Tue May 15 10:13:52 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -38,7 +38,19 @@ private static final byte[] zeroSlot = new byte[8]; // -1 represents Universe::non_oop_word() value - private static final byte[] minusOneSlot = {-1, -1, -1, -1, -1, -1, -1, -1}; + private static final byte[] minusOneSlot; + + static { + String archStr = System.getProperty("os.arch").toLowerCase(); + if (archStr.equals("aarch64")) { + // AArch64 is a special case: it uses 48-bit addresses. 
+ byte[] non_oop_word = {-1, -1, -1, -1, -1, -1, 0, 0}; + minusOneSlot = non_oop_word; + } else { + byte[] non_oop_word = {-1, -1, -1, -1, -1, -1, -1, -1}; + minusOneSlot = non_oop_word; + } + } JavaCallSiteRelocationSymbol(CompiledMethodInfo mi, Call call, CallSiteRelocationInfo callSiteRelocation, BinaryContainer binaryContainer) { super(createPltEntrySymbol(binaryContainer, mi, call, callSiteRelocation)); @@ -123,6 +135,7 @@ */ private static String getResolveSymbolName(CompiledMethodInfo mi, Call call) { String resolveSymbolName; + String name = call.target.toString(); if (CallInfo.isStaticCall(call)) { assert mi.hasMark(call, MarkId.INVOKESTATIC); resolveSymbolName = BinaryContainer.getResolveStaticEntrySymbolName(); diff -r 9822dd521c15 -r d93ae85b18c1 src/jdk.aot/share/classes/jdk.tools.jaotc/src/jdk/tools/jaotc/Linker.java --- a/src/jdk.aot/share/classes/jdk.tools.jaotc/src/jdk/tools/jaotc/Linker.java Tue May 15 18:03:31 2018 +0530 +++ b/src/jdk.aot/share/classes/jdk.tools.jaotc/src/jdk/tools/jaotc/Linker.java Tue May 15 10:13:52 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -69,6 +69,7 @@ if (name.endsWith(".so")) { objectFileName = name.substring(0, name.length() - ".so".length()); } + objectFileName = objectFileName + ".o"; linkerPath = (options.linkerpath != null) ? 
options.linkerpath : "ld"; linkerCmd = linkerPath + " -shared -z noexecstack -o " + libraryFileName + " " + objectFileName; linkerCheck = linkerPath + " -v"; @@ -130,7 +131,8 @@ throw new InternalError(errorMessage); } File objFile = new File(objectFileName); - if (objFile.exists()) { + boolean keepObjFile = Boolean.parseBoolean(System.getProperty("aot.keep.objFile", "false")); + if (objFile.exists() && !keepObjFile) { if (!objFile.delete()) { throw new InternalError("Failed to delete " + objectFileName + " file"); } diff -r 9822dd521c15 -r d93ae85b18c1 src/jdk.aot/share/classes/jdk.tools.jaotc/src/jdk/tools/jaotc/aarch64/AArch64ELFMacroAssembler.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/jdk.aot/share/classes/jdk.tools.jaotc/src/jdk/tools/jaotc/aarch64/AArch64ELFMacroAssembler.java Tue May 15 10:13:52 2018 -0700 @@ -0,0 +1,129 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, Red Hat Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +package jdk.tools.jaotc.aarch64; + +import jdk.tools.jaotc.StubInformation; +import jdk.tools.jaotc.ELFMacroAssembler; + +import org.graalvm.compiler.asm.aarch64.AArch64Address; +import org.graalvm.compiler.asm.aarch64.AArch64MacroAssembler; + + +import jdk.vm.ci.code.TargetDescription; +import jdk.vm.ci.code.Register; + +import static jdk.vm.ci.aarch64.AArch64.*; + +public final class AArch64ELFMacroAssembler extends AArch64MacroAssembler implements ELFMacroAssembler { + + private int currentEndOfInstruction; + + public AArch64ELFMacroAssembler(TargetDescription target) { + super(target); + } + + @Override + public int currentEndOfInstruction() { + return currentEndOfInstruction; + } + + @Override + public byte[] getPLTJumpCode() { + // The main dispatch instruction + addressOf(r16); + ldr(64, r16, AArch64Address.createBaseRegisterOnlyAddress(r16)); + jmp(r16); + + currentEndOfInstruction = position(); + + align(8); + + return close(true); + } + + @Override + public byte[] getPLTStaticEntryCode(StubInformation stub) { + // The main dispatch instruction + addressOf(r16); + ldr(64, r16, AArch64Address.createBaseRegisterOnlyAddress(r16)); + jmp(r16); + stub.setDispatchJumpOffset(position()); + + // C2I stub used to call interpreter. First load r12 + // (i.e. rmethod) with a pointer to the Method structure ... + addressOf(r12); + ldr(64, r12, AArch64Address.createBaseRegisterOnlyAddress(r12)); + nop(); + stub.setMovOffset(position()); + + // ... then jump to the interpreter. + addressOf(r16); + ldr(64, r16, AArch64Address.createBaseRegisterOnlyAddress(r16)); + jmp(r16); + stub.setC2IJumpOffset(position()); + + // Call to VM runtime to resolve the call. 
+ stub.setResolveJumpStart(position()); + addressOf(r16); + ldr(64, r16, AArch64Address.createBaseRegisterOnlyAddress(r16)); + jmp(r16); + stub.setResolveJumpOffset(position()); + currentEndOfInstruction = position(); + + align(8); + stub.setSize(position()); + + return close(true); + } + + @Override + public byte[] getPLTVirtualEntryCode(StubInformation stub) { + // Fixup an inline cache. + // Load r9 with a pointer to the Klass. + addressOf(r17); + ldr(64, r9, AArch64Address.createBaseRegisterOnlyAddress(r17)); + nop(); + stub.setMovOffset(position()); + + // Jump to the method. + addressOf(r16); + ldr(64, r16, AArch64Address.createBaseRegisterOnlyAddress(r16)); + jmp(r16); + stub.setDispatchJumpOffset(position()); + + // Call to VM runtime to resolve the call. + stub.setResolveJumpStart(position()); + addressOf(r16); + ldr(64, r16, AArch64Address.createBaseRegisterOnlyAddress(r16)); + jmp(r16); + stub.setResolveJumpOffset(position()); + currentEndOfInstruction = position(); + + align(8); + stub.setSize(position()); + + return close(true); + } +} diff -r 9822dd521c15 -r d93ae85b18c1 src/jdk.aot/share/classes/jdk.tools.jaotc/src/jdk/tools/jaotc/aarch64/AArch64InstructionDecoder.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/jdk.aot/share/classes/jdk.tools.jaotc/src/jdk/tools/jaotc/aarch64/AArch64InstructionDecoder.java Tue May 15 10:13:52 2018 -0700 @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, Red Hat Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package jdk.tools.jaotc.aarch64; + +import jdk.tools.jaotc.InstructionDecoder; + +import jdk.vm.ci.code.TargetDescription; + +public final class AArch64InstructionDecoder extends InstructionDecoder { + + private int currentEndOfInstruction; + + public AArch64InstructionDecoder(TargetDescription target) { + } + + @Override + public int currentEndOfInstruction() { + return currentEndOfInstruction; + } + + @Override + public void decodePosition(final byte[] code, int pcOffset) { + currentEndOfInstruction = pcOffset + 4; + } +} diff -r 9822dd521c15 -r d93ae85b18c1 src/jdk.compiler/share/classes/com/sun/tools/javac/comp/Attr.java --- a/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/Attr.java Tue May 15 18:03:31 2018 +0530 +++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/Attr.java Tue May 15 10:13:52 2018 -0700 @@ -2705,6 +2705,9 @@ Type target = null; for (Type bound : ict.getExplicitComponents()) { TypeSymbol boundSym = bound.tsym; + if (bound.tsym == syms.objectType.tsym) { + continue; + } if (types.isFunctionalInterface(boundSym) && types.findDescriptorSymbol(boundSym) == desc) { target = bound; @@ -3032,14 +3035,14 @@ targetError = false; } - JCDiagnostic detailsDiag = ((Resolve.ResolveError)refSym.baseSymbol()).getDiagnostic(JCDiagnostic.DiagnosticType.FRAGMENT, + JCDiagnostic detailsDiag = ((Resolve.ResolveError)refSym.baseSymbol()) + .getDiagnostic(JCDiagnostic.DiagnosticType.FRAGMENT, that, exprType.tsym, 
exprType, that.name, argtypes, typeargtypes); - JCDiagnostic.DiagnosticType diagKind = targetError ? - JCDiagnostic.DiagnosticType.FRAGMENT : JCDiagnostic.DiagnosticType.ERROR; - - JCDiagnostic diag = diags.create(diagKind, log.currentSource(), that, - "invalid.mref", Kinds.kindName(that.getMode()), detailsDiag); + JCDiagnostic diag = diags.create(log.currentSource(), that, + targetError ? + Fragments.InvalidMref(Kinds.kindName(that.getMode()), detailsDiag) : + Errors.InvalidMref(Kinds.kindName(that.getMode()), detailsDiag)); if (targetError && currentTarget == Type.recoveryType) { //a target error doesn't make sense during recovery stage diff -r 9822dd521c15 -r d93ae85b18c1 src/jdk.compiler/share/classes/com/sun/tools/javac/resources/compiler.properties --- a/src/jdk.compiler/share/classes/com/sun/tools/javac/resources/compiler.properties Tue May 15 18:03:31 2018 +0530 +++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/resources/compiler.properties Tue May 15 10:13:52 2018 -0700 @@ -278,7 +278,7 @@ invalid {0} reference\n\ {1} -# 0: symbol kind, 1: message segment +# 0: kind name, 1: message segment compiler.misc.invalid.mref=\ invalid {0} reference\n\ {1} @@ -2469,6 +2469,11 @@ cannot find symbol\n\ symbol: {0} {1}({3}) +# 0: kind name, 1: name, 2: unused, 3: list of type +compiler.misc.cant.resolve.args=\ + cannot find symbol\n\ + symbol: {0} {1}({3}) + # 0: kind name, 1: name, 2: list of type, 3: list of type compiler.err.cant.resolve.args.params=\ cannot find symbol\n\ diff -r 9822dd521c15 -r d93ae85b18c1 src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/HotSpotTypeDataBase.java --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/HotSpotTypeDataBase.java Tue May 15 18:03:31 2018 +0530 +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/HotSpotTypeDataBase.java Tue May 15 10:13:52 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -563,7 +563,7 @@ nameAddr = entryAddr.getAddressAt(longConstantEntryNameOffset); if (nameAddr != null) { String name = CStringUtilities.getString(nameAddr); - int value = (int) entryAddr.getCIntegerAt(longConstantEntryValueOffset, C_INT64_SIZE, true); + long value = entryAddr.getCIntegerAt(longConstantEntryValueOffset, C_INT64_SIZE, true); // Be a little resilient Long oldValue = lookupLongConstant(name, false); diff -r 9822dd521c15 -r d93ae85b18c1 src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/interpreter/Bytecodes.java --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/interpreter/Bytecodes.java Tue May 15 18:03:31 2018 +0530 +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/interpreter/Bytecodes.java Tue May 15 10:13:52 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -274,9 +274,15 @@ public static final int _fast_aldc_w = 231; public static final int _return_register_finalizer = 232; public static final int _invokehandle = 233; - public static final int _shouldnotreachhere = 234; // For debugging - public static final int number_of_codes = 235; + // Bytecodes rewritten at CDS dump time + public static final int _nofast_getfield = 234; + public static final int _nofast_putfield = 235; + public static final int _nofast_aload_0 = 236; + public static final int _nofast_iload = 237; + public static final int _shouldnotreachhere = 238; // For debugging + + public static final int number_of_codes = 239; // Flag bits derived from format strings, can_trap, can_rewrite, etc.: // semantic flags: @@ -807,6 +813,12 @@ // special handling of signature-polymorphic methods def(_invokehandle , "invokehandle" , "bJJ" , null , BasicType.getTIllegal(), -1, true, _invokevirtual ); + // CDS specific. Bytecodes rewritten at CDS dump time + def(_nofast_getfield , "_nofast_getfield" , "bJJ" , null , BasicType.getTIllegal() , 0, true, _getfield ); + def(_nofast_putfield , "_nofast_putfield" , "bJJ" , null , BasicType.getTIllegal() ,-2, true, _putfield ); + def(_nofast_aload_0 , "_nofast_aload_0" , "b" , null , BasicType.getTIllegal() , 1, true, _aload_0 ); + def(_nofast_iload , "_nofast_iload" , "bi" , null , BasicType.getTIllegal() , 1, false, _iload ); + def(_shouldnotreachhere , "_shouldnotreachhere" , "b" , null , BasicType.getTVoid() , 0, false); if (Assert.ASSERTS_ENABLED) { diff -r 9822dd521c15 -r d93ae85b18c1 src/jdk.internal.vm.compiler/share/classes/module-info.java --- a/src/jdk.internal.vm.compiler/share/classes/module-info.java Tue May 15 18:03:31 2018 +0530 +++ b/src/jdk.internal.vm.compiler/share/classes/module-info.java Tue May 15 10:13:52 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -44,6 +44,7 @@ exports org.graalvm.compiler.api.runtime to jdk.aot; exports org.graalvm.compiler.api.replacements to jdk.aot; exports org.graalvm.compiler.asm.amd64 to jdk.aot; + exports org.graalvm.compiler.asm.aarch64 to jdk.aot; exports org.graalvm.compiler.bytecode to jdk.aot; exports org.graalvm.compiler.code to jdk.aot; exports org.graalvm.compiler.core to jdk.aot; diff -r 9822dd521c15 -r d93ae85b18c1 src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.asm.aarch64/src/org/graalvm/compiler/asm/aarch64/AArch64Assembler.java --- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.asm.aarch64/src/org/graalvm/compiler/asm/aarch64/AArch64Assembler.java Tue May 15 18:03:31 2018 +0530 +++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.asm.aarch64/src/org/graalvm/compiler/asm/aarch64/AArch64Assembler.java Tue May 15 10:13:52 2018 -0700 @@ -1,5 +1,6 @@ /* - * Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -26,6 +27,7 @@ import static org.graalvm.compiler.asm.aarch64.AArch64Assembler.Instruction.ADD; import static org.graalvm.compiler.asm.aarch64.AArch64Assembler.Instruction.ADDS; import static org.graalvm.compiler.asm.aarch64.AArch64Assembler.Instruction.ADR; +import static org.graalvm.compiler.asm.aarch64.AArch64Assembler.Instruction.ADRP; import static org.graalvm.compiler.asm.aarch64.AArch64Assembler.Instruction.AND; import static org.graalvm.compiler.asm.aarch64.AArch64Assembler.Instruction.ANDS; import static org.graalvm.compiler.asm.aarch64.AArch64Assembler.Instruction.ASRV; @@ -1347,15 +1349,13 @@ /** * Address of page: sign extends 21-bit offset, shifts if left by 12 and adds it to the value of * the PC with its bottom 12-bits cleared, writing the result to dst. + * No offset is emiited; the instruction will be patched later. * * @param dst general purpose register. May not be null, zero-register or stackpointer. - * @param imm Signed 33-bit offset with lower 12bits clear. */ - // protected void adrp(Register dst, long imm) { - // assert (imm & NumUtil.getNbitNumberInt(12)) == 0 : "Lower 12-bit of immediate must be zero."; - // assert NumUtil.isSignedNbit(33, imm); - // addressCalculationInstruction(dst, (int) (imm >>> 12), Instruction.ADRP); - // } + public void adrp(Register dst) { + emitInt(ADRP.encoding | PcRelImmOp | rd(dst) ); + } /** * Adds a 21-bit signed offset to the program counter and writes the result to dst. 
@@ -1371,6 +1371,10 @@ emitInt(ADR.encoding | PcRelImmOp | rd(dst) | getPcRelativeImmEncoding(imm21), pos); } + public void adrp(Register dst, int pageOffset) { + emitInt(ADRP.encoding | PcRelImmOp | rd(dst) | getPcRelativeImmEncoding(pageOffset)); + } + private static int getPcRelativeImmEncoding(int imm21) { assert NumUtil.isSignedNbit(21, imm21); int imm = imm21 & NumUtil.getNbitNumberInt(21); diff -r 9822dd521c15 -r d93ae85b18c1 src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.asm.aarch64/src/org/graalvm/compiler/asm/aarch64/AArch64MacroAssembler.java --- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.asm.aarch64/src/org/graalvm/compiler/asm/aarch64/AArch64MacroAssembler.java Tue May 15 18:03:31 2018 +0530 +++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.asm.aarch64/src/org/graalvm/compiler/asm/aarch64/AArch64MacroAssembler.java Tue May 15 10:13:52 2018 -0700 @@ -1,5 +1,6 @@ /* * Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1564,6 +1565,12 @@ return AArch64Address.PLACEHOLDER; } + public void addressOf(Register dst) { + // This will be fixed up later. + super.adrp(dst); + super.add(64, dst, dst, 0); + } + /** * Loads an address into Register d. 
* diff -r 9822dd521c15 -r d93ae85b18c1 src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/GraalHotSpotVMConfigVersioned.java --- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/GraalHotSpotVMConfigVersioned.java Tue May 15 18:03:31 2018 +0530 +++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/GraalHotSpotVMConfigVersioned.java Tue May 15 10:13:52 2018 -0700 @@ -44,7 +44,7 @@ final boolean useCRC32CIntrinsics = getFlag("UseCRC32CIntrinsics", Boolean.class); // JDK-8075171 - final boolean inlineNotify = getFlag("InlineNotify", Boolean.class); + final boolean inlineNotify = getFlag("InlineNotify", Boolean.class, true); // JDK-8046936 final int javaThreadReservedStackActivationOffset = getFieldOffset("JavaThread::_reserved_stack_activation", Integer.class, "address"); diff -r 9822dd521c15 -r d93ae85b18c1 src/jdk.javadoc/share/classes/jdk/javadoc/internal/tool/ToolOption.java --- a/src/jdk.javadoc/share/classes/jdk/javadoc/internal/tool/ToolOption.java Tue May 15 18:03:31 2018 +0530 +++ b/src/jdk.javadoc/share/classes/jdk/javadoc/internal/tool/ToolOption.java Tue May 15 10:13:52 2018 -0700 @@ -196,6 +196,13 @@ } }, + ENABLE_PREVIEW("--enable-preview", STANDARD) { + @Override + public void process(Helper helper) throws InvalidValueException { + Option.PREVIEW.process(helper.getOptionHelper(), primaryName); + } + }, + // ----- doclet options ----- DOCLET("-doclet", STANDARD, true), // handled in setDocletInvoker @@ -404,7 +411,7 @@ void process(Helper helper, String arg) throws OptionException, Option.InvalidValueException { } - void process(Helper helper) throws OptionException { } + void process(Helper helper) throws OptionException, Option.InvalidValueException { } List getNames() { return names; diff -r 9822dd521c15 -r d93ae85b18c1 
src/jdk.javadoc/share/classes/jdk/javadoc/internal/tool/resources/javadoc.properties --- a/src/jdk.javadoc/share/classes/jdk/javadoc/internal/tool/resources/javadoc.properties Tue May 15 18:03:31 2018 +0530 +++ b/src/jdk.javadoc/share/classes/jdk/javadoc/internal/tool/resources/javadoc.properties Tue May 15 10:13:52 2018 -0700 @@ -1,5 +1,5 @@ # -# Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -179,6 +179,10 @@ main.opt.source.desc=\ Provide source compatibility with specified release +main.opt.enable.preview.desc=\ + Enable preview language features. To be used in conjunction with\n\ + either -source or --release. + main.opt.extdirs.arg=\ main.opt.extdirs.desc=\ diff -r 9822dd521c15 -r d93ae85b18c1 test/failure_handler/src/share/conf/mac.properties --- a/test/failure_handler/src/share/conf/mac.properties Tue May 15 18:03:31 2018 +0530 +++ b/test/failure_handler/src/share/conf/mac.properties Tue May 15 10:13:52 2018 -0700 @@ -76,6 +76,7 @@ system.dmesg system.sysctl \ process.ps process.top \ memory.vmstat \ + files \ netstat.av netstat.aL netstat.m netstat.s \ ifconfig \ scutil.nwi scutil.proxy @@ -104,6 +105,8 @@ memory.vmstat.app=vm_stat memory.vmstat.args=-c 3 3 +files.app=lsof + netstat.app=netstat netstat.av.args=-av netstat.aL.args=-aL diff -r 9822dd521c15 -r d93ae85b18c1 test/hotspot/jtreg/TEST.groups --- a/test/hotspot/jtreg/TEST.groups Tue May 15 18:03:31 2018 +0530 +++ b/test/hotspot/jtreg/TEST.groups Tue May 15 10:13:52 2018 -0700 @@ -54,6 +54,7 @@ hotspot_misc = \ / \ -applications \ + -vmTestbase \ -:hotspot_compiler \ -:hotspot_gc \ -:hotspot_runtime \ diff -r 9822dd521c15 -r d93ae85b18c1 test/hotspot/jtreg/compiler/aot/RecompilationTest.java --- a/test/hotspot/jtreg/compiler/aot/RecompilationTest.java 
Tue May 15 18:03:31 2018 +0530 +++ b/test/hotspot/jtreg/compiler/aot/RecompilationTest.java Tue May 15 10:13:52 2018 -0700 @@ -37,26 +37,12 @@ * -extraopt -XX:+UnlockDiagnosticVMOptions -extraopt -XX:+WhiteBoxAPI -extraopt -Xbootclasspath/a:. * -extraopt -XX:-UseCompressedOops * -extraopt -XX:CompileCommand=dontinline,compiler.whitebox.SimpleTestCaseHelper::* - * @run main/othervm -Xmixed -Xbatch -XX:+UseAOT -XX:+TieredCompilation -XX:CompilationPolicyChoice=2 - * -XX:-UseCounterDecay -XX:-UseCompressedOops - * -XX:-Inline - * -XX:AOTLibrary=./libRecompilationTest1.so -Xbootclasspath/a:. - * -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI - * -Dcompiler.aot.RecompilationTest.check_level=1 - * compiler.aot.RecompilationTest * @run driver compiler.aot.AotCompiler -libname libRecompilationTest2.so * -class compiler.whitebox.SimpleTestCaseHelper * -extraopt -Dgraal.TieredAOT=false * -extraopt -XX:+UnlockDiagnosticVMOptions -extraopt -XX:+WhiteBoxAPI -extraopt -Xbootclasspath/a:. * -extraopt -XX:-UseCompressedOops * -extraopt -XX:CompileCommand=dontinline,compiler.whitebox.SimpleTestCaseHelper::* - * @run main/othervm -Xmixed -Xbatch -XX:+UseAOT -XX:+TieredCompilation -XX:CompilationPolicyChoice=2 - * -XX:-UseCounterDecay -XX:-UseCompressedOops - * -XX:-Inline - * -XX:AOTLibrary=./libRecompilationTest2.so -Xbootclasspath/a:. 
- * -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI - * -Dcompiler.aot.RecompilationTest.check_level=-1 - * compiler.aot.RecompilationTest * @run main/othervm -Xmixed -Xbatch -XX:+UseAOT -XX:-TieredCompilation * -XX:-UseCounterDecay -XX:-UseCompressedOops * -XX:-Inline diff -r 9822dd521c15 -r d93ae85b18c1 test/hotspot/jtreg/compiler/tiered/ConstantGettersTransitionsTest.java --- a/test/hotspot/jtreg/compiler/tiered/ConstantGettersTransitionsTest.java Tue May 15 18:03:31 2018 +0530 +++ b/test/hotspot/jtreg/compiler/tiered/ConstantGettersTransitionsTest.java Tue May 15 10:13:52 2018 -0700 @@ -34,7 +34,6 @@ * @run main/othervm/timeout=240 -Xmixed -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions * -XX:+WhiteBoxAPI -XX:+TieredCompilation -XX:-UseCounterDecay * -XX:CompileCommand=compileonly,compiler.tiered.ConstantGettersTransitionsTest$ConstantGettersTestCase$TrivialMethods::* - * compiler.tiered.TransitionsTestExecutor * compiler.tiered.ConstantGettersTransitionsTest */ @@ -200,4 +199,4 @@ } } } -} \ No newline at end of file +} diff -r 9822dd521c15 -r d93ae85b18c1 test/hotspot/jtreg/compiler/tiered/LevelTransitionTest.java --- a/test/hotspot/jtreg/compiler/tiered/LevelTransitionTest.java Tue May 15 18:03:31 2018 +0530 +++ b/test/hotspot/jtreg/compiler/tiered/LevelTransitionTest.java Tue May 15 10:13:52 2018 -0700 @@ -36,7 +36,6 @@ * -XX:+WhiteBoxAPI -XX:+TieredCompilation -XX:-UseCounterDecay * -XX:CompileCommand=compileonly,compiler.whitebox.SimpleTestCaseHelper::* * -XX:CompileCommand=compileonly,compiler.tiered.LevelTransitionTest$ExtendedTestCase$CompileMethodHolder::* - * compiler.tiered.TransitionsTestExecutor * compiler.tiered.LevelTransitionTest */ diff -r 9822dd521c15 -r d93ae85b18c1 test/hotspot/jtreg/compiler/tiered/TransitionsTestExecutor.java --- a/test/hotspot/jtreg/compiler/tiered/TransitionsTestExecutor.java Tue May 15 18:03:31 2018 +0530 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,66 +0,0 @@ -/* - * Copyright (c) 2014, 2016, Oracle and/or 
its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -package compiler.tiered; - -import compiler.whitebox.CompilerWhiteBoxTest; -import jdk.test.lib.process.OutputAnalyzer; -import jdk.test.lib.process.ProcessTools; - -import java.lang.management.ManagementFactory; -import java.lang.management.RuntimeMXBean; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; - -/** - * Executes given test in a separate VM with enabled Tiered Compilation for - * CompilationPolicyChoice 2 and 3 - */ -public class TransitionsTestExecutor { - public static void main(String[] args) throws Throwable { - if (CompilerWhiteBoxTest.skipOnTieredCompilation(false)) { - return; - } - if (args.length != 1) { - throw new Error("TESTBUG: Test name should be specified"); - } - executeTestFor(2, args[0]); - executeTestFor(3, args[0]); - } - - private static void executeTestFor(int compilationPolicy, String testName) throws Throwable { - String policy = "-XX:CompilationPolicyChoice=" + compilationPolicy; - - // Get runtime arguments including VM options given to this executor - RuntimeMXBean runtime = ManagementFactory.getRuntimeMXBean(); - List vmArgs = runtime.getInputArguments(); - - // Construct execution command with compilation policy choice and test name - List args = new ArrayList<>(vmArgs); - Collections.addAll(args, policy, testName); - - OutputAnalyzer out = ProcessTools.executeTestJvm(args.toArray(new String[args.size()])); - out.shouldHaveExitValue(0); - } -} diff -r 9822dd521c15 -r d93ae85b18c1 test/hotspot/jtreg/gc/concurrent_phase_control/TestConcurrentPhaseControlG1.java --- a/test/hotspot/jtreg/gc/concurrent_phase_control/TestConcurrentPhaseControlG1.java Tue May 15 18:03:31 2018 +0530 +++ b/test/hotspot/jtreg/gc/concurrent_phase_control/TestConcurrentPhaseControlG1.java Tue May 15 10:13:52 2018 -0700 @@ -51,6 +51,7 @@ {"CONCURRENT_MARK", "Concurrent Mark [^FR]"}, {"IDLE", null}, // Resume IDLE before testing subphases {"MARK_FROM_ROOTS", "Concurrent Mark From Roots"}, + {"PRECLEAN", 
"Concurrent Preclean"}, {"BEFORE_REMARK", null}, {"REMARK", "Pause Remark"}, {"REBUILD_REMEMBERED_SETS", "Concurrent Rebuild Remembered Sets"}, diff -r 9822dd521c15 -r d93ae85b18c1 test/hotspot/jtreg/gc/concurrent_phase_control/TestConcurrentPhaseControlG1Basics.java --- a/test/hotspot/jtreg/gc/concurrent_phase_control/TestConcurrentPhaseControlG1Basics.java Tue May 15 18:03:31 2018 +0530 +++ b/test/hotspot/jtreg/gc/concurrent_phase_control/TestConcurrentPhaseControlG1Basics.java Tue May 15 10:13:52 2018 -0700 @@ -51,6 +51,7 @@ "SCAN_ROOT_REGIONS", "CONCURRENT_MARK", "MARK_FROM_ROOTS", + "PRECLEAN", "BEFORE_REMARK", "REMARK", "REBUILD_REMEMBERED_SETS", diff -r 9822dd521c15 -r d93ae85b18c1 test/hotspot/jtreg/runtime/7116786/Test7116786.java --- a/test/hotspot/jtreg/runtime/7116786/Test7116786.java Tue May 15 18:03:31 2018 +0530 +++ b/test/hotspot/jtreg/runtime/7116786/Test7116786.java Tue May 15 10:13:52 2018 -0700 @@ -94,7 +94,7 @@ "Bad type on operand stack"), new Case("case02", "stackMapFrame.cpp", true, "get_local", - "local index out-of-bounds", + "local index out of bounds", "Local variable table overflow"), new Case("case03", "stackMapFrame.cpp", true, "get_local", @@ -102,7 +102,7 @@ "Bad local variable type"), new Case("case04", "stackMapFrame.cpp", true, "get_local_2", - "local index out-of-bounds [type2]", + "local index out of bounds [type2]", "get long/double overflows locals"), new Case("case05", "stackMapFrame.cpp", true, "get_local_2", @@ -115,11 +115,11 @@ "Bad local variable type"), new Case("case07", "stackMapFrame.cpp", true, "set_local", - "local index out-of-bounds", + "local index out of bounds", "Local variable table overflow"), new Case("case08", "stackMapFrame.cpp", true, "set_local_2", - "local index out-of-bounds [type2]", + "local index out of bounds [type2]", "Local variable table overflow"), new Case("case09", "stackMapFrame.hpp", true, "push_stack", @@ -322,7 +322,7 @@ "Stack map does not match the one at exception handler "), new 
Case("case54", "verifier.cpp", true, "verify_cp_index", - "constant pool index is out-of-bounds", + "constant pool index is out of bounds", "Illegal constant pool index "), new Case("case55", "verifier.cpp", true, "verify_cp_type", diff -r 9822dd521c15 -r d93ae85b18c1 test/hotspot/jtreg/runtime/Metaspace/PrintMetaspaceDcmd.java --- a/test/hotspot/jtreg/runtime/Metaspace/PrintMetaspaceDcmd.java Tue May 15 18:03:31 2018 +0530 +++ b/test/hotspot/jtreg/runtime/Metaspace/PrintMetaspaceDcmd.java Tue May 15 10:13:52 2018 -0700 @@ -33,8 +33,8 @@ * @library /test/lib * @modules java.base/jdk.internal.misc * java.management - * @run main/othervm -XX:MaxMetaspaceSize=201M -XX:+VerifyMetaspace -XX:+UseCompressedClassPointers PrintMetaspaceDcmd with-compressed-class-space - * @run main/othervm -XX:MaxMetaspaceSize=201M -XX:+VerifyMetaspace -XX:-UseCompressedClassPointers PrintMetaspaceDcmd without-compressed-class-space + * @run main/othervm -XX:MaxMetaspaceSize=201M -Xmx100M -XX:+UseCompressedOops -XX:+UseCompressedClassPointers PrintMetaspaceDcmd with-compressed-class-space + * @run main/othervm -XX:MaxMetaspaceSize=201M -Xmx100M -XX:-UseCompressedOops -XX:-UseCompressedClassPointers PrintMetaspaceDcmd without-compressed-class-space */ public class PrintMetaspaceDcmd { diff -r 9822dd521c15 -r d93ae85b18c1 test/hotspot/jtreg/runtime/appcds/jigsaw/modulepath/MainModuleOnly.java --- a/test/hotspot/jtreg/runtime/appcds/jigsaw/modulepath/MainModuleOnly.java Tue May 15 18:03:31 2018 +0530 +++ b/test/hotspot/jtreg/runtime/appcds/jigsaw/modulepath/MainModuleOnly.java Tue May 15 10:13:52 2018 -0700 @@ -173,5 +173,14 @@ "-m", TEST_MODULE1) .assertAbnormalExit( "A jar/jimage file is not the one used while building the shared archive file:"); + // create an archive with a non-empty directory in the --module-path. + // The dumping process will exit with an error due to non-empty directory + // in the --module-path. 
+ output = TestCommon.createArchive(destJar.toString(), appClasses, + "-Xlog:class+load=trace", + "--module-path", MODS_DIR.toString(), + "-m", TEST_MODULE1); + output.shouldHaveExitValue(1) + .shouldMatch("Error: non-empty directory.*com.simple"); } } diff -r 9822dd521c15 -r d93ae85b18c1 test/hotspot/jtreg/runtime/exceptionMsgs/ArrayIndexOutOfBoundsException/ArrayIndexOutOfBoundsExceptionTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/hotspot/jtreg/runtime/exceptionMsgs/ArrayIndexOutOfBoundsException/ArrayIndexOutOfBoundsExceptionTest.java Tue May 15 10:13:52 2018 -0700 @@ -0,0 +1,443 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018 SAP SE. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/** + * @test + * @summary Test extended ArrayIndexOutOfBoundsException message. The + * message lists information about the array and the indexes involved. 
+ * @compile ArrayIndexOutOfBoundsExceptionTest.java + * @run testng ArrayIndexOutOfBoundsExceptionTest + * @run testng/othervm -Xcomp -XX:-TieredCompilation ArrayIndexOutOfBoundsExceptionTest + * @run testng/othervm -Xcomp -XX:TieredStopAtLevel=1 ArrayIndexOutOfBoundsExceptionTest + */ + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; +import java.util.ArrayList; + +import org.testng.annotations.Test; + +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertNull; +import static org.testng.Assert.assertNotNull; +import static org.testng.Assert.assertTrue; +import static org.testng.Assert.fail; + +/** + * Tests the detailed messages of the ArrayIndexOutOfBoundsException. + */ +public class ArrayIndexOutOfBoundsExceptionTest { + + // Some fields used in the test. + static int[] staticArray = new int[0]; + static long[][] staticLongArray = new long[0][0]; + ArrayList names = new ArrayList<>(); + ArrayList curr; + + public static void main(String[] args) { + ArrayIndexOutOfBoundsExceptionTest t = new ArrayIndexOutOfBoundsExceptionTest(); + try { + t.testAIOOBMessages(); + } catch (Exception e) {} + } + + /** + * + */ + public static class ArrayGenerator { + + /** + * @param dummy1 + * @return Object Array + */ + public static Object[] arrayReturner(boolean dummy1) { + return new Object[0]; + } + + /** + * @param dummy1 + * @param dummy2 + * @param dummy3 + * @return Object Array + */ + public Object[] returnMyArray(double dummy1, long dummy2, short dummy3) { + return new Object[0]; + } + } + + /** + * + */ + @Test + public void testAIOOBMessages() { + boolean[] za1 = new boolean[0]; + byte[] ba1 = new byte[0]; + short[] sa1 = new short[0]; + char[] ca1 = new char[0]; + int[] ia1 = new int[0]; + long[] la1 = new long[0]; + float[] fa1 = new float[0]; + double[] da1 = new double[0]; + Object[] oa1 = new Object[10]; + Object[] oa2 = new 
Object[5]; + + boolean[] za2 = new boolean[10]; + boolean[] za3 = new boolean[5]; + byte[] ba2 = new byte[10]; + byte[] ba3 = new byte[5]; + short[] sa2 = new short[10]; + short[] sa3 = new short[5]; + char[] ca2 = new char[10]; + char[] ca3 = new char[5]; + int[] ia2 = new int[10]; + int[] ia3 = new int[5]; + long[] la2 = new long[10]; + long[] la3 = new long[5]; + float[] fa2 = new float[10]; + float[] fa3 = new float[5]; + double[] da2 = new double[10]; + double[] da3 = new double[5]; + + try { + System.out.println(za1[-5]); + fail(); + } + catch (ArrayIndexOutOfBoundsException e) { + assertEquals(e.getMessage(), + "Index -5 out of bounds for length 0"); + } + + try { + System.out.println(ba1[0]); + fail(); + } + catch (ArrayIndexOutOfBoundsException e) { + assertEquals(e.getMessage(), + "Index 0 out of bounds for length 0"); + } + + try { + System.out.println(sa1[0]); + fail(); + } + catch (ArrayIndexOutOfBoundsException e) { + assertEquals(e.getMessage(), + "Index 0 out of bounds for length 0"); + } + + try { + System.out.println(ca1[0]); + fail(); + } + catch (ArrayIndexOutOfBoundsException e) { + assertEquals(e.getMessage(), + "Index 0 out of bounds for length 0"); + } + + try { + System.out.println(ia1[0]); + fail(); + } + catch (ArrayIndexOutOfBoundsException e) { + assertEquals(e.getMessage(), + "Index 0 out of bounds for length 0"); + } + + try { + System.out.println(la1[0]); + fail(); + } + catch (ArrayIndexOutOfBoundsException e) { + assertEquals(e.getMessage(), + "Index 0 out of bounds for length 0"); + } + + try { + System.out.println(fa1[0]); + fail(); + } + catch (ArrayIndexOutOfBoundsException e) { + assertEquals(e.getMessage(), + "Index 0 out of bounds for length 0"); + } + + try { + System.out.println(da1[0]); + fail(); + } + catch (ArrayIndexOutOfBoundsException e) { + assertEquals(e.getMessage(), + "Index 0 out of bounds for length 0"); + } + + try { + System.out.println(oa1[12]); + fail(); + } + catch (ArrayIndexOutOfBoundsException e) { + 
assertEquals(e.getMessage(), + "Index 12 out of bounds for length 10"); + } + + try { + System.out.println(za1[0] = false); + fail(); + } + catch (ArrayIndexOutOfBoundsException e) { + assertEquals(e.getMessage(), + "Index 0 out of bounds for length 0"); + } + + try { + System.out.println(ba1[0] = 0); + fail(); + } + catch (ArrayIndexOutOfBoundsException e) { + assertEquals(e.getMessage(), + "Index 0 out of bounds for length 0"); + } + + try { + System.out.println(sa1[0] = 0); + fail(); + } + catch (ArrayIndexOutOfBoundsException e) { + assertEquals(e.getMessage(), + "Index 0 out of bounds for length 0"); + } + + try { + System.out.println(ca1[0] = 0); + fail(); + } + catch (ArrayIndexOutOfBoundsException e) { + assertEquals(e.getMessage(), + "Index 0 out of bounds for length 0"); + } + + try { + System.out.println(ia1[0] = 0); + fail(); + } + catch (ArrayIndexOutOfBoundsException e) { + assertEquals(e.getMessage(), + "Index 0 out of bounds for length 0"); + } + + try { + System.out.println(la1[0] = 0); + fail(); + } + catch (ArrayIndexOutOfBoundsException e) { + assertEquals(e.getMessage(), + "Index 0 out of bounds for length 0"); + } + + try { + System.out.println(fa1[0] = 0); + fail(); + } + catch (ArrayIndexOutOfBoundsException e) { + assertEquals(e.getMessage(), + "Index 0 out of bounds for length 0"); + } + + try { + System.out.println(da1[0] = 0); + fail(); + } + catch (ArrayIndexOutOfBoundsException e) { + assertEquals(e.getMessage(), + "Index 0 out of bounds for length 0"); + } + + try { + System.out.println(oa1[-2] = null); + fail(); + } + catch (ArrayIndexOutOfBoundsException e) { + assertEquals(e.getMessage(), + "Index -2 out of bounds for length 10"); + } + + try { + assertTrue((ArrayGenerator.arrayReturner(false))[0] == null); + fail(); + } catch (ArrayIndexOutOfBoundsException e) { + assertEquals(e.getMessage(), + "Index 0 out of bounds for length 0"); + } + try { + staticArray[0] = 2; + fail(); + } catch (ArrayIndexOutOfBoundsException e) { + 
assertEquals(e.getMessage(), + "Index 0 out of bounds for length 0"); + } + + // Test all five possible messages of arraycopy exceptions thrown in ObjArrayKlass::copy_array(). + + try { + System.arraycopy(oa1, -17, oa2, 0, 5); + fail(); + } catch (ArrayIndexOutOfBoundsException e) { + assertEquals(e.getMessage(), + "arraycopy: source index -17 out of bounds for object array[10]"); + } + + try { + System.arraycopy(oa1, 2, oa2, -18, 5); + fail(); + } catch (ArrayIndexOutOfBoundsException e) { + assertEquals(e.getMessage(), + "arraycopy: destination index -18 out of bounds for object array[5]"); + } + + try { + System.arraycopy(oa1, 2, oa2, 0, -19); + fail(); + } catch (ArrayIndexOutOfBoundsException e) { + assertEquals(e.getMessage(), + "arraycopy: length -19 is negative"); + } + + try { + System.arraycopy(oa1, 8, oa2, 0, 5); + fail(); + } catch (ArrayIndexOutOfBoundsException e) { + assertEquals(e.getMessage(), + "arraycopy: last source index 13 out of bounds for object array[10]"); + } + + try { + System.arraycopy(oa1, 1, oa2, 0, 7); + fail(); + } catch (ArrayIndexOutOfBoundsException e) { + assertEquals(e.getMessage(), + "arraycopy: last destination index 7 out of bounds for object array[5]"); + } + + // Test all five possible messages of arraycopy exceptions thrown in TypeArrayKlass::copy_array(). 
+ + try { + System.arraycopy(da2, -17, da3, 0, 5); + fail(); + } catch (ArrayIndexOutOfBoundsException e) { + assertEquals(e.getMessage(), + "arraycopy: source index -17 out of bounds for double[10]"); + } + + try { + System.arraycopy(da2, 2, da3, -18, 5); + fail(); + } catch (ArrayIndexOutOfBoundsException e) { + assertEquals(e.getMessage(), + "arraycopy: destination index -18 out of bounds for double[5]"); + } + + try { + System.arraycopy(da2, 2, da3, 0, -19); + fail(); + } catch (ArrayIndexOutOfBoundsException e) { + assertEquals(e.getMessage(), + "arraycopy: length -19 is negative"); + } + + try { + System.arraycopy(da2, 8, da3, 0, 5); + fail(); + } catch (ArrayIndexOutOfBoundsException e) { + assertEquals(e.getMessage(), + "arraycopy: last source index 13 out of bounds for double[10]"); + } + + try { + System.arraycopy(da2, 1, da3, 0, 7); + fail(); + } catch (ArrayIndexOutOfBoundsException e) { + assertEquals(e.getMessage(), + "arraycopy: last destination index 7 out of bounds for double[5]"); + } + + // Test all possible basic types in the messages of arraycopy exceptions thrown in TypeArrayKlass::copy_array(). 
+ + try { + System.arraycopy(za2, -17, za3, 0, 5); + fail(); + } catch (ArrayIndexOutOfBoundsException e) { + assertEquals(e.getMessage(), + "arraycopy: source index -17 out of bounds for boolean[10]"); + } + + try { + System.arraycopy(ba2, 2, ba3, -18, 5); + fail(); + } catch (ArrayIndexOutOfBoundsException e) { + assertEquals(e.getMessage(), + "arraycopy: destination index -18 out of bounds for byte[5]"); + } + + try { + System.arraycopy(sa2, 2, sa3, 0, -19); + fail(); + } catch (ArrayIndexOutOfBoundsException e) { + assertEquals(e.getMessage(), + "arraycopy: length -19 is negative"); + } + + try { + System.arraycopy(ca2, 8, ca3, 0, 5); + fail(); + } catch (ArrayIndexOutOfBoundsException e) { + assertEquals(e.getMessage(), + "arraycopy: last source index 13 out of bounds for char[10]"); + } + + try { + System.arraycopy(ia2, 2, ia3, 0, -19); + fail(); + } catch (ArrayIndexOutOfBoundsException e) { + assertEquals(e.getMessage(), + "arraycopy: length -19 is negative"); + } + + try { + System.arraycopy(la2, 1, la3, 0, 7); + fail(); + } catch (ArrayIndexOutOfBoundsException e) { + assertEquals(e.getMessage(), + "arraycopy: last destination index 7 out of bounds for long[5]"); + } + + try { + System.arraycopy(fa2, 1, fa3, 0, 7); + fail(); + } catch (ArrayIndexOutOfBoundsException e) { + assertEquals(e.getMessage(), + "arraycopy: last destination index 7 out of bounds for float[5]"); + } + } +} diff -r 9822dd521c15 -r d93ae85b18c1 test/hotspot/jtreg/serviceability/sa/ClhsdbCDSCore.java --- a/test/hotspot/jtreg/serviceability/sa/ClhsdbCDSCore.java Tue May 15 18:03:31 2018 +0530 +++ b/test/hotspot/jtreg/serviceability/sa/ClhsdbCDSCore.java Tue May 15 10:13:52 2018 -0700 @@ -166,6 +166,9 @@ "No suitable match for type of address")); expStrMap.put("printall", List.of( "aload_0", + "_nofast_aload_0", + "_nofast_getfield", + "_nofast_putfield", "Constant Pool of", "public static void main(java.lang.String[])", "Bytecode", @@ -175,6 +178,8 @@ "invokedynamic")); 
unExpStrMap.put("printall", List.of( "sun.jvm.hotspot.types.WrongTypeException", + "illegal code", + "Failure occurred at bci", "No suitable match for type of address")); test.runOnCore(TEST_CDS_CORE_FILE_NAME, cmds, expStrMap, unExpStrMap); } catch (Exception ex) { diff -r 9822dd521c15 -r d93ae85b18c1 test/hotspot/jtreg/serviceability/sa/ClhsdbCDSJstackPrintAll.java --- a/test/hotspot/jtreg/serviceability/sa/ClhsdbCDSJstackPrintAll.java Tue May 15 18:03:31 2018 +0530 +++ b/test/hotspot/jtreg/serviceability/sa/ClhsdbCDSJstackPrintAll.java Tue May 15 10:13:52 2018 -0700 @@ -24,7 +24,7 @@ /* * @test * @bug 8174994 - * @summary Test the clhsdb commands 'jstack', 'printall' with CDS enabled + * @summary Test the clhsdb commands 'jstack', 'printall', 'where' with CDS enabled * @requires vm.cds * @library /test/lib * @run main/othervm/timeout=2400 -Xmx1g ClhsdbCDSJstackPrintAll @@ -77,7 +77,7 @@ return; } - cmds = List.of("jstack -v", "printall"); + cmds = List.of("jstack -v", "printall", "where -a"); Map> expStrMap = new HashMap<>(); Map> unExpStrMap = new HashMap<>(); @@ -92,6 +92,9 @@ "No suitable match for type of address")); expStrMap.put("printall", List.of( "aload_0", + "_nofast_aload_0", + "_nofast_getfield", + "_nofast_putfield", "Constant Pool of", "public static void main(java.lang.String[])", "Bytecode", @@ -100,7 +103,15 @@ "Exception Table", "invokedynamic")); unExpStrMap.put("printall", List.of( - "No suitable match for type of address")); + "No suitable match for type of address", + "illegal code", + "Failure occurred at bci")); + expStrMap.put("where -a", List.of( + "Java Stack Trace for main", + "public static void main")); + unExpStrMap.put("where -a", List.of( + "illegal code", + "Failure occurred at bci")); test.run(theApp.getPid(), cmds, expStrMap, unExpStrMap); } catch (Exception ex) { throw new RuntimeException("Test ERROR " + ex, ex); diff -r 9822dd521c15 -r d93ae85b18c1 test/hotspot/jtreg/serviceability/sa/ClhsdbLongConstant.java --- 
a/test/hotspot/jtreg/serviceability/sa/ClhsdbLongConstant.java Tue May 15 18:03:31 2018 +0530 +++ b/test/hotspot/jtreg/serviceability/sa/ClhsdbLongConstant.java Tue May 15 10:13:52 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -71,7 +71,14 @@ unExpStrMap.put("longConstant jtreg::test", List.of( "Error: java.lang.RuntimeException: No long constant named")); - test.run(theApp.getPid(), cmds, expStrMap, unExpStrMap); + String longConstantOutput = test.run(theApp.getPid(), cmds, expStrMap, unExpStrMap); + + if (longConstantOutput == null) { + // Output could be null due to attach permission issues + // and if we are skipping this. + return; + } + checkForTruncation(longConstantOutput); } catch (Exception ex) { throw new RuntimeException("Test ERROR " + ex, ex); } finally { @@ -79,4 +86,41 @@ } System.out.println("Test PASSED"); } + + private static void checkForTruncation(String longConstantOutput) throws Exception { + + // Expected values obtained from the hash_mask_in_place definition in markOop.hpp + + // Expected output snippet is of the form (on x64-64): + // ... + // longConstant VM_Version::CPU_SHA 17179869184 + // longConstant markOopDesc::biased_lock_bits 1 + // longConstant markOopDesc::age_shift 3 + // longConstant markOopDesc::hash_mask_in_place 549755813632 + // ... + + checkLongValue("markOopDesc::hash_mask_in_place", + longConstantOutput, + Platform.is64bit() ? 
549755813632L: 4294967168L); + + String arch = System.getProperty("os.arch"); + if (arch.equals("amd64") || arch.equals("i386") || arch.equals("x86")) { + // Expected value obtained from the CPU_SHA definition in vm_version_x86.hpp + checkLongValue("VM_Version::CPU_SHA", + longConstantOutput, + 17179869184L); + } + } + + private static void checkLongValue(String constName, String longConstantOutput, + long checkValue) throws Exception { + + String[] snippets = longConstantOutput.split(constName); + String[] words = snippets[1].split("\\R"); + long readValue = Long.parseLong(words[0].trim()); + if (readValue != checkValue) { + throw new Exception ("Reading " + constName + ". Expected " + checkValue + + ". Obtained " + readValue + " instead."); + } + } } diff -r 9822dd521c15 -r d93ae85b18c1 test/jdk/ProblemList.txt --- a/test/jdk/ProblemList.txt Tue May 15 18:03:31 2018 +0530 +++ b/test/jdk/ProblemList.txt Tue May 15 10:13:52 2018 -0700 @@ -765,8 +765,6 @@ tools/pack200/CommandLineTests.java 8059906 generic-all -tools/launcher/FXLauncherTest.java 8068049 linux-all,macosx-all - tools/jimage/JImageExtractTest.java 8198405,8198819 generic-all tools/jimage/JImageListTest.java 8198405 windows-all diff -r 9822dd521c15 -r d93ae85b18c1 test/jdk/TEST.groups --- a/test/jdk/TEST.groups Tue May 15 18:03:31 2018 +0530 +++ b/test/jdk/TEST.groups Tue May 15 10:13:52 2018 -0700 @@ -25,23 +25,33 @@ # Tiered testing definitions # +# When adding tests to tier1, make sure they end up in one of the tier1_partX groups tier1 = \ - :jdk_lang \ + :tier1_part1 \ + :tier1_part2 \ + :tier1_part3 + +tier1_part1 = \ + :jdk_lang + +tier1_part2 = \ :jdk_util \ - :jdk_svc_sanity \ + -java/util/Arrays/TimSortStackSize2.java + +tier1_part3 = \ :build_sanity \ - sun/nio/cs/ISO8859x.java \ + :jdk_math \ + :jdk_svc_sanity \ java/nio/Buffer \ com/sun/crypto/provider/Cipher \ - :jdk_math \ - tools/pack200 \ - -java/util/Arrays/TimSortStackSize2.java + sun/nio/cs/ISO8859x.java \ + tools/pack200 +# When 
adding tests to tier2, make sure they end up in one of the tier2_partX groups tier2 = \ :tier2_part1 \ :tier2_part2 \ - :tier2_part3 \ - java/util/Arrays/TimSortStackSize2.java + :tier2_part3 # com/sun/crypto/provider/Cipher is in tier1 because of JDK-8132855 tier2_part1 = \ @@ -58,7 +68,9 @@ -sun/nio/cs/ISO8859x.java \ :jdk_other \ :jdk_text \ - :jdk_time + :jdk_time \ + java/util/Arrays/TimSortStackSize2.java + tier2_part3 = \ :jdk_net diff -r 9822dd521c15 -r d93ae85b18c1 test/jdk/java/lang/String/Strip.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/jdk/java/lang/String/Strip.java Tue May 15 10:13:52 2018 -0700 @@ -0,0 +1,101 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +import java.util.List; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +/** + * @test + * @summary Basic strip, stripLeading, stripTrailing functionality + * @bug 8200377 + * @run main/othervm Strip + */ + +public class Strip { + public static void main(String... arg) { + testStrip(); + testWhitespace(); + } + + /* + * Test basic stripping routines + */ + static void testStrip() { + equal(" abc ".strip(), "abc"); + equal(" abc ".stripLeading(), "abc "); + equal(" abc ".stripTrailing(), " abc"); + equal(" abc\u2022 ".strip(), "abc\u2022"); + equal(" abc\u2022 ".stripLeading(), "abc\u2022 "); + equal(" abc\u2022 ".stripTrailing(), " abc\u2022"); + equal("".strip(), ""); + equal("".stripLeading(), ""); + equal("".stripTrailing(), ""); + equal("\b".strip(), "\b"); + equal("\b".stripLeading(), "\b"); + equal("\b".stripTrailing(), "\b"); + } + + /* + * Test full whitespace range + */ + static void testWhitespace() { + StringBuilder sb = new StringBuilder(64); + IntStream.range(1, 0xFFFF).filter(c -> Character.isWhitespace(c)) + .forEach(c -> sb.append((char)c)); + String whiteSpace = sb.toString(); + + String testString = whiteSpace + "abc" + whiteSpace; + equal(testString.strip(), "abc"); + equal(testString.stripLeading(), "abc" + whiteSpace); + equal(testString.stripTrailing(), whiteSpace + "abc"); + } + + /* + * Report difference in result. + */ + static void report(String message, String inputTag, String input, + String outputTag, String output) { + System.err.println(message); + System.err.println(); + System.err.println(inputTag); + System.err.println(input.codePoints() + .mapToObj(c -> (Integer)c) + .collect(Collectors.toList())); + System.err.println(); + System.err.println(outputTag); + System.err.println(output.codePoints() + .mapToObj(c -> (Integer)c) + .collect(Collectors.toList())); + throw new RuntimeException(); + } + + /* + * Raise an exception if the two inputs are not equivalent. 
+ */ + static void equal(String input, String expected) { + if (input == null || expected == null || !expected.equals(input)) { + report("Failed equal", "Input:", input, "Expected:", expected); + } + } +} diff -r 9822dd521c15 -r d93ae85b18c1 test/jdk/java/lang/reflect/callerCache/AccessTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/jdk/java/lang/reflect/callerCache/AccessTest.java Tue May 15 10:13:52 2018 -0700 @@ -0,0 +1,171 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import java.lang.reflect.Constructor; +import java.lang.reflect.Field; +import java.lang.reflect.Method; +import java.lang.reflect.Modifier; +import java.util.concurrent.Callable; + +/** + * Each nested class tests a member of a specific access. + * + * Caller class is cached when setAccessible is not used to suppress the access + * check and only access allowed. If not accessible, caller class is not cached. 
+ */ +public class AccessTest { + public static class PublicConstructor implements Callable { + public Object call() throws Exception { + Constructor c = Members.class.getConstructor(); + return c.newInstance(); + } + } + + public static class PublicMethod extends Members implements Callable { + public Void call() throws Exception { + Method m = Members.class.getDeclaredMethod("publicMethod"); + m.invoke(new PublicMethod()); + return null; + } + } + + public static class ProtectedMethod extends Members implements Callable { + public Void call() throws Exception { + Method m = Members.class.getDeclaredMethod("protectedMethod"); + m.invoke(new ProtectedMethod()); + return null; + } + } + + /* + * private field is not accessible. So caller class is not cached. + */ + public static class PrivateMethod extends Members implements Callable { + public Void call() throws Exception { + Method m = Members.class.getDeclaredMethod("privateMethod"); + try { + m.invoke(new ProtectedMethod()); + } catch (IllegalAccessException e) { + } + return null; + } + } + + public static class PublicField extends Members implements Callable { + public Void call() throws Exception { + Field f = Members.class.getDeclaredField("publicField"); + f.get(new PublicField()); + return null; + } + } + + public static class ProtectedField extends Members implements Callable { + public Void call() throws Exception { + Field f = Members.class.getDeclaredField("protectedField"); + f.get(new ProtectedField()); + return null; + } + } + + /* + * private field is not accessible. So caller class is not cached. 
+ */ + public static class PrivateField implements Callable { + public Void call() throws Exception { + Field f = Members.class.getDeclaredField("privateField"); + try { + f.get(new Members()); + } catch (IllegalAccessException e) { + } + return null; + } + } + + /* + * Validate final field + */ + public static class FinalField implements Callable { + final Field f; + final boolean isStatic; + public FinalField(String name) throws Exception { + this.f = Members.class.getDeclaredField(name); + this.isStatic = Modifier.isStatic(f.getModifiers()); + if (!Modifier.isFinal(f.getModifiers())) { + throw new RuntimeException("not a final field"); + } + makeFinalNonFinal(f); + } + public Void call() throws Exception { + Members obj = isStatic ? null : new Members(); + try { + f.set(obj, 20); + checkValue(obj, 20); + } catch (IllegalAccessException e) { + throw e; + } + return null; + } + + void checkValue(Object obj, int expected) throws Exception { + int value = (int) f.get(obj); + if (value != expected) { + throw new RuntimeException("unexpectd value: " + value); + } + } + } + + public static class PublicFinalField extends FinalField { + public PublicFinalField() throws Exception { + super("publicFinalField"); + } + } + + public static class PrivateFinalField extends FinalField { + public PrivateFinalField() throws Exception { + super("privateFinalField"); + } + } + + public static class PublicStaticFinalField extends FinalField { + public PublicStaticFinalField() throws Exception { + super("publicStaticFinalField"); + } + } + + public static class PrivateStaticFinalField extends FinalField { + public PrivateStaticFinalField() throws Exception { + super("privateStaticFinalField"); + } + } + + private static void makeFinalNonFinal(Field f) throws ReflectiveOperationException { + Field modifiers = Field.class.getDeclaredField("modifiers"); + modifiers.setAccessible(true); + modifiers.set(f, modifiers.getInt(f) & ~Modifier.FINAL); + f.setAccessible(true); + + if 
(Modifier.isFinal(f.getModifiers())) { + throw new RuntimeException("should be a non-final field"); + } + } +} diff -r 9822dd521c15 -r d93ae85b18c1 test/jdk/java/lang/reflect/callerCache/Members.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/jdk/java/lang/reflect/callerCache/Members.java Tue May 15 10:13:52 2018 -0700 @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +public class Members { + public Members() {} + protected Members(boolean b) {} + private Members(int i) {} + + public void publicMethod() {} + protected void protectedMethod() {} + private void privateMethod() {} + + public Object publicField = new Object(); + protected Object protectedField = new Object(); + private Object privateField = new Object(); + + public final int publicFinalField = 10; + private final int privateFinalField = 10; + public static final int publicStaticFinalField = 10; + private static final int privateStaticFinalField = 10; +} diff -r 9822dd521c15 -r d93ae85b18c1 test/jdk/java/lang/reflect/callerCache/ReflectionCallerCacheTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/jdk/java/lang/reflect/callerCache/ReflectionCallerCacheTest.java Tue May 15 10:13:52 2018 -0700 @@ -0,0 +1,175 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test + * @bug 8202113 + * @summary Test the caller class loader is not kept strongly reachable + * by reflection API + * @library /test/lib/ + * @build ReflectionCallerCacheTest Members jdk.test.lib.compiler.CompilerUtils + * @run testng/othervm ReflectionCallerCacheTest + */ + +import java.io.IOException; +import java.lang.ref.Cleaner; +import java.lang.ref.WeakReference; +import java.lang.reflect.*; +import java.net.MalformedURLException; +import java.net.URL; +import java.net.URLClassLoader; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.concurrent.Callable; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.function.BooleanSupplier; + +import jdk.test.lib.compiler.CompilerUtils; +import org.testng.annotations.BeforeTest; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; + +public class ReflectionCallerCacheTest { + private static final Path CLASSES = Paths.get("classes"); + private static final ReflectionCallerCacheTest TEST = new ReflectionCallerCacheTest(); + + @BeforeTest + public void setup() throws IOException { + String src = System.getProperty("test.src", "."); + String classpath = System.getProperty("test.classes", "."); + boolean rc = CompilerUtils.compile(Paths.get(src, "AccessTest.java"), CLASSES, "-cp", classpath); + if (!rc) { + throw new RuntimeException("fail compilation"); + } + } + @DataProvider(name = "memberAccess") + public Object[][] memberAccess() { + return new Object[][] { + { "AccessTest$PublicConstructor" }, + { "AccessTest$PublicMethod" }, + { "AccessTest$PublicField" }, + { "AccessTest$ProtectedMethod" }, + { "AccessTest$ProtectedField" }, + { "AccessTest$PrivateMethod" }, + { "AccessTest$PrivateField"}, + { "AccessTest$PublicFinalField"}, + { "AccessTest$PrivateFinalField"}, + { "AccessTest$PublicStaticFinalField"}, + { "AccessTest$PrivateStaticFinalField"} + }; + } + + // Keep the root of the reflective 
objects strongly reachable + private final Constructor publicConstructor; + private final Method publicMethod; + private final Method protectedMethod; + private final Method privateMethod; + private final Field publicField; + private final Field protectedField; + private final Field privateField; + + ReflectionCallerCacheTest() { + try { + this.publicConstructor = Members.class.getConstructor(); + this.publicMethod = Members.class.getDeclaredMethod("publicMethod"); + this.publicField = Members.class.getDeclaredField("publicField"); + this.protectedMethod = Members.class.getDeclaredMethod("protectedMethod"); + this.protectedField = Members.class.getDeclaredField("protectedField"); + this.privateMethod = Members.class.getDeclaredMethod("privateMethod"); + this.privateField = Members.class.getDeclaredField("privateField"); + } catch (ReflectiveOperationException e) { + throw new RuntimeException(e); + } + } + + @Test(dataProvider = "memberAccess") + private void load(String classname) throws Exception { + WeakReference weakLoader = loadAndRunClass(classname); + + // Force garbage collection to trigger unloading of class loader + new ForceGC().await(() -> weakLoader.get() == null); + + if (weakLoader.get() != null) { + throw new RuntimeException("Class " + classname + " not unloaded!"); + } + } + + private WeakReference loadAndRunClass(String classname) throws Exception { + try (TestLoader loader = new TestLoader()) { + // Load member access class with custom class loader + Class c = Class.forName(classname, true, loader); + // access the reflective member + Callable callable = (Callable) c.newInstance(); + callable.call(); + return new WeakReference<>(loader); + } + } + + static class TestLoader extends URLClassLoader { + static URL[] toURLs() { + try { + return new URL[] { CLASSES.toUri().toURL() }; + } catch (MalformedURLException e) { + throw new Error(e); + } + } + + TestLoader() { + super("testloader", toURLs(), ClassLoader.getSystemClassLoader()); + } + } + + 
/** + * Utility class to invoke System.gc() + */ + static class ForceGC { + private final CountDownLatch cleanerInvoked = new CountDownLatch(1); + private final Cleaner cleaner = Cleaner.create(); + + ForceGC() { + cleaner.register(new Object(), () -> cleanerInvoked.countDown()); + } + + void doit() { + try { + for (int i = 0; i < 10; i++) { + System.gc(); + if (cleanerInvoked.await(1L, TimeUnit.SECONDS)) { + return; + } + } + } catch (InterruptedException unexpected) { + throw new AssertionError("unexpected InterruptedException"); + } + } + + void await(BooleanSupplier s) { + for (int i = 0; i < 10; i++) { + if (s.getAsBoolean()) return; + doit(); + } + throw new AssertionError("failed to satisfy condition"); + } + } +} diff -r 9822dd521c15 -r d93ae85b18c1 test/jdk/java/net/URL/OpenStream.java --- a/test/jdk/java/net/URL/OpenStream.java Tue May 15 18:03:31 2018 +0530 +++ b/test/jdk/java/net/URL/OpenStream.java Tue May 15 10:13:52 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,8 +22,9 @@ */ /* @test - @bug 4064962 - @summary openStream should work even when not using proxies + * @bug 4064962 8202708 + * @summary openStream should work even when not using proxies and + * UnknownHostException is thrown as expected. 
*/ import java.io.*; @@ -32,18 +33,36 @@ public class OpenStream { - static String badHttp = "http://foo.bar.baz/"; + private static final String badHttp = "http://foo.bar.baz/"; + private static final String badUnc = "file://h7qbp368oix47/not-exist.txt"; public static void main(String[] args) throws IOException { + testHttp(); + testUnc(); + } - URL u = new URL(badHttp); + static void testHttp() throws IOException { + checkThrows(badHttp); + } + + static void testUnc() throws IOException { + boolean isWindows = System.getProperty("os.name").startsWith("Windows"); + if (isWindows) { + checkThrows(badUnc); + } + } + + static void checkThrows(String url) throws IOException { + URL u = new URL(url); try { InputStream in = u.openStream(); - } catch (IOException x) { + } catch (UnknownHostException x) { + System.out.println("UnknownHostException is thrown as expected."); return; } - throw new RuntimeException("Expected UnknownHostException to be thrown"); + throw new RuntimeException("Expected UnknownHostException to be " + + "thrown for " + url); } +} -} diff -r 9822dd521c15 -r d93ae85b18c1 test/jdk/java/nio/channels/SocketChannel/LingerOnClose.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/jdk/java/nio/channels/SocketChannel/LingerOnClose.java Tue May 15 10:13:52 2018 -0700 @@ -0,0 +1,119 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* @test + * @bug 8203059 + * @summary Test SocketChannel.close with SO_LINGER enabled + */ + +import java.io.IOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.StandardSocketOptions; +import java.nio.ByteBuffer; +import java.nio.channels.SelectionKey; +import java.nio.channels.Selector; +import java.nio.channels.ServerSocketChannel; +import java.nio.channels.SocketChannel; + +public class LingerOnClose { + + private enum TestMode { + BLOCKING, + NON_BLOCKING, + NON_BLOCKING_AND_REGISTERED; + } + + public static void main(String[] args) throws IOException { + // blocking mode + test(TestMode.BLOCKING, -1); + test(TestMode.BLOCKING, 0); + test(TestMode.BLOCKING, 1); + + // non-blocking mode + test(TestMode.NON_BLOCKING, -1); + test(TestMode.NON_BLOCKING, 0); + test(TestMode.NON_BLOCKING, 1); + + // non-blocking mode, close while registered with Selector + test(TestMode.NON_BLOCKING_AND_REGISTERED, -1); + test(TestMode.NON_BLOCKING_AND_REGISTERED, 0); + test(TestMode.NON_BLOCKING_AND_REGISTERED, 1); + } + + /** + * Test closing a SocketChannel with SO_LINGER set to the given linger + * interval. If the linger interval is 0, it checks that the peer observes + * a connection reset (TCP RST). 
+ */ + static void test(TestMode mode, int interval) throws IOException { + SocketChannel sc = null; + SocketChannel peer = null; + Selector sel = null; + + try (ServerSocketChannel ssc = ServerSocketChannel.open()) { + ssc.bind(new InetSocketAddress(InetAddress.getLocalHost(), 0)); + + // establish loopback connection + sc = SocketChannel.open(ssc.getLocalAddress()); + peer = ssc.accept(); + + // configured blocking mode and register with Selector if needed + if (mode != TestMode.BLOCKING) + sc.configureBlocking(false); + if (mode == TestMode.NON_BLOCKING_AND_REGISTERED) { + sel = Selector.open(); + sc.register(sel, SelectionKey.OP_READ); + sel.selectNow(); + } + + // enable or disable SO_LINGER + sc.setOption(StandardSocketOptions.SO_LINGER, interval); + + // close channel and flush Selector if needed + sc.close(); + if (mode == TestMode.NON_BLOCKING_AND_REGISTERED) + sel.selectNow(); + + // read other end of connection, expect EOF or RST + ByteBuffer bb = ByteBuffer.allocate(100); + try { + int n = peer.read(bb); + if (interval == 0) { + throw new RuntimeException("RST expected"); + } else if (n != -1) { + throw new RuntimeException("EOF expected"); + } + } catch (IOException ioe) { + if (interval != 0) { + // exception not expected + throw ioe; + } + } + } finally { + if (sc != null) sc.close(); + if (peer != null) peer.close(); + if (sel != null) sel.close(); + } + } +} diff -r 9822dd521c15 -r d93ae85b18c1 test/jdk/java/rmi/server/UnicastServerRef/serialFilter/FilterUSRTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/jdk/java/rmi/server/UnicastServerRef/serialFilter/FilterUSRTest.java Tue May 15 10:13:52 2018 -0700 @@ -0,0 +1,207 @@ +/* + * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import java.io.InvalidClassException; +import java.io.ObjectInputFilter; +import java.io.Serializable; + +import java.rmi.Remote; +import java.rmi.RemoteException; +import java.rmi.UnmarshalException; + +import java.util.Objects; + +import sun.rmi.server.UnicastServerRef; +import sun.rmi.server.UnicastServerRef2; +import sun.rmi.transport.LiveRef; + +import org.testng.Assert; +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; + + +/* + * @test + * @modules java.rmi/sun.rmi.registry + * java.rmi/sun.rmi.server + * java.rmi/sun.rmi.transport + * java.rmi/sun.rmi.transport.tcp + * @run testng/othervm FilterUSRTest + * @summary Check objects exported with ObjectInputFilters via internal UnicastServerRef(2) + */ +public class FilterUSRTest { + + /** + * Data to test serialFilter call counts. + * - name + * - Object + * - expected count of calls to checkInput. 
+ * + * @return array of test data + */ + @DataProvider(name = "bindData") + static Object[][] bindObjects() { + Object[][] data = { + {"SimpleString", "SimpleString", 0}, + {"String", new XX("now is the time"), 1}, + {"String[]", new XX(new String[3]), 3}, + {"Long[4]", new XX(new Long[4]), 3}, + {"RejectME", new XX(new RejectME()), -1}, + }; + return data; + } + + /* + * Test exporting an object with a serialFilter using UnicastServerRef.exportObject(). + * Send some objects and check the number of calls to the serialFilter. + */ + @Test(dataProvider = "bindData") + public void UnicastServerRef(String name, Object obj, int expectedFilterCount) throws RemoteException { + try { + RemoteImpl impl = RemoteImpl.create(); + UnicastServerRef ref = new UnicastServerRef(new LiveRef(0), impl.checker); + + Echo client = (Echo) ref.exportObject(impl, null, false); + + int count = client.filterCount(obj); + System.out.printf("count: %d, obj: %s%n", count, obj); + Assert.assertEquals(count, expectedFilterCount, "wrong number of filter calls"); + } catch (RemoteException rex) { + if (expectedFilterCount == -1 && + UnmarshalException.class.equals(rex.getCause().getClass()) && + InvalidClassException.class.equals(rex.getCause().getCause().getClass())) { + return; // normal expected exception + } + rex.printStackTrace(); + Assert.fail("unexpected remote exception", rex); + } catch (Exception ex) { + Assert.fail("unexpected exception", ex); + } + } + + /* + * Test exporting an object with a serialFilter using UnicastServerRef2.exportObject() + * with explicit (but null) SocketFactories. + * Send some objects and check the number of calls to the serialFilter. 
+ */ + @Test(dataProvider = "bindData") + public void UnicastServerRef2(String name, Object obj, int expectedFilterCount) throws RemoteException { + try { + RemoteImpl impl = RemoteImpl.create(); + UnicastServerRef2 ref = new UnicastServerRef2(0, null, null, impl.checker); + + Echo client = (Echo) ref.exportObject(impl, null, false); + + int count = client.filterCount(obj); + System.out.printf("count: %d, obj: %s%n", count, obj); + Assert.assertEquals(count, expectedFilterCount, "wrong number of filter calls"); + } catch (RemoteException rex) { + if (expectedFilterCount == -1 && + UnmarshalException.class.equals(rex.getCause().getClass()) && + InvalidClassException.class.equals(rex.getCause().getCause().getClass())) { + return; // normal expected exception + } + rex.printStackTrace(); + Assert.fail("unexpected remote exception", rex); + } catch (Exception rex) { + Assert.fail("unexpected exception", rex); + } + } + + /** + * A simple Serializable holding an object that is passed by value. + * It and its contents are checked by the filter. + */ + static class XX implements Serializable { + private static final long serialVersionUID = 362498820763181265L; + + final Object obj; + + XX(Object obj) { + this.obj = obj; + } + + public String toString() { + return super.toString() + "//" + Objects.toString(obj); + } + } + + interface Echo extends Remote { + int filterCount(Object obj) throws RemoteException; + } + + /** + * This remote object just counts the calls to the serialFilter + * and returns it. The caller can check the number against + * what was expected for the object passed as an argument. + * A new RemoteImpl is used for each test so the count starts at zero again. 
+ */ + static class RemoteImpl implements Echo { + + private static final long serialVersionUID = 1L; + + transient Checker checker; + + static RemoteImpl create() throws RemoteException { + RemoteImpl impl = new RemoteImpl(new Checker()); + return impl; + } + + private RemoteImpl(Checker checker) throws RemoteException { + this.checker = checker; + } + + public int filterCount(Object obj) throws RemoteException { + return checker.count(); + } + + } + + /** + * A ObjectInputFilter that just counts when it is called. + */ + static class Checker implements ObjectInputFilter { + int count; + + @Override + public Status checkInput(ObjectInputFilter.FilterInfo info) { + if (info.serialClass() == RejectME.class) { + return Status.REJECTED; + } + count++; + return Status.UNDECIDED; + } + + public int count() { + return count; + } + } + + /** + * A class to be rejected by the filter. + */ + static class RejectME implements Serializable { + private static final long serialVersionUID = 2L; + } + +} diff -r 9822dd521c15 -r d93ae85b18c1 test/jdk/sun/security/tools/jarsigner/JarSigning.keystore Binary file test/jdk/sun/security/tools/jarsigner/JarSigning.keystore has changed diff -r 9822dd521c15 -r d93ae85b18c1 test/jdk/sun/security/tools/jarsigner/JarSigning.keystore.README --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/jdk/sun/security/tools/jarsigner/JarSigning.keystore.README Tue May 15 10:13:52 2018 -0700 @@ -0,0 +1,6 @@ +#JarSigning.keystore is generated with + +rm JarSigning.keystore +keytool -genkeypair -keystore JarSigning.keystore -storepass bbbbbb -keypass bbbbbb -alias b -dname CN=b -keyalg DSA +keytool -genkeypair -keystore JarSigning.keystore -storepass bbbbbb -keypass bbbbbb -alias c -dname CN=c -keyalg RSA +keytool -genkeypair -keystore JarSigning.keystore -storepass bbbbbb -keypass bbbbbb -alias d -dname CN=d -keyalg EC diff -r 9822dd521c15 -r d93ae85b18c1 test/jdk/sun/security/tools/jarsigner/JarSigningNonAscii.java --- 
a/test/jdk/sun/security/tools/jarsigner/JarSigningNonAscii.java Tue May 15 18:03:31 2018 +0530 +++ b/test/jdk/sun/security/tools/jarsigner/JarSigningNonAscii.java Tue May 15 10:13:52 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -23,7 +23,7 @@ /* * @test - * @bug 4924188 + * @bug 4924188 8202816 * @summary sign a JAR file that has entry names with non-ASCII characters. * @modules jdk.jartool/sun.security.tools.jarsigner * @run main/othervm JarSigningNonAscii @@ -42,7 +42,6 @@ private static String keystore; public static void main(String[] args) throws Exception { - Security.setProperty("jdk.jar.disabledAlgorithms", ""); String srcDir = System.getProperty("test.src", "."); String destDir = System.getProperty("test.classes", "."); diff -r 9822dd521c15 -r d93ae85b18c1 test/jdk/sun/util/resources/cldr/Bug8202764.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/jdk/sun/util/resources/cldr/Bug8202764.java Tue May 15 10:13:52 2018 -0700 @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + + /* + * @test + * @bug 8202764 + * @modules jdk.localedata + * @summary Checks time zone names are consistent with aliased ids, + * between DateFormatSymbols.getZoneStrings() and getDisplayName() + * of TimeZone/ZoneId classes + * @run testng/othervm Bug8202764 + */ + +import static org.testng.Assert.assertEquals; + +import java.time.ZoneId; +import java.time.format.TextStyle; +import java.text.DateFormatSymbols; +import java.util.Arrays; +import java.util.Locale; +import java.util.Set; +import java.util.TimeZone; + +import org.testng.annotations.Test; + +public class Bug8202764 { + + @Test + public void testAliasedTZs() { + Set zoneIds = ZoneId.getAvailableZoneIds(); + Arrays.stream(DateFormatSymbols.getInstance(Locale.US).getZoneStrings()) + .forEach(zone -> { + System.out.println(zone[0]); + TimeZone tz = TimeZone.getTimeZone(zone[0]); + assertEquals(zone[1], tz.getDisplayName(false, TimeZone.LONG, Locale.US)); + assertEquals(zone[2], tz.getDisplayName(false, TimeZone.SHORT, Locale.US)); + assertEquals(zone[3], tz.getDisplayName(true, TimeZone.LONG, Locale.US)); + assertEquals(zone[4], tz.getDisplayName(true, TimeZone.SHORT, Locale.US)); + if (zoneIds.contains(zone[0])) { + // Some of the ids, e.g. 
three-letter ids are not supported in ZoneId + ZoneId zi = tz.toZoneId(); + assertEquals(zone[5], zi.getDisplayName(TextStyle.FULL, Locale.US)); + assertEquals(zone[6], zi.getDisplayName(TextStyle.SHORT, Locale.US)); + } + }); + } +} diff -r 9822dd521c15 -r d93ae85b18c1 test/jdk/tools/jlink/JLinkPluginsTest.java --- a/test/jdk/tools/jlink/JLinkPluginsTest.java Tue May 15 18:03:31 2018 +0530 +++ b/test/jdk/tools/jlink/JLinkPluginsTest.java Tue May 15 10:13:52 2018 -0700 @@ -77,14 +77,6 @@ helper.checkImage(imageDir, moduleName, res, null); } { - // Optimize Class.forName - String[] userOptions = {"--class-for-name"}; - String moduleName = "classforname"; - helper.generateDefaultJModule(moduleName, "composite2"); - Path imageDir = helper.generateDefaultImage(userOptions, moduleName).assertSuccess(); - helper.checkImage(imageDir, moduleName, null, null); - } - { // disable generate jli classes - JDK-8160063 String[] userOptions = {"--disable-plugin", "generate-jli-classes"}; String moduleName = "jlidisabled"; diff -r 9822dd521c15 -r d93ae85b18c1 test/jdk/tools/launcher/FXLauncherTest.java --- a/test/jdk/tools/launcher/FXLauncherTest.java Tue May 15 18:03:31 2018 +0530 +++ b/test/jdk/tools/launcher/FXLauncherTest.java Tue May 15 10:13:52 2018 -0700 @@ -23,19 +23,28 @@ /* * @test - * @bug 8001533 8004547 8035782 + * @library /test/lib + * @build FXLauncherTest jdk.test.lib.compiler.CompilerUtils + * @bug 8001533 8004547 8035782 8202553 * @summary Test launching FX application with java -jar - * Test uses main method and blank main method, a jfx app class and an incorrest + * Test uses main method and blank main method, a jfx app class and an incorrect * jfx app class, a main-class for the manifest, a bogus one and none. + * Now that FX is no longer bundled with the JDK, this test uses a + * "mock" javafx.graphics module to test the FX launcher. It also verifies + * that FX is, in fact, not included with the JDK. 
* All should execute except the incorrect fx app class entries. * @run main/othervm FXLauncherTest - * @key intermittent headful */ import java.io.File; +import java.nio.file.Path; +import java.nio.file.Paths; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; +import jdk.test.lib.compiler.CompilerUtils; + public class FXLauncherTest extends TestHelper { private static final String FX_MARKER_CLASS = "javafx.application.Application"; private static void line() { @@ -46,12 +55,21 @@ private static final File ManifestFile = new File("manifest.txt"); private static final File ScratchDir = new File("."); + private static final Path SRC_DIR = + TEST_SOURCES_DIR.toPath().resolve("mockfx/src"); + private static final Path MODS_DIR = Paths.get("mods"); + private static final String MODULE_DIR = MODS_DIR.toString(); + /* standard main class can be used as java main for fx app class */ static final String StdMainClass = "helloworld.HelloWorld"; static final String ExtMainClass = "helloworld.ExtHello"; static final String NonFXMainClass = "helloworld.HelloJava"; static int testcount = 0; + static final String LAUNCH_MODE_CLASS = "LM_CLASS"; + static final String LAUNCH_MODE_JAR = "LM_JAR"; + static final String LAUNCH_MODE_MODULE = "LM_MODULE"; + /* a main method and a blank. 
*/ static final String[] MAIN_METHODS = { "public static void main(String[] args) { launch(args); }", @@ -68,41 +86,19 @@ List contents = new ArrayList<>(); contents.add("package helloworld;"); contents.add("import javafx.application.Application;"); - contents.add("import javafx.event.ActionEvent;"); - contents.add("import javafx.event.EventHandler;"); - contents.add("import javafx.scene.Scene;"); - contents.add("import javafx.scene.control.Button;"); - contents.add("import javafx.scene.layout.StackPane;"); contents.add("import javafx.stage.Stage;"); contents.add("public class HelloWorld extends Application {"); contents.add(mainmethod); contents.add("@Override"); contents.add("public void start(Stage primaryStage) {"); - contents.add(" primaryStage.setTitle(\"Hello World!\");"); - contents.add(" Button btn = new Button();"); - contents.add(" btn.setText(\"Say 'Hello World'\");"); - contents.add(" btn.setOnAction(new EventHandler() {"); - contents.add(" @Override"); - contents.add(" public void handle(ActionEvent event) {"); - contents.add(" System.out.println(\"Hello World!\");"); - contents.add(" }"); - contents.add(" });"); - contents.add(" StackPane root = new StackPane();"); - contents.add(" root.getChildren().add(btn);"); - contents.add(" primaryStage.setScene(new Scene(root, 300, 250));"); - contents.add("// primaryStage.show(); no GUI for auto tests. "); - contents.add(" System.out.println(\"HelloWorld.primaryStage.show();\");"); - contents.add(" System.out.println(\"Parameters:\");" ); - contents.add(" for(String p : getParameters().getUnnamed())"); - contents.add(" System.out.println(\"parameter: \" + p );" ); - contents.add(" System.exit(0);"); + contents.add(" throw new InternalError(\"should never get here\");"); contents.add("}"); contents.add("}"); // Create and compile java source. 
MainJavaFile = new File(mainClass + JAVA_FILE_EXT); createFile(MainJavaFile, contents); - compile("-d", ".", mainClass + JAVA_FILE_EXT); + doFxCompile("-d", ".", mainClass + JAVA_FILE_EXT); } catch (java.io.IOException ioe) { ioe.printStackTrace(); throw new RuntimeException("Failed creating HelloWorld."); @@ -123,7 +119,7 @@ // Create and compile java source. MainJavaFile = new File(mainClass + JAVA_FILE_EXT); createFile(MainJavaFile, contents); - compile("-cp", ".", "-d", ".", mainClass + JAVA_FILE_EXT); + doFxCompile("-cp", ".", "-d", ".", mainClass + JAVA_FILE_EXT); } catch (java.io.IOException ioe) { ioe.printStackTrace(); throw new RuntimeException("Failed creating ExtHello."); @@ -148,7 +144,7 @@ // Create and compile java source. MainJavaFile = new File(mainClass + JAVA_FILE_EXT); createFile(MainJavaFile, contents); - compile("-cp", ".", "-d", ".", mainClass + JAVA_FILE_EXT); + doFxCompile("-cp", ".", "-d", ".", mainClass + JAVA_FILE_EXT); } catch (java.io.IOException ioe) { ioe.printStackTrace(); throw new RuntimeException("Failed creating HelloJava."); @@ -206,6 +202,41 @@ } } + public static void compileFXModule() { + final String JAVAFX_GRAPHICS_MODULE = "javafx.graphics"; + + try { + // Compile mockfx/src/javafx.graphics/** into mods/javafx.graphics + boolean compiled + = CompilerUtils.compile(SRC_DIR.resolve(JAVAFX_GRAPHICS_MODULE), + MODS_DIR.resolve(JAVAFX_GRAPHICS_MODULE)); + + if (!compiled) { + throw new RuntimeException("Error compiling mock javafx.graphics module"); + } + } catch (IOException ioe) { + throw new RuntimeException(ioe); + } + } + + static void doFxCompile(String...compilerArgs) { + compileFXModule(); + + List fxCompilerArgs = new ArrayList<>(); + fxCompilerArgs.add("--module-path=" + MODULE_DIR); + fxCompilerArgs.add("--add-modules=javafx.graphics"); + fxCompilerArgs.addAll(Arrays.asList(compilerArgs)); + compile(fxCompilerArgs.toArray(new String[fxCompilerArgs.size()])); + } + + static TestResult doFxExec(String...cmds) { + List 
fxCmds = new ArrayList<>(); + fxCmds.addAll(Arrays.asList(cmds)); + fxCmds.add(1, "--module-path=" + MODULE_DIR); + fxCmds.add(2, "--add-modules=javafx.graphics"); + return doExec(fxCmds.toArray(new String[fxCmds.size()])); + } + /* * Set Main-Class and iterate main_methods. * Try launching with both -jar and -cp methods, with and without FX main @@ -240,14 +271,21 @@ createFile(ManifestFile, createManifestContents(StdMainClass, fxMC)); createJar(FXtestJar, ManifestFile); String sTestJar = FXtestJar.getAbsolutePath(); + String launchMode; final TestResult tr; if (useCP) { - tr = doExec(javaCmd, "-cp", sTestJar, StdMainClass, APP_PARMS[0], APP_PARMS[1]); + tr = doFxExec(javaCmd, "-cp", sTestJar, StdMainClass, APP_PARMS[0], APP_PARMS[1]); + launchMode = LAUNCH_MODE_CLASS; } else { - tr = doExec(javaCmd, "-jar", sTestJar, APP_PARMS[0], APP_PARMS[1]); + tr = doFxExec(javaCmd, "-jar", sTestJar, APP_PARMS[0], APP_PARMS[1]); + launchMode = LAUNCH_MODE_JAR; } tr.checkPositive(); - if (tr.testStatus && tr.contains("HelloWorld.primaryStage.show()")) { + if (tr.testStatus) { + if (!tr.contains(launchMode)) { + System.err.println("ERROR: Did not find " + + launchMode + " in output!"); + } for (String p : APP_PARMS) { if (!tr.contains(p)) { System.err.println("ERROR: Did not find " @@ -291,14 +329,21 @@ createFile(ManifestFile, createManifestContents(ExtMainClass, fxMC)); createJar(FXtestJar, ManifestFile); String sTestJar = FXtestJar.getAbsolutePath(); + String launchMode; final TestResult tr; if (useCP) { - tr = doExec(javaCmd, "-cp", sTestJar, ExtMainClass, APP_PARMS[0], APP_PARMS[1]); + tr = doFxExec(javaCmd, "-cp", sTestJar, ExtMainClass, APP_PARMS[0], APP_PARMS[1]); + launchMode = LAUNCH_MODE_CLASS; } else { - tr = doExec(javaCmd, "-jar", sTestJar, APP_PARMS[0], APP_PARMS[1]); + tr = doFxExec(javaCmd, "-jar", sTestJar, APP_PARMS[0], APP_PARMS[1]); + launchMode = LAUNCH_MODE_JAR; } tr.checkPositive(); - if (tr.testStatus && tr.contains("HelloWorld.primaryStage.show()")) { 
+ if (tr.testStatus) { + if (!tr.contains(launchMode)) { + System.err.println("ERROR: Did not find " + + launchMode + " in output!"); + } for (String p : APP_PARMS) { if (!tr.contains(p)) { System.err.println("ERROR: Did not find " @@ -323,7 +368,7 @@ createFile(ManifestFile, createManifestContents(null, StdMainClass)); // No MC, but supply JAC createJar(FXtestJar, ManifestFile); String sTestJar = FXtestJar.getAbsolutePath(); - TestResult tr = doExec(javaCmd, "-jar", sTestJar, APP_PARMS[0], APP_PARMS[1]); + TestResult tr = doFxExec(javaCmd, "-jar", sTestJar, APP_PARMS[0], APP_PARMS[1]); tr.checkNegative(); // should abort if no Main-Class if (tr.testStatus) { if (!tr.contains("no main manifest attribute")) { @@ -363,9 +408,9 @@ final TestResult tr; if (useCP) { - tr = doExec(javaCmd, "-verbose:class", "-cp", sTestJar, NonFXMainClass, APP_PARMS[0], APP_PARMS[1]); + tr = doFxExec(javaCmd, "-verbose:class", "-cp", sTestJar, NonFXMainClass, APP_PARMS[0], APP_PARMS[1]); } else { - tr = doExec(javaCmd, "-verbose:class", "-jar", sTestJar, APP_PARMS[0], APP_PARMS[1]); + tr = doFxExec(javaCmd, "-verbose:class", "-jar", sTestJar, APP_PARMS[0], APP_PARMS[1]); } tr.checkPositive(); if (tr.testStatus) { @@ -390,7 +435,8 @@ } public static void main(String... args) throws Exception { - //check if fx is part of jdk + + // Ensure that FX is not part of jdk Class fxClass = null; try { fxClass = Class.forName(FX_MARKER_CLASS); @@ -398,20 +444,20 @@ // do nothing } if (fxClass != null) { - FXLauncherTest fxt = new FXLauncherTest(); - fxt.run(args); - if (testExitValue > 0) { - System.out.println("Total of " + testExitValue - + " failed. Test cases covered: " - + FXLauncherTest.testcount); - System.exit(1); - } else { - System.out.println("All tests pass. 
Test cases covered: " - + FXLauncherTest.testcount); - } + throw new RuntimeException("JavaFX modules erroneously included in the JDK"); + } + + FXLauncherTest fxt = new FXLauncherTest(); + fxt.run(args); + if (testExitValue > 0) { + System.out.println("Total of " + testExitValue + + " failed. Test cases covered: " + + FXLauncherTest.testcount); + System.exit(1); } else { - System.err.println("Warning: JavaFX components missing or not supported"); - System.err.println(" test passes vacuously."); - } + System.out.println("All tests pass. Test cases covered: " + + FXLauncherTest.testcount); + } } + } diff -r 9822dd521c15 -r d93ae85b18c1 test/jdk/tools/launcher/mockfx/src/javafx.graphics/com/sun/javafx/application/LauncherImpl.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/jdk/tools/launcher/mockfx/src/javafx.graphics/com/sun/javafx/application/LauncherImpl.java Tue May 15 10:13:52 2018 -0700 @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package com.sun.javafx.application; + +/** + * Mock JavaFX LauncherImpl class, used by FXLauncherTest. + */ +public class LauncherImpl { + /** + * This method is called by the Java launcher. + * + * @param launchName The path to a jar file, the application class name to launch, + * or the module and optional class name to launch + * @param launchMode The method of launching the application, one of LM_JAR, + * LM_CLASS, or LM_MODULE + * @param args Application arguments from the command line + */ + public static void launchApplication(final String launchName, + final String launchMode, + final String[] args) { + + System.out.println("LaunchName: " + launchName); + System.out.println("LaunchMode: " + launchMode); + System.out.println("Parameters:"); + for (String arg : args) { + System.out.println("parameter: " + arg); + } + System.exit(0); + } + + private LauncherImpl() { + throw new InternalError("should not get here"); + } + +} diff -r 9822dd521c15 -r d93ae85b18c1 test/jdk/tools/launcher/mockfx/src/javafx.graphics/javafx/application/Application.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/jdk/tools/launcher/mockfx/src/javafx.graphics/javafx/application/Application.java Tue May 15 10:13:52 2018 -0700 @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package javafx.application; + +import javafx.stage.Stage; + +/** + * Mock JavaFX Application class, used by FXLauncherTest. + */ +public abstract class Application { + public static void launch(Class appClass, String... args) { + throw new InternalError("should not get here"); + } + + public static void launch(String... args) { + throw new InternalError("should not get here"); + } + + public Application() {} + + public abstract void start(Stage primaryStage) throws Exception; +} diff -r 9822dd521c15 -r d93ae85b18c1 test/jdk/tools/launcher/mockfx/src/javafx.graphics/javafx/stage/Stage.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/jdk/tools/launcher/mockfx/src/javafx.graphics/javafx/stage/Stage.java Tue May 15 10:13:52 2018 -0700 @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package javafx.stage; + +/** + * Mock JavaFX Stage class, used by FXLauncherTest. + */ +public class Stage { + + public Stage() {} +} diff -r 9822dd521c15 -r d93ae85b18c1 test/jdk/tools/launcher/mockfx/src/javafx.graphics/module-info.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/jdk/tools/launcher/mockfx/src/javafx.graphics/module-info.java Tue May 15 10:13:52 2018 -0700 @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/** + * Mock javafx.graphics module with just those packages / classes needed + * to test the FX launcher. 
+ */ +module javafx.graphics { + exports javafx.application; + exports javafx.stage; + exports com.sun.javafx.application to + java.base; +} diff -r 9822dd521c15 -r d93ae85b18c1 test/langtools/jdk/javadoc/tool/EnablePreviewOption.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/langtools/jdk/javadoc/tool/EnablePreviewOption.java Tue May 15 10:13:52 2018 -0700 @@ -0,0 +1,122 @@ +/* + * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/** + * @test + * @bug 8199196 + * @summary Test --enable-preview option in javadoc + * @modules jdk.javadoc/jdk.javadoc.internal.tool + * @library /tools/lib + * @build toolbox.ToolBox toolbox.TestRunner + * @run main EnablePreviewOption + */ + +import java.io.IOException; +import java.io.PrintWriter; +import java.io.StringWriter; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.function.Predicate; + +import jdk.javadoc.internal.tool.Main; +import jdk.javadoc.internal.tool.Main.Result; + +import static jdk.javadoc.internal.tool.Main.Result.*; + +import toolbox.TestRunner; +import toolbox.ToolBox; + +public class EnablePreviewOption extends TestRunner { + public static void main(String... args) throws Exception { + new EnablePreviewOption().runTests(); + } + + ToolBox tb = new ToolBox(); + + Path file = Paths.get("C.java"); + String thisVersion = System.getProperty("java.specification.version"); + String prevVersion = String.valueOf(Integer.valueOf(thisVersion) - 1); + + EnablePreviewOption() throws IOException { + super(System.err); + tb.writeFile(file, "public class C { }"); + } + + @Test + public void testSource() { + runTest(List.of("--enable-preview", "-source", thisVersion), + OK, + out -> !out.contains("error") + && out.contains("Building tree for all the packages and classes...")); + } + + @Test + public void testRelease() { + runTest(List.of("--enable-preview", "--release", thisVersion), + OK, + out -> !out.contains("error") + && out.contains("Building tree for all the packages and classes...")); + } + + @Test + public void testNoVersion() { + runTest(List.of("--enable-preview"), + CMDERR, + out -> out.contains("error: --enable-preview must be used with either -source or --release")); + } + + @Test + public void testBadSource() { + runTest(List.of("--enable-preview", "-source", "BAD"), + ERROR, + out -> out.contains("error: invalid source 
release: BAD")); + } + + @Test + public void testOldSource() { + runTest(List.of("--enable-preview", "-source", prevVersion), + CMDERR, + out -> out.matches("(?s)error: invalid source release .* with --enable-preview.*")); + } + + private void runTest(List options, Result expectedResult, Predicate validate) { + System.err.println("running with options: " + options); + List args = new ArrayList<>(); + args.addAll(options); + args.add("-XDrawDiagnostics"); + args.add(file.toString()); + StringWriter out = new StringWriter(); + PrintWriter pw = new PrintWriter(out); + int actualResult = Main.execute(args.toArray(new String[0]), pw); + System.err.println("actual result=" + actualResult); + System.err.println("actual output=" + out.toString()); + if (actualResult != expectedResult.exitCode) + error("Exit code not as expected"); + if (!validate.test(out.toString())) { + error("Output not as expected"); + } + } +} diff -r 9822dd521c15 -r d93ae85b18c1 test/langtools/tools/javac/T8194998/BrokenErrorMessageTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/langtools/tools/javac/T8194998/BrokenErrorMessageTest.java Tue May 15 10:13:52 2018 -0700 @@ -0,0 +1,19 @@ +/* + * @test /nodynamiccopyright/ + * @bug 8194998 + * @summary broken error message for subclass of interface with private method + * @compile/fail/ref=BrokenErrorMessageTest.out -XDrawDiagnostics BrokenErrorMessageTest.java + */ + +class BrokenErrorMessageTest { + void foo() { + // there is no error in this case but it is an interesting test, ::test is a member of I so this is acceptable + Runnable test1 = ((I)(new I() {}))::test; + // ::test is not a member of any subclass of I as it is private + Runnable test2 = ((new I() {}))::test; + } + + interface I { + private void test() {} + } +} diff -r 9822dd521c15 -r d93ae85b18c1 test/langtools/tools/javac/T8194998/BrokenErrorMessageTest.out --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/langtools/tools/javac/T8194998/BrokenErrorMessageTest.out 
Tue May 15 10:13:52 2018 -0700 @@ -0,0 +1,2 @@ +BrokenErrorMessageTest.java:13:26: compiler.err.invalid.mref: kindname.method, (compiler.misc.cant.resolve.args: kindname.method, test, , ) +1 error diff -r 9822dd521c15 -r d93ae85b18c1 test/langtools/tools/javac/T8202597/NotionalInterfaceNotBeingInducedTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/langtools/tools/javac/T8202597/NotionalInterfaceNotBeingInducedTest.java Tue May 15 10:13:52 2018 -0700 @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test + * @bug 8202597 + * @summary javac is not inducing a notional interface if Object appears in an intersection type + * @compile NotionalInterfaceNotBeingInducedTest.java + */ + +class NotionalInterfaceNotBeingInducedTest { + interface I {} + interface J { void foo(); } + + public void test() { + Object o1 = (I & J) System::gc; + Object o2 = (J) System::gc; + Object o3 = (Object & J) System::gc; + Object o4 = (Object & I & J) System::gc; + } +} diff -r 9822dd521c15 -r d93ae85b18c1 test/langtools/tools/javac/diags/examples/BadReference.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/langtools/tools/javac/diags/examples/BadReference.java Tue May 15 10:13:52 2018 -0700 @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +// key: compiler.err.invalid.mref +// key: compiler.misc.cant.resolve.args + +class BadReference { + void foo() { + // ::test is not a member of any subclass of I as it is private + Runnable test2 = ((new I() {}))::test; + } + + interface I { + private void test() {} + } +} diff -r 9822dd521c15 -r d93ae85b18c1 test/langtools/tools/javac/diags/examples/NotAnInterfaceComponent.java --- a/test/langtools/tools/javac/diags/examples/NotAnInterfaceComponent.java Tue May 15 18:03:31 2018 +0530 +++ b/test/langtools/tools/javac/diags/examples/NotAnInterfaceComponent.java Tue May 15 10:13:52 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,5 +26,5 @@ // key: compiler.misc.not.an.intf.component class NotAnInterfaceComponent { - Object o = (Object & Runnable) ()-> { }; + Object o = (String & Runnable) ()-> { }; } diff -r 9822dd521c15 -r d93ae85b18c1 test/langtools/tools/javac/lambda/intersection/IntersectionTargetTypeTest.java --- a/test/langtools/tools/javac/lambda/intersection/IntersectionTargetTypeTest.java Tue May 15 18:03:31 2018 +0530 +++ b/test/langtools/tools/javac/lambda/intersection/IntersectionTargetTypeTest.java Tue May 15 10:13:52 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -276,7 +276,7 @@ if (ek.isFunctional) { List mks = new ArrayList<>(); for (TypeKind tk : cInfo.types) { - if (tk.boundKind == BoundKind.CLASS) { + if (tk.boundKind == BoundKind.CLASS && !tk.typeStr.equals("Object")) { errorExpected = true; break; } else {