# HG changeset patch # User duke # Date 1499283327 -7200 # Node ID ee1b8619eddba3fa29eb5c3cb671fb39d1ba3075 # Parent 3414aeff4a800e0be8271f7701f83651a497950d# Parent cf820d974c750a3a623a5e8eccb143629b962d53 Merge diff -r 3414aeff4a80 -r ee1b8619eddb .hgtags-top-repo --- a/.hgtags-top-repo Mon Apr 18 16:18:56 2016 +0100 +++ b/.hgtags-top-repo Wed Jul 05 21:35:27 2017 +0200 @@ -356,3 +356,4 @@ f900d5afd9c83a0df8f36161c27c5e4c86a66f4c jdk-9+111 03543a758cd5890f2266e4b9678378a925dde22a jdk-9+112 55b6d550828d1223b364e6ead4a56e56411c56df jdk-9+113 +1d992540870ff33fe6cc550443388588df9b9e4f jdk-9+114 diff -r 3414aeff4a80 -r ee1b8619eddb common/autoconf/boot-jdk.m4 --- a/common/autoconf/boot-jdk.m4 Mon Apr 18 16:18:56 2016 +0100 +++ b/common/autoconf/boot-jdk.m4 Wed Jul 05 21:35:27 2017 +0200 @@ -305,7 +305,7 @@ BOOT_JDK_SOURCETARGET="-source 8 -target 8" AC_SUBST(BOOT_JDK_SOURCETARGET) - ADD_JVM_ARG_IF_OK([-Xpatch:], dummy, [$JAVA]) + ADD_JVM_ARG_IF_OK([-Xpatch:foo=bar], dummy, [$JAVA]) AC_MSG_CHECKING([if Boot JDK supports modules]) if test "x$JVM_ARG_OK" = "xtrue"; then AC_MSG_RESULT([yes]) @@ -444,9 +444,9 @@ BUILD_JDK_VERSION=`"$BUILD_JDK/bin/java" -version 2>&1 | head -n 1` # Extra M4 quote needed to protect [] in grep expression. - [FOUND_CORRECT_VERSION=`echo $BUILD_JDK_VERSION | grep '\"1\.[9]\.'`] + [FOUND_CORRECT_VERSION=`echo $BUILD_JDK_VERSION | $EGREP '\"9([\.+-].*)?\"'`] if test "x$FOUND_CORRECT_VERSION" = x; then - AC_MSG_NOTICE([Potential Boot JDK found at $BUILD_JDK is incorrect JDK version ($BUILD_JDK_VERSION); ignoring]) + AC_MSG_NOTICE([Potential Build JDK found at $BUILD_JDK is incorrect JDK version ($BUILD_JDK_VERSION); ignoring]) AC_MSG_NOTICE([(Your Build JDK must be version 9)]) BUILD_JDK_FOUND=no else diff -r 3414aeff4a80 -r ee1b8619eddb common/autoconf/build-performance.m4 --- a/common/autoconf/build-performance.m4 Mon Apr 18 16:18:56 2016 +0100 +++ b/common/autoconf/build-performance.m4 Wed Jul 05 21:35:27 2017 +0200 @@ -454,7 +454,7 @@ AC_MSG_RESULT([$ENABLE_JAVAC_SERVER]) AC_SUBST(ENABLE_JAVAC_SERVER) - if test "x$ENABLE_JAVAC_SERVER" = "xyes" || "x$ENABLE_SJAVAC" = "xyes"; then + if test "x$ENABLE_JAVAC_SERVER" = "xyes" || test "x$ENABLE_SJAVAC" = "xyes"; then # When using a server javac, the small client instances do not need much # resources. JAVA_FLAGS_JAVAC="$JAVA_FLAGS_SMALL" diff -r 3414aeff4a80 -r ee1b8619eddb common/autoconf/configure.ac --- a/common/autoconf/configure.ac Mon Apr 18 16:18:56 2016 +0100 +++ b/common/autoconf/configure.ac Wed Jul 05 21:35:27 2017 +0200 @@ -227,6 +227,7 @@ HOTSPOT_SETUP_BUILD_TWEAKS JDKOPT_DETECT_INTREE_EC +JDKOPT_ENABLE_DISABLE_FAILURE_HANDLER ############################################################################### # diff -r 3414aeff4a80 -r ee1b8619eddb common/autoconf/generated-configure.sh --- a/common/autoconf/generated-configure.sh Mon Apr 18 16:18:56 2016 +0100 +++ b/common/autoconf/generated-configure.sh Wed Jul 05 21:35:27 2017 +0200 @@ -650,6 +650,7 @@ JOBS MEMORY_SIZE NUM_CORES +BUILD_FAILURE_HANDLER ENABLE_INTREE_EC HOTSPOT_MAKE_ARGS LIBZIP_CAN_USE_MMAP @@ -1172,6 +1173,7 @@ with_dxsdk with_dxsdk_lib with_dxsdk_include +enable_jtreg_failure_handler with_num_cores with_memory_size with_jobs @@ -1940,6 +1942,12 @@ disable bundling of the freetype library with the build result [enabled on Windows or when using --with-freetype, disabled otherwise] + --enable-jtreg-failure-handler + forces build of the jtreg failure handler to be + enabled, missing dependencies become fatal errors. 
+ Default is auto, where the failure handler is built + if all dependencies are present and otherwise just + disabled. --enable-sjavac use sjavac to do fast incremental compiles [disabled] --disable-javac-server disable javac server [enabled] @@ -4274,6 +4282,12 @@ # +################################################################################ +# +# Check if building of the jtreg failure handler should be enabled. +# + + # # Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -4950,7 +4964,7 @@ #CUSTOM_AUTOCONF_INCLUDE # Do not change or remove the following line, it is needed for consistency checks: -DATE_WHEN_GENERATED=1458755892 +DATE_WHEN_GENERATED=1460963400 ############################################################################### # @@ -29770,13 +29784,13 @@ - $ECHO "Check if jvm arg is ok: -Xpatch:" >&5 - $ECHO "Command: $JAVA -Xpatch: -version" >&5 - OUTPUT=`$JAVA -Xpatch: -version 2>&1` + $ECHO "Check if jvm arg is ok: -Xpatch:foo=bar" >&5 + $ECHO "Command: $JAVA -Xpatch:foo=bar -version" >&5 + OUTPUT=`$JAVA -Xpatch:foo=bar -version 2>&1` FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn` FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""` if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then - dummy="$dummy -Xpatch:" + dummy="$dummy -Xpatch:foo=bar" JVM_ARG_OK=true else $ECHO "Arg failed:" >&5 @@ -29856,10 +29870,10 @@ BUILD_JDK_VERSION=`"$BUILD_JDK/bin/java" -version 2>&1 | head -n 1` # Extra M4 quote needed to protect [] in grep expression. - FOUND_CORRECT_VERSION=`echo $BUILD_JDK_VERSION | grep '\"1\.[9]\.'` + FOUND_CORRECT_VERSION=`echo $BUILD_JDK_VERSION | $EGREP '\"9([\.+-].*)?\"'` if test "x$FOUND_CORRECT_VERSION" = x; then - { $as_echo "$as_me:${as_lineno-$LINENO}: Potential Boot JDK found at $BUILD_JDK is incorrect JDK version ($BUILD_JDK_VERSION); ignoring" >&5 -$as_echo "$as_me: Potential Boot JDK found at $BUILD_JDK is incorrect JDK version ($BUILD_JDK_VERSION); ignoring" >&6;} + { $as_echo "$as_me:${as_lineno-$LINENO}: Potential Build JDK found at $BUILD_JDK is incorrect JDK version ($BUILD_JDK_VERSION); ignoring" >&5 +$as_echo "$as_me: Potential Build JDK found at $BUILD_JDK is incorrect JDK version ($BUILD_JDK_VERSION); ignoring" >&6;} { $as_echo "$as_me:${as_lineno-$LINENO}: (Your Build JDK must be version 9)" >&5 $as_echo "$as_me: (Your Build JDK must be version 9)" >&6;} BUILD_JDK_FOUND=no @@ -62037,6 +62051,45 @@ + # Check whether --enable-jtreg-failure-handler was given. +if test "${enable_jtreg_failure_handler+set}" = set; then : + enableval=$enable_jtreg_failure_handler; +fi + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if jtreg failure handler should be built" >&5 +$as_echo_n "checking if jtreg failure handler should be built... " >&6; } + + if test "x$enable_jtreg_failure_handler" = "xyes"; then + if test "x$JT_HOME" = "x"; then + as_fn_error $? "Cannot enable jtreg failure handler without jtreg." 
"$LINENO" 5 + else + BUILD_FAILURE_HANDLER=true + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes, forced" >&5 +$as_echo "yes, forced" >&6; } + fi + elif test "x$enable_jtreg_failure_handler" = "xno"; then + BUILD_FAILURE_HANDLER=false + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no, forced" >&5 +$as_echo "no, forced" >&6; } + elif test "x$enable_jtreg_failure_handler" = "xauto" \ + || test "x$enable_jtreg_failure_handler" = "x"; then + if test "x$JT_HOME" = "x"; then + BUILD_FAILURE_HANDLER=false + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no, missing jtreg" >&5 +$as_echo "no, missing jtreg" >&6; } + else + BUILD_FAILURE_HANDLER=true + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes, jtreg present" >&5 +$as_echo "yes, jtreg present" >&6; } + fi + else + as_fn_error $? "Invalid value for --enable-jtreg-failure-handler: $enable_jtreg_failure_handler" "$LINENO" 5 + fi + + + + ############################################################################### # # Configure parts of the build that only affect the build performance, @@ -62510,7 +62563,7 @@ $as_echo "$ENABLE_JAVAC_SERVER" >&6; } - if test "x$ENABLE_JAVAC_SERVER" = "xyes" || "x$ENABLE_SJAVAC" = "xyes"; then + if test "x$ENABLE_JAVAC_SERVER" = "xyes" || test "x$ENABLE_SJAVAC" = "xyes"; then # When using a server javac, the small client instances do not need much # resources. JAVA_FLAGS_JAVAC="$JAVA_FLAGS_SMALL" diff -r 3414aeff4a80 -r ee1b8619eddb common/autoconf/jdk-options.m4 --- a/common/autoconf/jdk-options.m4 Mon Apr 18 16:18:56 2016 +0100 +++ b/common/autoconf/jdk-options.m4 Wed Jul 05 21:35:27 2017 +0200 @@ -408,7 +408,7 @@ ################################################################################ # -# jlink options. +# jlink options. # We always keep packaged modules in JDK image. # AC_DEFUN_ONCE([JDKOPT_SETUP_JLINK_OPTIONS], @@ -433,3 +433,42 @@ AC_SUBST(JLINK_KEEP_PACKAGED_MODULES) ]) + +################################################################################ +# +# Check if building of the jtreg failure handler should be enabled. +# +AC_DEFUN_ONCE([JDKOPT_ENABLE_DISABLE_FAILURE_HANDLER], +[ + AC_ARG_ENABLE([jtreg-failure-handler], [AS_HELP_STRING([--enable-jtreg-failure-handler], + [forces build of the jtreg failure handler to be enabled, missing dependencies + become fatal errors. 
Default is auto, where the failure handler is built if all + dependencies are present and otherwise just disabled.])]) + + AC_MSG_CHECKING([if jtreg failure handler should be built]) + + if test "x$enable_jtreg_failure_handler" = "xyes"; then + if test "x$JT_HOME" = "x"; then + AC_MSG_ERROR([Cannot enable jtreg failure handler without jtreg.]) + else + BUILD_FAILURE_HANDLER=true + AC_MSG_RESULT([yes, forced]) + fi + elif test "x$enable_jtreg_failure_handler" = "xno"; then + BUILD_FAILURE_HANDLER=false + AC_MSG_RESULT([no, forced]) + elif test "x$enable_jtreg_failure_handler" = "xauto" \ + || test "x$enable_jtreg_failure_handler" = "x"; then + if test "x$JT_HOME" = "x"; then + BUILD_FAILURE_HANDLER=false + AC_MSG_RESULT([no, missing jtreg]) + else + BUILD_FAILURE_HANDLER=true + AC_MSG_RESULT([yes, jtreg present]) + fi + else + AC_MSG_ERROR([Invalid value for --enable-jtreg-failure-handler: $enable_jtreg_failure_handler]) + fi + + AC_SUBST(BUILD_FAILURE_HANDLER) +]) diff -r 3414aeff4a80 -r ee1b8619eddb common/autoconf/spec.gmk.in --- a/common/autoconf/spec.gmk.in Mon Apr 18 16:18:56 2016 +0100 +++ b/common/autoconf/spec.gmk.in Wed Jul 05 21:35:27 2017 +0200 @@ -260,6 +260,8 @@ BUILD_HOTSPOT=@BUILD_HOTSPOT@ +BUILD_FAILURE_HANDLER := @BUILD_FAILURE_HANDLER@ + # The boot jdk to use. This is overridden in bootcycle-spec.gmk. Make sure to keep # it in sync. BOOT_JDK:=@BOOT_JDK@ diff -r 3414aeff4a80 -r ee1b8619eddb common/bin/compare.sh --- a/common/bin/compare.sh Mon Apr 18 16:18:56 2016 +0100 +++ b/common/bin/compare.sh Wed Jul 05 21:35:27 2017 +0200 @@ -41,7 +41,7 @@ STAT_PRINT_SIZE="-f %z" elif [ "$OPENJDK_TARGET_OS" = "windows" ]; then FULLDUMP_CMD="$DUMPBIN -all" - LDD_CMD="$DUMPBIN -dependants | $GREP .dll" + LDD_CMD="$DUMPBIN -dependents" DIS_CMD="$DUMPBIN -disasm:nobytes" STAT_PRINT_SIZE="-c %s" elif [ "$OPENJDK_TARGET_OS" = "aix" ]; then @@ -824,12 +824,25 @@ # Check dependencies if [ -n "$LDD_CMD" ]; then - (cd $FILE_WORK_DIR && $CP $OTHER_FILE . && $LDD_CMD $NAME 2>/dev/null | $AWK '{ print $1;}' | $SORT | $TEE $WORK_FILE_BASE.deps.other | $UNIQ > $WORK_FILE_BASE.deps.other.uniq) - (cd $FILE_WORK_DIR && $CP $THIS_FILE . && $LDD_CMD $NAME 2 $WORK_FILE_BASE.deps.this.uniq) + if [ "$OPENJDK_TARGET_OS" = "windows" ]; then + LDD_FILTER="$GREP \.dll" + else + LDD_FILTER="$CAT" + fi + (cd $FILE_WORK_DIR && $CP $OTHER_FILE . && $LDD_CMD $NAME 2>/dev/null \ + | $LDD_FILTER | $AWK '{ print $1;}' | $SORT \ + | $TEE $WORK_FILE_BASE.deps.other \ + | $UNIQ > $WORK_FILE_BASE.deps.other.uniq) + (cd $FILE_WORK_DIR && $CP $THIS_FILE . 
&& $LDD_CMD $NAME 2 $WORK_FILE_BASE.deps.this.uniq) (cd $FILE_WORK_DIR && $RM -f $NAME) - LC_ALL=C $DIFF $WORK_FILE_BASE.deps.other $WORK_FILE_BASE.deps.this > $WORK_FILE_BASE.deps.diff - LC_ALL=C $DIFF $WORK_FILE_BASE.deps.other.uniq $WORK_FILE_BASE.deps.this.uniq > $WORK_FILE_BASE.deps.diff.uniq + LC_ALL=C $DIFF $WORK_FILE_BASE.deps.other $WORK_FILE_BASE.deps.this \ + > $WORK_FILE_BASE.deps.diff + LC_ALL=C $DIFF $WORK_FILE_BASE.deps.other.uniq $WORK_FILE_BASE.deps.this.uniq \ + > $WORK_FILE_BASE.deps.diff.uniq if [ -s $WORK_FILE_BASE.deps.diff ]; then if [ -s $WORK_FILE_BASE.deps.diff.uniq ]; then diff -r 3414aeff4a80 -r ee1b8619eddb common/conf/jib-profiles.js --- a/common/conf/jib-profiles.js Mon Apr 18 16:18:56 2016 +0100 +++ b/common/conf/jib-profiles.js Wed Jul 05 21:35:27 2017 +0200 @@ -214,7 +214,7 @@ var getJibProfilesCommon = function (input) { var common = { dependencies: ["boot_jdk", "gnumake", "jtreg"], - configure_args: ["--with-default-make-target=all"], + configure_args: ["--with-default-make-target=all", "--enable-jtreg-failure-handler"], configure_args_32bit: ["--with-target-bits=32", "--with-jvm-variants=client,server"], configure_args_debug: ["--enable-debug"], configure_args_slowdebug: ["--with-debug-level=slowdebug"], diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/.hgtags --- a/hotspot/.hgtags Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/.hgtags Wed Jul 05 21:35:27 2017 +0200 @@ -516,3 +516,4 @@ c558850fac5750d8ca98a45180121980f57cdd28 jdk-9+111 76582e8dc9e6374e4f99ab797c8d364b6e9449b4 jdk-9+112 c569f8d89269fb6205b90f727581eb8cc04132f9 jdk-9+113 +b64432bae5271735fd53300b2005b713e98ef411 jdk-9+114 diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/make/aix/adlc_updater --- a/hotspot/make/aix/adlc_updater Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/make/aix/adlc_updater Wed Jul 05 21:35:27 2017 +0200 @@ -9,12 +9,15 @@ # fix_lines() { # repair bare #line directives in $1 to refer to $2 - awk < $1 > $1+ ' + # and add an override of __FILE__ with just the basename on the + # first line of the file. + awk < $1 > $1+ -v F2=$2 ' + BEGIN { print "#line 1 \"" F2 "\""; } /^#line 999999$/ {print "#line " (NR+1) " \"" F2 "\""; next} {print} - ' F2=$2 + ' mv $1+ $1 } -fix_lines $2/$1 $3/$1 +fix_lines $2/$1 $1 [ -f $3/$1 ] && cmp -s $2/$1 $3/$1 || \ ( [ -f $3/$1 ] && echo Updating $3/$1 ; touch $2/made-change ; mv $2/$1 $3/$1 ) diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/make/aix/makefiles/trace.make --- a/hotspot/make/aix/makefiles/trace.make Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/make/aix/makefiles/trace.make Wed Jul 05 21:35:27 2017 +0200 @@ -27,14 +27,17 @@ # # It knows how to build and run the tools to generate trace files. 
-include $(GAMMADIR)/make/linux/makefiles/rules.make +include $(GAMMADIR)/make/aix/makefiles/rules.make include $(GAMMADIR)/make/altsrc.make # ######################################################################### -HAS_ALT_SRC:=$(shell if [ -d $(HS_ALT_SRC)/share/vm/trace ]; then \ - echo "true"; else echo "false";\ - fi) +HAS_ALT_SRC := false +ifndef OPENJDK + ifneq ($(wildcard $(HS_ALT_SRC)/share/vm/trace), ) + HAS_ALT_SRC := true + endif +endif TOPDIR = $(shell echo `pwd`) GENERATED = $(TOPDIR)/../generated @@ -50,23 +53,30 @@ TraceGeneratedNames = \ traceEventClasses.hpp \ - traceEventIds.hpp \ - traceTypes.hpp + traceEventIds.hpp \ + traceTypes.hpp ifeq ($(HAS_ALT_SRC), true) -TraceGeneratedNames += \ - traceRequestables.hpp \ - traceEventControl.hpp + TraceGeneratedNames += \ + traceRequestables.hpp \ + traceEventControl.hpp endif TraceGeneratedFiles = $(TraceGeneratedNames:%=$(TraceOutDir)/%) XSLT = $(REMOTE) $(RUN.JAVA) -classpath $(JvmtiOutDir) jvmtiGen -XML_DEPS = $(TraceSrcDir)/trace.xml $(TraceSrcDir)/tracetypes.xml \ - $(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod +TraceXml = $(TraceSrcDir)/trace.xml ifeq ($(HAS_ALT_SRC), true) - XML_DEPS += $(TraceAltSrcDir)/traceevents.xml + TraceXml = $(TraceAltSrcDir)/trace.xml +endif + +XML_DEPS = $(TraceXml) $(TraceSrcDir)/tracetypes.xml \ + $(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod \ + $(TraceSrcDir)/tracerelationdecls.xml $(TraceSrcDir)/traceevents.xml +ifeq ($(HAS_ALT_SRC), true) + XML_DEPS += $(TraceAltSrcDir)/traceeventscustom.xml \ + $(TraceAltSrcDir)/traceeventtypes.xml endif .PHONY: all clean cleanall @@ -79,26 +89,26 @@ $(QUIETLY) echo $(LOG_INFO) Generating $@; \ $(XSLT) -IN $(word 1,$^) -XSL $(word 2,$^) -OUT $@ -$(TraceOutDir)/traceEventIds.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS) +$(TraceOutDir)/traceEventIds.hpp: $(TraceXml) $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS) $(GENERATE_CODE) -$(TraceOutDir)/traceTypes.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceTypes.xsl $(XML_DEPS) +$(TraceOutDir)/traceTypes.hpp: $(TraceXml) $(TraceSrcDir)/traceTypes.xsl $(XML_DEPS) $(GENERATE_CODE) ifeq ($(HAS_ALT_SRC), false) -$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS) +$(TraceOutDir)/traceEventClasses.hpp: $(TraceXml) $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS) $(GENERATE_CODE) else -$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS) +$(TraceOutDir)/traceEventClasses.hpp: $(TraceXml) $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS) $(GENERATE_CODE) -$(TraceOutDir)/traceRequestables.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS) +$(TraceOutDir)/traceRequestables.hpp: $(TraceXml) $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS) $(GENERATE_CODE) -$(TraceOutDir)/traceEventControl.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS) +$(TraceOutDir)/traceEventControl.hpp: $(TraceXml) $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS) $(GENERATE_CODE) endif diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/make/bsd/adlc_updater --- a/hotspot/make/bsd/adlc_updater Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/make/bsd/adlc_updater Wed Jul 05 21:35:27 2017 +0200 @@ -9,12 +9,15 @@ # fix_lines() { # repair bare #line directives in $1 to refer to $2 - awk < $1 > $1+ ' + # and add an override of __FILE__ with just the basename on the + # first line of the file. 
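An aside on the directive the updated awk script injects (not part of the patch itself): a `#line N "name"` directive is standard C/C++ preprocessor behavior that resets both `__LINE__` and `__FILE__`, so the generated ad_*.cpp files report just the basename instead of the full generated-source path. A minimal sketch of the effect; the file name below is made up for illustration:

```cpp
#include <cstdio>

#line 1 "ad_example.cpp"  // hypothetical basename, as injected by adlc_updater

int main() {
  // __FILE__ now expands to the injected basename rather than the real
  // path of this translation unit, and __LINE__ restarts from the directive.
  std::printf("%s:%d\n", __FILE__, __LINE__);
  return 0;
}
```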
+ awk < $1 > $1+ -v F2=$2 ' + BEGIN { print "#line 1 \"" F2 "\""; } /^#line 999999$/ {print "#line " (NR+1) " \"" F2 "\""; next} {print} - ' F2=$2 + ' mv $1+ $1 } -fix_lines $2/$1 $3/$1 +fix_lines $2/$1 $1 [ -f $3/$1 ] && cmp -s $2/$1 $3/$1 || \ ( [ -f $3/$1 ] && echo Updating $3/$1 ; touch $2/made-change ; mv $2/$1 $3/$1 ) diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/make/bsd/makefiles/trace.make --- a/hotspot/make/bsd/makefiles/trace.make Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/make/bsd/makefiles/trace.make Wed Jul 05 21:35:27 2017 +0200 @@ -32,9 +32,12 @@ # ######################################################################### -HAS_ALT_SRC:=$(shell if [ -d $(HS_ALT_SRC)/share/vm/trace ]; then \ - echo "true"; else echo "false";\ - fi) +HAS_ALT_SRC := false +ifndef OPENJDK + ifneq ($(wildcard $(HS_ALT_SRC)/share/vm/trace), ) + HAS_ALT_SRC := true + endif +endif TOPDIR = $(shell echo `pwd`) GENERATED = $(TOPDIR)/../generated @@ -50,24 +53,30 @@ TraceGeneratedNames = \ traceEventClasses.hpp \ - traceEventIds.hpp \ - traceTypes.hpp + traceEventIds.hpp \ + traceTypes.hpp ifeq ($(HAS_ALT_SRC), true) -TraceGeneratedNames += \ - traceRequestables.hpp \ - traceEventControl.hpp + TraceGeneratedNames += \ + traceRequestables.hpp \ + traceEventControl.hpp endif - TraceGeneratedFiles = $(TraceGeneratedNames:%=$(TraceOutDir)/%) XSLT = $(REMOTE) $(RUN.JAVA) -classpath $(JvmtiOutDir) jvmtiGen -XML_DEPS = $(TraceSrcDir)/trace.xml $(TraceSrcDir)/tracetypes.xml \ - $(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod +TraceXml = $(TraceSrcDir)/trace.xml ifeq ($(HAS_ALT_SRC), true) - XML_DEPS += $(TraceAltSrcDir)/traceevents.xml + TraceXml = $(TraceAltSrcDir)/trace.xml +endif + +XML_DEPS = $(TraceXml) $(TraceSrcDir)/tracetypes.xml \ + $(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod \ + $(TraceSrcDir)/tracerelationdecls.xml $(TraceSrcDir)/traceevents.xml +ifeq ($(HAS_ALT_SRC), true) + XML_DEPS += $(TraceAltSrcDir)/traceeventscustom.xml \ + $(TraceAltSrcDir)/traceeventtypes.xml endif .PHONY: all clean cleanall @@ -80,32 +89,31 @@ $(QUIETLY) echo $(LOG_INFO) Generating $@; \ $(XSLT) -IN $(word 1,$^) -XSL $(word 2,$^) -OUT $@ -$(TraceOutDir)/traceEventIds.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS) +$(TraceOutDir)/traceEventIds.hpp: $(TraceXml) $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS) $(GENERATE_CODE) -$(TraceOutDir)/traceTypes.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceTypes.xsl $(XML_DEPS) +$(TraceOutDir)/traceTypes.hpp: $(TraceXml) $(TraceSrcDir)/traceTypes.xsl $(XML_DEPS) $(GENERATE_CODE) ifeq ($(HAS_ALT_SRC), false) -$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS) +$(TraceOutDir)/traceEventClasses.hpp: $(TraceXml) $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS) $(GENERATE_CODE) else -$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS) +$(TraceOutDir)/traceEventClasses.hpp: $(TraceXml) $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS) $(GENERATE_CODE) -$(TraceOutDir)/traceRequestables.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS) +$(TraceOutDir)/traceRequestables.hpp: $(TraceXml) $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS) $(GENERATE_CODE) -$(TraceOutDir)/traceEventControl.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS) +$(TraceOutDir)/traceEventControl.hpp: $(TraceXml) $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS) $(GENERATE_CODE) endif # 
######################################################################### - clean cleanall: rm $(TraceGeneratedFiles) diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/make/linux/adlc_updater --- a/hotspot/make/linux/adlc_updater Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/make/linux/adlc_updater Wed Jul 05 21:35:27 2017 +0200 @@ -9,12 +9,15 @@ # fix_lines() { # repair bare #line directives in $1 to refer to $2 - awk < $1 > $1+ ' + # and add an override of __FILE__ with just the basename on the + # first line of the file. + awk < $1 > $1+ -v F2=$2 ' + BEGIN { print "#line 1 \"" F2 "\""; } /^#line 999999$/ {print "#line " (NR+1) " \"" F2 "\""; next} {print} - ' F2=$2 + ' mv $1+ $1 } -fix_lines $2/$1 $3/$1 +fix_lines $2/$1 $1 [ -f $3/$1 ] && cmp -s $2/$1 $3/$1 || \ ( [ -f $3/$1 ] && echo Updating $3/$1 ; touch $2/made-change ; mv $2/$1 $3/$1 ) diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/make/linux/makefiles/trace.make --- a/hotspot/make/linux/makefiles/trace.make Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/make/linux/makefiles/trace.make Wed Jul 05 21:35:27 2017 +0200 @@ -32,9 +32,12 @@ # ######################################################################### -HAS_ALT_SRC:=$(shell if [ -d $(HS_ALT_SRC)/share/vm/trace ]; then \ - echo "true"; else echo "false";\ - fi) +HAS_ALT_SRC := false +ifndef OPENJDK + ifneq ($(wildcard $(HS_ALT_SRC)/share/vm/trace), ) + HAS_ALT_SRC := true + endif +endif TOPDIR = $(shell echo `pwd`) GENERATED = $(TOPDIR)/../generated @@ -50,23 +53,30 @@ TraceGeneratedNames = \ traceEventClasses.hpp \ - traceEventIds.hpp \ - traceTypes.hpp + traceEventIds.hpp \ + traceTypes.hpp ifeq ($(HAS_ALT_SRC), true) -TraceGeneratedNames += \ - traceRequestables.hpp \ - traceEventControl.hpp + TraceGeneratedNames += \ + traceRequestables.hpp \ + traceEventControl.hpp endif TraceGeneratedFiles = $(TraceGeneratedNames:%=$(TraceOutDir)/%) XSLT = $(REMOTE) $(RUN.JAVA) -classpath $(JvmtiOutDir) jvmtiGen -XML_DEPS = $(TraceSrcDir)/trace.xml $(TraceSrcDir)/tracetypes.xml \ - $(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod +TraceXml = $(TraceSrcDir)/trace.xml ifeq ($(HAS_ALT_SRC), true) - XML_DEPS += $(TraceAltSrcDir)/traceevents.xml + TraceXml = $(TraceAltSrcDir)/trace.xml +endif + +XML_DEPS = $(TraceXml) $(TraceSrcDir)/tracetypes.xml \ + $(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod \ + $(TraceSrcDir)/tracerelationdecls.xml $(TraceSrcDir)/traceevents.xml +ifeq ($(HAS_ALT_SRC), true) + XML_DEPS += $(TraceAltSrcDir)/traceeventscustom.xml \ + $(TraceAltSrcDir)/traceeventtypes.xml endif .PHONY: all clean cleanall @@ -79,26 +89,26 @@ $(QUIETLY) echo $(LOG_INFO) Generating $@; \ $(XSLT) -IN $(word 1,$^) -XSL $(word 2,$^) -OUT $@ -$(TraceOutDir)/traceEventIds.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS) +$(TraceOutDir)/traceEventIds.hpp: $(TraceXml) $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS) $(GENERATE_CODE) -$(TraceOutDir)/traceTypes.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceTypes.xsl $(XML_DEPS) +$(TraceOutDir)/traceTypes.hpp: $(TraceXml) $(TraceSrcDir)/traceTypes.xsl $(XML_DEPS) $(GENERATE_CODE) ifeq ($(HAS_ALT_SRC), false) -$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS) +$(TraceOutDir)/traceEventClasses.hpp: $(TraceXml) $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS) $(GENERATE_CODE) else -$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS) +$(TraceOutDir)/traceEventClasses.hpp: $(TraceXml) 
$(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS) $(GENERATE_CODE) -$(TraceOutDir)/traceRequestables.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS) +$(TraceOutDir)/traceRequestables.hpp: $(TraceXml) $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS) $(GENERATE_CODE) -$(TraceOutDir)/traceEventControl.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS) +$(TraceOutDir)/traceEventControl.hpp: $(TraceXml) $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS) $(GENERATE_CODE) endif @@ -107,5 +117,3 @@ clean cleanall: rm $(TraceGeneratedFiles) - - diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/make/share/makefiles/mapfile-vers --- a/hotspot/make/share/makefiles/mapfile-vers Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/make/share/makefiles/mapfile-vers Wed Jul 05 21:35:27 2017 +0200 @@ -109,8 +109,7 @@ JVM_GetPrimitiveArrayElement; JVM_GetProtectionDomain; JVM_GetStackAccessControlContext; - JVM_GetStackTraceDepth; - JVM_GetStackTraceElement; + JVM_GetStackTraceElements; JVM_GetSystemPackage; JVM_GetSystemPackages; JVM_GetTemporaryDirectory; diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/make/solaris/adlc_updater --- a/hotspot/make/solaris/adlc_updater Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/make/solaris/adlc_updater Wed Jul 05 21:35:27 2017 +0200 @@ -9,12 +9,15 @@ # fix_lines() { # repair bare #line directives in $1 to refer to $2 - awk < $1 > $1+ ' + # and add an override of __FILE__ with just the basename on the + # first line of the file. + nawk < $1 > $1+ -v F2=$2 ' + BEGIN { print "#line 1 \"" F2 "\""; } /^#line 999999$/ {print "#line " (NR+1) " \"" F2 "\""; next} {print} - ' F2=$2 + ' mv $1+ $1 } -fix_lines $2/$1 $3/$1 +fix_lines $2/$1 $1 [ -f $3/$1 ] && cmp -s $2/$1 $3/$1 || \ ( [ -f $3/$1 ] && echo Updating $3/$1 ; touch $2/made-change ; mv $2/$1 $3/$1 ) diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/make/solaris/makefiles/trace.make --- a/hotspot/make/solaris/makefiles/trace.make Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/make/solaris/makefiles/trace.make Wed Jul 05 21:35:27 2017 +0200 @@ -32,9 +32,12 @@ # ######################################################################### -HAS_ALT_SRC:=$(shell if [ -d $(HS_ALT_SRC)/share/vm/trace ]; then \ - echo "true"; else echo "false";\ - fi) +HAS_ALT_SRC := false +ifndef OPENJDK + ifneq ($(wildcard $(HS_ALT_SRC)/share/vm/trace), ) + HAS_ALT_SRC := true + endif +endif TOPDIR = $(shell echo `pwd`) GENERATED = $(TOPDIR)/../generated @@ -50,23 +53,30 @@ TraceGeneratedNames = \ traceEventClasses.hpp \ - traceEventIds.hpp \ - traceTypes.hpp + traceEventIds.hpp \ + traceTypes.hpp ifeq ($(HAS_ALT_SRC), true) -TraceGeneratedNames += \ - traceRequestables.hpp \ - traceEventControl.hpp + TraceGeneratedNames += \ + traceRequestables.hpp \ + traceEventControl.hpp endif TraceGeneratedFiles = $(TraceGeneratedNames:%=$(TraceOutDir)/%) XSLT = $(REMOTE) $(RUN.JAVA) -classpath $(JvmtiOutDir) jvmtiGen -XML_DEPS = $(TraceSrcDir)/trace.xml $(TraceSrcDir)/tracetypes.xml \ - $(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod +TraceXml = $(TraceSrcDir)/trace.xml ifeq ($(HAS_ALT_SRC), true) - XML_DEPS += $(TraceAltSrcDir)/traceevents.xml + TraceXml = $(TraceAltSrcDir)/trace.xml +endif + +XML_DEPS = $(TraceXml) $(TraceSrcDir)/tracetypes.xml \ + $(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod \ + $(TraceSrcDir)/tracerelationdecls.xml $(TraceSrcDir)/traceevents.xml +ifeq ($(HAS_ALT_SRC), true) + XML_DEPS += $(TraceAltSrcDir)/traceeventscustom.xml \ + $(TraceAltSrcDir)/traceeventtypes.xml endif 
.PHONY: all clean cleanall @@ -79,26 +89,26 @@ $(QUIETLY) echo $(LOG_INFO) Generating $@; \ $(XSLT) -IN $(word 1,$^) -XSL $(word 2,$^) -OUT $@ -$(TraceOutDir)/traceEventIds.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS) +$(TraceOutDir)/traceEventIds.hpp: $(TraceXml) $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS) $(GENERATE_CODE) -$(TraceOutDir)/traceTypes.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceTypes.xsl $(XML_DEPS) +$(TraceOutDir)/traceTypes.hpp: $(TraceXml) $(TraceSrcDir)/traceTypes.xsl $(XML_DEPS) $(GENERATE_CODE) ifeq ($(HAS_ALT_SRC), false) -$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS) +$(TraceOutDir)/traceEventClasses.hpp: $(TraceXml) $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS) $(GENERATE_CODE) else -$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS) +$(TraceOutDir)/traceEventClasses.hpp: $(TraceXml) $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS) $(GENERATE_CODE) -$(TraceOutDir)/traceRequestables.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS) +$(TraceOutDir)/traceRequestables.hpp: $(TraceXml) $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS) $(GENERATE_CODE) -$(TraceOutDir)/traceEventControl.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS) +$(TraceOutDir)/traceEventControl.hpp: $(TraceXml) $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS) $(GENERATE_CODE) endif diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/make/windows/build.make --- a/hotspot/make/windows/build.make Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/make/windows/build.make Wed Jul 05 21:35:27 2017 +0200 @@ -114,11 +114,15 @@ # Define HOTSPOT_VM_DISTRO based on settings in make/openjdk_distro # or make/hotspot_distro. !ifndef HOTSPOT_VM_DISTRO +!ifndef OPENJDK !if exists($(WorkSpace)\src\closed) !include $(WorkSpace)\make\hotspot_distro !else !include $(WorkSpace)\make\openjdk_distro !endif +!else +!include $(WorkSpace)\make\openjdk_distro +!endif !endif HS_FILEDESC=$(HOTSPOT_VM_DISTRO) $(ARCH_TEXT) $(VARIANT_TEXT) VM diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/make/windows/create_obj_files.sh --- a/hotspot/make/windows/create_obj_files.sh Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/make/windows/create_obj_files.sh Wed Jul 05 21:35:27 2017 +0200 @@ -55,7 +55,11 @@ ALTSRC_REL=src/closed # Change this to pick up alt sources from somewhere else COMMONSRC=${WorkSpace}/${COMMONSRC_REL} -ALTSRC=${WorkSpace}/${ALTSRC_REL} +if [ "x$OPENJDK" != "xtrue" ]; then + ALTSRC=${WorkSpace}/${ALTSRC_REL} +else + ALTSRC=PATH_THAT_DOES_NOT_EXIST +fi BASE_PATHS="`if [ -d ${ALTSRC}/share/vm ]; then $FIND ${ALTSRC}/share/vm ! -name vm -prune -type d \! \( -name adlc -o -name c1 -o -name gc -o -name opto -o -name shark -o -name libadt \); fi`" BASE_PATHS="${BASE_PATHS} ` $FIND ${COMMONSRC}/share/vm ! -name vm -prune -type d \! 
\( -name adlc -o -name c1 -o -name gc -o -name opto -o -name shark -o -name libadt \)`" @@ -158,6 +162,6 @@ fi Obj_Files="${Obj_Files}$o " done -Obj_Files=`echo ${Obj_Files} | tr ' ' '\n' | sort` +Obj_Files=`echo ${Obj_Files} | tr ' ' '\n' | LC_ALL=C sort` echo Obj_Files=${Obj_Files} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/make/windows/makefiles/defs.make --- a/hotspot/make/windows/makefiles/defs.make Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/make/windows/makefiles/defs.make Wed Jul 05 21:35:27 2017 +0200 @@ -276,3 +276,7 @@ MAKE_ARGS += MT="$(subst /,\\,$(MT))" endif endif + +ifdef OPENJDK + MAKE_ARGS += OPENJDK="$(OPENJDK)" +endif diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/make/windows/makefiles/trace.make --- a/hotspot/make/windows/makefiles/trace.make Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/make/windows/makefiles/trace.make Wed Jul 05 21:35:27 2017 +0200 @@ -32,15 +32,21 @@ # ######################################################################### -TraceAltSrcDir = $(WorkSpace)/src/closed/share/vm/trace -TraceSrcDir = $(WorkSpace)/src/share/vm/trace +TraceAltSrcDir = $(WorkSpace)\src\closed\share\vm\trace +TraceSrcDir = $(WorkSpace)\src\share\vm\trace + +!ifndef OPENJDK +!if EXISTS($(TraceAltSrcDir)) +HAS_ALT_SRC = true +!endif +!endif TraceGeneratedNames = \ traceEventClasses.hpp \ traceEventIds.hpp \ traceTypes.hpp -!if EXISTS($(TraceAltSrcDir)) +!ifdef HAS_ALT_SRC TraceGeneratedNames = $(TraceGeneratedNames) \ traceRequestables.hpp \ traceEventControl.hpp @@ -51,22 +57,30 @@ #Should be equivalent to "TraceGeneratedFiles = $(TraceGeneratedNames:%=$(TraceOutDir)/%)" TraceGeneratedFiles = \ $(TraceOutDir)/traceEventClasses.hpp \ - $(TraceOutDir)/traceEventIds.hpp \ - $(TraceOutDir)/traceTypes.hpp + $(TraceOutDir)/traceEventIds.hpp \ + $(TraceOutDir)/traceTypes.hpp -!if EXISTS($(TraceAltSrcDir)) +!ifdef HAS_ALT_SRC TraceGeneratedFiles = $(TraceGeneratedFiles) \ - $(TraceOutDir)/traceRequestables.hpp \ + $(TraceOutDir)/traceRequestables.hpp \ $(TraceOutDir)/traceEventControl.hpp !endif XSLT = $(QUIETLY) $(REMOTE) $(RUN_JAVA) -classpath $(JvmtiOutDir) jvmtiGen -XML_DEPS = $(TraceSrcDir)/trace.xml $(TraceSrcDir)/tracetypes.xml \ - $(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod +TraceXml = $(TraceSrcDir)/trace.xml + +!ifdef HAS_ALT_SRC +TraceXml = $(TraceAltSrcDir)/trace.xml +!endif -!if EXISTS($(TraceAltSrcDir)) -XML_DEPS = $(XML_DEPS) $(TraceAltSrcDir)/traceevents.xml +XML_DEPS = $(TraceXml) $(TraceSrcDir)/tracetypes.xml \ + $(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod \ + $(TraceSrcDir)/tracerelationdecls.xml $(TraceSrcDir)/traceevents.xml + +!ifdef HAS_ALT_SRC +XML_DEPS = $(XML_DEPS) $(TraceAltSrcDir)/traceeventscustom.xml \ + $(TraceAltSrcDir)/traceeventtypes.xml !endif .PHONY: all clean cleanall @@ -76,33 +90,33 @@ default:: @if not exist $(TraceOutDir) mkdir $(TraceOutDir) -$(TraceOutDir)/traceEventIds.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS) +$(TraceOutDir)/traceEventIds.hpp: $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS) @echo Generating $@ - @$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceSrcDir)/traceEventIds.xsl -OUT $(TraceOutDir)/traceEventIds.hpp + $(XSLT) -IN $(TraceXml) -XSL $(TraceSrcDir)/traceEventIds.xsl -OUT $(TraceOutDir)/traceEventIds.hpp -$(TraceOutDir)/traceTypes.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceTypes.xsl $(XML_DEPS) +$(TraceOutDir)/traceTypes.hpp: $(TraceSrcDir)/traceTypes.xsl $(XML_DEPS) @echo Generating $@ - @$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceSrcDir)/traceTypes.xsl 
-OUT $(TraceOutDir)/traceTypes.hpp + $(XSLT) -IN $(TraceXml) -XSL $(TraceSrcDir)/traceTypes.xsl -OUT $(TraceOutDir)/traceTypes.hpp -!if !EXISTS($(TraceAltSrcDir)) +!ifndef HAS_ALT_SRC -$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS) +$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS) @echo Generating OpenJDK $@ - @$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceSrcDir)/traceEventClasses.xsl -OUT $(TraceOutDir)/traceEventClasses.hpp + $(XSLT) -IN $(TraceXml) -XSL $(TraceSrcDir)/traceEventClasses.xsl -OUT $(TraceOutDir)/traceEventClasses.hpp !else -$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS) +$(TraceOutDir)/traceEventClasses.hpp: $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS) @echo Generating AltSrc $@ - @$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceAltSrcDir)/traceEventClasses.xsl -OUT $(TraceOutDir)/traceEventClasses.hpp + $(XSLT) -IN $(TraceXml) -XSL $(TraceAltSrcDir)/traceEventClasses.xsl -OUT $(TraceOutDir)/traceEventClasses.hpp -$(TraceOutDir)/traceRequestables.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS) +$(TraceOutDir)/traceRequestables.hpp: $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS) @echo Generating AltSrc $@ - @$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceAltSrcDir)/traceRequestables.xsl -OUT $(TraceOutDir)/traceRequestables.hpp + $(XSLT) -IN $(TraceXml) -XSL $(TraceAltSrcDir)/traceRequestables.xsl -OUT $(TraceOutDir)/traceRequestables.hpp -$(TraceOutDir)/traceEventControl.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS) +$(TraceOutDir)/traceEventControl.hpp: $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS) @echo Generating AltSrc $@ - @$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceAltSrcDir)/traceEventControl.xsl -OUT $(TraceOutDir)/traceEventControl.hpp + $(XSLT) -IN $(TraceXml) -XSL $(TraceAltSrcDir)/traceEventControl.xsl -OUT $(TraceOutDir)/traceEventControl.hpp !endif @@ -110,5 +124,3 @@ cleanall : rm $(TraceGeneratedFiles) - - diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/make/windows/makefiles/vm.make --- a/hotspot/make/windows/makefiles/vm.make Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/make/windows/makefiles/vm.make Wed Jul 05 21:35:27 2017 +0200 @@ -118,6 +118,7 @@ CXX_INCLUDE_DIRS=/I "..\generated" +!ifndef OPENJDK !if exists($(ALTSRC)\share\vm) CXX_INCLUDE_DIRS=$(CXX_INCLUDE_DIRS) /I "$(ALTSRC)\share\vm" !endif @@ -133,6 +134,7 @@ !if exists($(ALTSRC)\cpu\$(Platform_arch)\vm) CXX_INCLUDE_DIRS=$(CXX_INCLUDE_DIRS) /I "$(ALTSRC)\cpu\$(Platform_arch)\vm" !endif +!endif # OPENJDK CXX_INCLUDE_DIRS=$(CXX_INCLUDE_DIRS) \ /I "$(COMMONSRC)\share\vm" \ @@ -187,10 +189,12 @@ VM_PATH=$(VM_PATH);$(WorkSpace)/src/cpu/$(Platform_arch)/vm VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/opto +!ifndef OPENJDK !if exists($(ALTSRC)\share\vm\jfr) VM_PATH=$(VM_PATH);$(ALTSRC)/share/vm/jfr VM_PATH=$(VM_PATH);$(ALTSRC)/share/vm/jfr/buffers !endif +!endif # OPENJDK VM_PATH={$(VM_PATH)} @@ -310,6 +314,7 @@ {$(COMMONSRC)\os_cpu\windows_$(Platform_arch)\vm}.cpp.obj:: $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< +!ifndef OPENJDK {$(ALTSRC)\share\vm\c1}.cpp.obj:: $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< @@ -392,6 +397,13 @@ {$(ALTSRC)\os_cpu\windows_$(Platform_arch)\vm}.cpp.obj:: $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< +{$(ALTSRC)\share\vm\jfr}.cpp.obj:: + $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< + 
+{$(ALTSRC)\share\vm\jfr\buffers}.cpp.obj:: + $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< +!endif + {..\generated\incls}.cpp.obj:: $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< @@ -404,12 +416,6 @@ {..\generated\tracefiles}.cpp.obj:: $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< -{$(ALTSRC)\share\vm\jfr}.cpp.obj:: - $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< - -{$(ALTSRC)\share\vm\jfr\buffers}.cpp.obj:: - $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< - default:: _build_pch_file.obj: diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/cpu/aarch64/vm/aarch64.ad --- a/hotspot/src/cpu/aarch64/vm/aarch64.ad Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/cpu/aarch64/vm/aarch64.ad Wed Jul 05 21:35:27 2017 +0200 @@ -14242,6 +14242,48 @@ ins_pipe(pipe_cmp_branch); %} +instruct cmpUI_imm0_branch(cmpOpU cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsRegU cr) %{ + match(If cmp (CmpU op1 op2)); + predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne + || n->in(1)->as_Bool()->_test._test == BoolTest::eq + || n->in(1)->as_Bool()->_test._test == BoolTest::gt + || n->in(1)->as_Bool()->_test._test == BoolTest::le); + effect(USE labl); + + ins_cost(BRANCH_COST); + format %{ "cbw$cmp $op1, $labl" %} + ins_encode %{ + Label* L = $labl$$label; + Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode; + if (cond == Assembler::EQ || cond == Assembler::LS) + __ cbzw($op1$$Register, *L); + else + __ cbnzw($op1$$Register, *L); + %} + ins_pipe(pipe_cmp_branch); +%} + +instruct cmpUL_imm0_branch(cmpOpU cmp, iRegL op1, immL0 op2, label labl, rFlagsRegU cr) %{ + match(If cmp (CmpU op1 op2)); + predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne + || n->in(1)->as_Bool()->_test._test == BoolTest::eq + || n->in(1)->as_Bool()->_test._test == BoolTest::gt + || n->in(1)->as_Bool()->_test._test == BoolTest::le); + effect(USE labl); + + ins_cost(BRANCH_COST); + format %{ "cb$cmp $op1, $labl" %} + ins_encode %{ + Label* L = $labl$$label; + Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode; + if (cond == Assembler::EQ || cond == Assembler::LS) + __ cbz($op1$$Register, *L); + else + __ cbnz($op1$$Register, *L); + %} + ins_pipe(pipe_cmp_branch); +%} + // Test bit and Branch // Patterns for short (< 32KiB) variants diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/cpu/aarch64/vm/assembler_aarch64.hpp --- a/hotspot/src/cpu/aarch64/vm/assembler_aarch64.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/cpu/aarch64/vm/assembler_aarch64.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -1221,6 +1221,38 @@ INSN(caspal, true, true) #undef INSN + // 8.1 Atomic operations + void lse_atomic(Register Rs, Register Rt, Register Rn, + enum operand_size sz, int op1, int op2, bool a, bool r) { + starti; + f(sz, 31, 30), f(0b111000, 29, 24), f(a, 23), f(r, 22), f(1, 21); + rf(Rs, 16), f(op1, 15), f(op2, 14, 12), f(0, 11, 10), rf(Rn, 5), zrf(Rt, 0); + } + +#define INSN(NAME, NAME_A, NAME_L, NAME_AL, op1, op2) \ + void NAME(operand_size sz, Register Rs, Register Rt, Register Rn) { \ + lse_atomic(Rs, Rt, Rn, sz, op1, op2, false, false); \ + } \ + void NAME_A(operand_size sz, Register Rs, Register Rt, Register Rn) { \ + lse_atomic(Rs, Rt, Rn, sz, op1, op2, true, false); \ + } \ + void NAME_L(operand_size sz, Register Rs, Register Rt, Register Rn) { \ + lse_atomic(Rs, Rt, Rn, sz, op1, op2, false, true); \ + } \ + void NAME_AL(operand_size sz, Register Rs, Register Rt, Register Rn) {\ + lse_atomic(Rs, Rt, Rn, sz, op1, op2, true, true); \ + } + INSN(ldadd, ldadda, ldaddl, ldaddal, 0, 0b000); + INSN(ldbic, ldbica, ldbicl, ldbical, 0, 0b001); + 
INSN(ldeor, ldeora, ldeorl, ldeoral, 0, 0b010); + INSN(ldorr, ldorra, ldorrl, ldorral, 0, 0b011); + INSN(ldsmax, ldsmaxa, ldsmaxl, ldsmaxal, 0, 0b100); + INSN(ldsmin, ldsmina, ldsminl, ldsminal, 0, 0b101); + INSN(ldumax, ldumaxa, ldumaxl, ldumaxal, 0, 0b110); + INSN(ldumin, ldumina, lduminl, lduminal, 0, 0b111); + INSN(swp, swpa, swpl, swpal, 1, 0b000); +#undef INSN + // Load register (literal) #define INSN(NAME, opc, V) \ void NAME(Register Rt, address dest) { \ diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp --- a/hotspot/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1556,54 +1556,14 @@ } void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) { - if (UseLSE) { - __ mov(rscratch1, cmpval); - __ casal(Assembler::word, rscratch1, newval, addr); - __ cmpw(rscratch1, cmpval); - __ cset(rscratch1, Assembler::NE); - } else { - Label retry_load, nope; - // flush and load exclusive from the memory location - // and fail if it is not what we expect - __ prfm(Address(addr), PSTL1STRM); - __ bind(retry_load); - __ ldaxrw(rscratch1, addr); - __ cmpw(rscratch1, cmpval); - __ cset(rscratch1, Assembler::NE); - __ br(Assembler::NE, nope); - // if we store+flush with no intervening write rscratch1 wil be zero - __ stlxrw(rscratch1, newval, addr); - // retry so we only ever return after a load fails to compare - // ensures we don't return a stale value after a failed write. - __ cbnzw(rscratch1, retry_load); - __ bind(nope); - } + __ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire*/ true, /* release*/ true, rscratch1); + __ cset(rscratch1, Assembler::NE); __ membar(__ AnyAny); } void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) { - if (UseLSE) { - __ mov(rscratch1, cmpval); - __ casal(Assembler::xword, rscratch1, newval, addr); - __ cmp(rscratch1, cmpval); - __ cset(rscratch1, Assembler::NE); - } else { - Label retry_load, nope; - // flush and load exclusive from the memory location - // and fail if it is not what we expect - __ prfm(Address(addr), PSTL1STRM); - __ bind(retry_load); - __ ldaxr(rscratch1, addr); - __ cmp(rscratch1, cmpval); - __ cset(rscratch1, Assembler::NE); - __ br(Assembler::NE, nope); - // if we store+flush with no intervening write rscratch1 wil be zero - __ stlxr(rscratch1, newval, addr); - // retry so we only ever return after a load fails to compare - // ensures we don't return a stale value after a failed write. 
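As an aside (not from the patch): the retry loop being removed here implements the compare-and-swap contract that `MacroAssembler::cmpxchg` now provides in one place, presumably selecting between the LSE form and the exclusive-load/store loop internally. A rough C++11 stand-in for that contract, using `std::atomic` purely for illustration:

```cpp
#include <atomic>
#include <cstdint>

// Sketch of the casw contract: succeed only if *addr still holds 'expected',
// in which case it is replaced by 'desired'. compare_exchange_strong hides
// any retry-on-failed-store loop, which is what the open-coded
// ldaxrw/stlxrw sequence above did by hand.
bool cas_word(std::atomic<uint32_t>& addr, uint32_t expected, uint32_t desired) {
  return addr.compare_exchange_strong(expected, desired,
                                      std::memory_order_acq_rel);
}
```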
- __ cbnz(rscratch1, retry_load); - __ bind(nope); - } + __ cmpxchg(addr, cmpval, newval, Assembler::xword, /* acquire*/ true, /* release*/ true, rscratch1); + __ cset(rscratch1, Assembler::NE); __ membar(__ AnyAny); } @@ -3121,38 +3081,32 @@ BasicType type = src->type(); bool is_oop = type == T_OBJECT || type == T_ARRAY; - void (MacroAssembler::* lda)(Register Rd, Register Ra); - void (MacroAssembler::* add)(Register Rd, Register Rn, RegisterOrConstant increment); - void (MacroAssembler::* stl)(Register Rs, Register Rt, Register Rn); + void (MacroAssembler::* add)(Register prev, RegisterOrConstant incr, Register addr); + void (MacroAssembler::* xchg)(Register prev, Register newv, Register addr); switch(type) { case T_INT: - lda = &MacroAssembler::ldaxrw; - add = &MacroAssembler::addw; - stl = &MacroAssembler::stlxrw; + xchg = &MacroAssembler::atomic_xchgalw; + add = &MacroAssembler::atomic_addalw; break; case T_LONG: - lda = &MacroAssembler::ldaxr; - add = &MacroAssembler::add; - stl = &MacroAssembler::stlxr; + xchg = &MacroAssembler::atomic_xchgal; + add = &MacroAssembler::atomic_addal; break; case T_OBJECT: case T_ARRAY: if (UseCompressedOops) { - lda = &MacroAssembler::ldaxrw; - add = &MacroAssembler::addw; - stl = &MacroAssembler::stlxrw; + xchg = &MacroAssembler::atomic_xchgalw; + add = &MacroAssembler::atomic_addalw; } else { - lda = &MacroAssembler::ldaxr; - add = &MacroAssembler::add; - stl = &MacroAssembler::stlxr; + xchg = &MacroAssembler::atomic_xchgal; + add = &MacroAssembler::atomic_addal; } break; default: ShouldNotReachHere(); - lda = &MacroAssembler::ldaxr; - add = &MacroAssembler::add; - stl = &MacroAssembler::stlxr; // unreachable + xchg = &MacroAssembler::atomic_xchgal; + add = &MacroAssembler::atomic_addal; // unreachable } switch (code) { @@ -3170,14 +3124,8 @@ assert_different_registers(inc.as_register(), dst, addr.base(), tmp, rscratch1, rscratch2); } - Label again; __ lea(tmp, addr); - __ prfm(Address(tmp), PSTL1STRM); - __ bind(again); - (_masm->*lda)(dst, tmp); - (_masm->*add)(rscratch1, dst, inc); - (_masm->*stl)(rscratch2, rscratch1, tmp); - __ cbnzw(rscratch2, again); + (_masm->*add)(dst, inc, tmp); break; } case lir_xchg: @@ -3186,17 +3134,12 @@ Register obj = as_reg(data); Register dst = as_reg(dest); if (is_oop && UseCompressedOops) { - __ encode_heap_oop(rscratch1, obj); - obj = rscratch1; + __ encode_heap_oop(rscratch2, obj); + obj = rscratch2; } - assert_different_registers(obj, addr.base(), tmp, rscratch2, dst); - Label again; + assert_different_registers(obj, addr.base(), tmp, rscratch1, dst); __ lea(tmp, addr); - __ prfm(Address(tmp), PSTL1STRM); - __ bind(again); - (_masm->*lda)(dst, tmp); - (_masm->*stl)(rscratch2, obj, tmp); - __ cbnzw(rscratch2, again); + (_masm->*xchg)(dst, obj, tmp); if (is_oop && UseCompressedOops) { __ decode_heap_oop(dst); } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/cpu/aarch64/vm/c2_globals_aarch64.hpp --- a/hotspot/src/cpu/aarch64/vm/c2_globals_aarch64.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/cpu/aarch64/vm/c2_globals_aarch64.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -55,6 +55,7 @@ define_pd_global(intx, NewSizeThreadIncrease, ScaleForWordSize(4*K)); define_pd_global(intx, LoopUnrollLimit, 60); define_pd_global(intx, LoopPercentProfileLimit, 10); +define_pd_global(intx, PostLoopMultiversioning, false); // InitialCodeCacheSize derived from specjbb2000 run. 
define_pd_global(intx, InitialCodeCacheSize, 2496*K); // Integral multiple of CodeCacheExpansionSize define_pd_global(intx, CodeCacheExpansionSize, 64*K); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp --- a/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1637,6 +1637,11 @@ } void MacroAssembler::atomic_incw(Register counter_addr, Register tmp, Register tmp2) { + if (UseLSE) { + mov(tmp, 1); + ldadd(Assembler::word, tmp, zr, counter_addr); + return; + } Label retry_load; prfm(Address(counter_addr), PSTL1STRM); bind(retry_load); @@ -2172,8 +2177,18 @@ return a != b.as_register() && a != c && b.as_register() != c; } -#define ATOMIC_OP(LDXR, OP, IOP, STXR) \ -void MacroAssembler::atomic_##OP(Register prev, RegisterOrConstant incr, Register addr) { \ +#define ATOMIC_OP(NAME, LDXR, OP, IOP, AOP, STXR, sz) \ +void MacroAssembler::atomic_##NAME(Register prev, RegisterOrConstant incr, Register addr) { \ + if (UseLSE) { \ + prev = prev->is_valid() ? prev : zr; \ + if (incr.is_register()) { \ + AOP(sz, incr.as_register(), prev, addr); \ + } else { \ + mov(rscratch2, incr.as_constant()); \ + AOP(sz, rscratch2, prev, addr); \ + } \ + return; \ + } \ Register result = rscratch2; \ if (prev->is_valid()) \ result = different(prev, incr, addr) ? prev : rscratch2; \ @@ -2190,13 +2205,20 @@ } \ } -ATOMIC_OP(ldxr, add, sub, stxr) -ATOMIC_OP(ldxrw, addw, subw, stxrw) +ATOMIC_OP(add, ldxr, add, sub, ldadd, stxr, Assembler::xword) +ATOMIC_OP(addw, ldxrw, addw, subw, ldadd, stxrw, Assembler::word) +ATOMIC_OP(addal, ldaxr, add, sub, ldaddal, stlxr, Assembler::xword) +ATOMIC_OP(addalw, ldaxrw, addw, subw, ldaddal, stlxrw, Assembler::word) #undef ATOMIC_OP -#define ATOMIC_XCHG(OP, LDXR, STXR) \ +#define ATOMIC_XCHG(OP, AOP, LDXR, STXR, sz) \ void MacroAssembler::atomic_##OP(Register prev, Register newv, Register addr) { \ + if (UseLSE) { \ + prev = prev->is_valid() ? prev : zr; \ + AOP(sz, newv, prev, addr); \ + return; \ + } \ Register result = rscratch2; \ if (prev->is_valid()) \ result = different(prev, newv, addr) ? 
prev : rscratch2; \ @@ -2211,8 +2233,10 @@ mov(prev, result); \ } -ATOMIC_XCHG(xchg, ldxr, stxr) -ATOMIC_XCHG(xchgw, ldxrw, stxrw) +ATOMIC_XCHG(xchg, swp, ldxr, stxr, Assembler::xword) +ATOMIC_XCHG(xchgw, swp, ldxrw, stxrw, Assembler::word) +ATOMIC_XCHG(xchgal, swpal, ldaxr, stlxr, Assembler::xword) +ATOMIC_XCHG(xchgalw, swpal, ldaxrw, stlxrw, Assembler::word) #undef ATOMIC_XCHG diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp --- a/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -957,9 +957,13 @@ void atomic_add(Register prev, RegisterOrConstant incr, Register addr); void atomic_addw(Register prev, RegisterOrConstant incr, Register addr); + void atomic_addal(Register prev, RegisterOrConstant incr, Register addr); + void atomic_addalw(Register prev, RegisterOrConstant incr, Register addr); void atomic_xchg(Register prev, Register newv, Register addr); void atomic_xchgw(Register prev, Register newv, Register addr); + void atomic_xchgal(Register prev, Register newv, Register addr); + void atomic_xchgalw(Register prev, Register newv, Register addr); void orptr(Address adr, RegisterOrConstant src) { ldr(rscratch2, adr); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp --- a/hotspot/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -31,6 +31,7 @@ #include "code/vtableStubs.hpp" #include "interpreter/interpreter.hpp" #include "interpreter/interp_masm.hpp" +#include "memory/resourceArea.hpp" #include "oops/compiledICHolder.hpp" #include "prims/jvmtiRedefineClassesTrace.hpp" #include "runtime/sharedRuntime.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp --- a/hotspot/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1711,20 +1711,42 @@ // to a long, int, short, or byte copy loop. 
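For illustration only (not HotSpot code): the dispatch that `generate_unsafe_copy` now performs works by OR-ing the source address, destination address and byte count, so a misalignment in any of the three shows up in the low bits of a single value, and one test per width selects the widest safe element copy. A plain C++ sketch of that selection:

```cpp
#include <cstddef>
#include <cstdint>

// Widest element size (in bytes) that src, dst and the byte count are all
// aligned to -- the same check the stub makes with orr/andr/cbz/tbz before
// branching to the long, int, short or byte copy entry.
size_t unsafe_copy_element_size(const void* src, const void* dst, size_t byte_count) {
  uintptr_t bits = reinterpret_cast<uintptr_t>(src)
                 | reinterpret_cast<uintptr_t>(dst)
                 | static_cast<uintptr_t>(byte_count);
  if ((bits & 7) == 0) return 8;  // all long-aligned
  if ((bits & 3) == 0) return 4;  // all int-aligned
  if ((bits & 1) == 0) return 2;  // all short-aligned
  return 1;                       // fall back to the byte copy
}
```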
// address generate_unsafe_copy(const char *name, - address byte_copy_entry) { -#ifdef PRODUCT - return StubRoutines::_jbyte_arraycopy; -#else + address byte_copy_entry, + address short_copy_entry, + address int_copy_entry, + address long_copy_entry) { + Label L_long_aligned, L_int_aligned, L_short_aligned; + Register s = c_rarg0, d = c_rarg1, count = c_rarg2; + __ align(CodeEntryAlignment); StubCodeMark mark(this, "StubRoutines", name); address start = __ pc(); __ enter(); // required for proper stackwalking of RuntimeStub frame + // bump this on entry, not on exit: - __ lea(rscratch2, ExternalAddress((address)&SharedRuntime::_unsafe_array_copy_ctr)); - __ incrementw(Address(rscratch2)); + inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr); + + __ orr(rscratch1, s, d); + __ orr(rscratch1, rscratch1, count); + + __ andr(rscratch1, rscratch1, BytesPerLong-1); + __ cbz(rscratch1, L_long_aligned); + __ andr(rscratch1, rscratch1, BytesPerInt-1); + __ cbz(rscratch1, L_int_aligned); + __ tbz(rscratch1, 0, L_short_aligned); __ b(RuntimeAddress(byte_copy_entry)); + + __ BIND(L_short_aligned); + __ lsr(count, count, LogBytesPerShort); // size => short_count + __ b(RuntimeAddress(short_copy_entry)); + __ BIND(L_int_aligned); + __ lsr(count, count, LogBytesPerInt); // size => int_count + __ b(RuntimeAddress(int_copy_entry)); + __ BIND(L_long_aligned); + __ lsr(count, count, LogBytesPerLong); // size => long_count + __ b(RuntimeAddress(long_copy_entry)); + return start; -#endif } // @@ -2090,7 +2112,10 @@ /*dest_uninitialized*/true); StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy", - entry_jbyte_arraycopy); + entry_jbyte_arraycopy, + entry_jshort_arraycopy, + entry_jint_arraycopy, + entry_jlong_arraycopy); StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy", entry_jbyte_arraycopy, diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/cpu/aarch64/vm/templateInterpreterGenerator_aarch64.cpp --- a/hotspot/src/cpu/aarch64/vm/templateInterpreterGenerator_aarch64.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/cpu/aarch64/vm/templateInterpreterGenerator_aarch64.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -32,6 +32,7 @@ #include "interpreter/templateInterpreterGenerator.hpp" #include "interpreter/templateTable.hpp" #include "interpreter/bytecodeTracer.hpp" +#include "memory/resourceArea.hpp" #include "oops/arrayOop.hpp" #include "oops/methodData.hpp" #include "oops/method.hpp" @@ -1967,7 +1968,7 @@ __ push(RegSet::range(r0, r15), sp); __ mov(c_rarg2, r0); // Pass itos __ call_VM(noreg, - CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), + CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), c_rarg1, c_rarg2, c_rarg3); __ pop(RegSet::range(r0, r15), sp); __ pop(state); @@ -1982,14 +1983,8 @@ __ push(rscratch1); __ push(rscratch2); __ push(rscratch3); - Label L; - __ mov(rscratch2, (address) &BytecodeCounter::_counter_value); - __ prfm(Address(rscratch2), PSTL1STRM); - __ bind(L); - __ ldxr(rscratch1, rscratch2); - __ add(rscratch1, rscratch1, 1); - __ stxr(rscratch3, rscratch1, rscratch2); - __ cbnzw(rscratch3, L); + __ mov(rscratch3, (address) &BytecodeCounter::_counter_value); + __ atomic_add(noreg, 1, rscratch3); __ pop(rscratch3); __ pop(rscratch2); __ pop(rscratch1); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/cpu/aarch64/vm/vm_version_aarch64.hpp --- a/hotspot/src/cpu/aarch64/vm/vm_version_aarch64.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/cpu/aarch64/vm/vm_version_aarch64.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -73,6 +73,7 
@@ CPU_SHA1 = (1<<5), CPU_SHA2 = (1<<6), CPU_CRC32 = (1<<7), + CPU_LSE = (1<<8), CPU_A53MAC = (1 << 30), CPU_DMB_ATOMICS = (1 << 31), }; diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/cpu/ppc/vm/assembler_ppc.hpp --- a/hotspot/src/cpu/ppc/vm/assembler_ppc.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/cpu/ppc/vm/assembler_ppc.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -624,6 +624,7 @@ VNOR_OPCODE = (4u << OPCODE_SHIFT | 1284u ), VOR_OPCODE = (4u << OPCODE_SHIFT | 1156u ), VXOR_OPCODE = (4u << OPCODE_SHIFT | 1220u ), + VRLD_OPCODE = (4u << OPCODE_SHIFT | 196u ), VRLB_OPCODE = (4u << OPCODE_SHIFT | 4u ), VRLW_OPCODE = (4u << OPCODE_SHIFT | 132u ), VRLH_OPCODE = (4u << OPCODE_SHIFT | 68u ), @@ -2047,6 +2048,7 @@ inline void vnor( VectorRegister d, VectorRegister a, VectorRegister b); inline void vor( VectorRegister d, VectorRegister a, VectorRegister b); inline void vxor( VectorRegister d, VectorRegister a, VectorRegister b); + inline void vrld( VectorRegister d, VectorRegister a, VectorRegister b); inline void vrlb( VectorRegister d, VectorRegister a, VectorRegister b); inline void vrlw( VectorRegister d, VectorRegister a, VectorRegister b); inline void vrlh( VectorRegister d, VectorRegister a, VectorRegister b); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/cpu/ppc/vm/assembler_ppc.inline.hpp --- a/hotspot/src/cpu/ppc/vm/assembler_ppc.inline.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/cpu/ppc/vm/assembler_ppc.inline.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -839,6 +839,7 @@ inline void Assembler::vnor( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VNOR_OPCODE | vrt(d) | vra(a) | vrb(b)); } inline void Assembler::vor( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VOR_OPCODE | vrt(d) | vra(a) | vrb(b)); } inline void Assembler::vxor( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VXOR_OPCODE | vrt(d) | vra(a) | vrb(b)); } +inline void Assembler::vrld( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VRLD_OPCODE | vrt(d) | vra(a) | vrb(b)); } inline void Assembler::vrlb( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VRLB_OPCODE | vrt(d) | vra(a) | vrb(b)); } inline void Assembler::vrlw( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VRLW_OPCODE | vrt(d) | vra(a) | vrb(b)); } inline void Assembler::vrlh( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VRLH_OPCODE | vrt(d) | vra(a) | vrb(b)); } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/cpu/ppc/vm/c2_globals_ppc.hpp --- a/hotspot/src/cpu/ppc/vm/c2_globals_ppc.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/cpu/ppc/vm/c2_globals_ppc.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -55,6 +55,7 @@ define_pd_global(bool, ResizeTLAB, true); define_pd_global(intx, LoopUnrollLimit, 60); define_pd_global(intx, LoopPercentProfileLimit, 10); +define_pd_global(intx, PostLoopMultiversioning, false); // Peephole and CISC spilling both break the graph, and so make the // scheduler sick. diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/cpu/ppc/vm/methodHandles_ppc.cpp --- a/hotspot/src/cpu/ppc/vm/methodHandles_ppc.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/cpu/ppc/vm/methodHandles_ppc.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2012, 2015 SAP SE. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -28,6 +28,7 @@ #include "classfile/javaClasses.inline.hpp" #include "interpreter/interpreter.hpp" #include "memory/allocation.inline.hpp" +#include "memory/resourceArea.hpp" #include "prims/methodHandles.hpp" #define __ _masm-> diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/cpu/ppc/vm/runtime_ppc.cpp --- a/hotspot/src/cpu/ppc/vm/runtime_ppc.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/cpu/ppc/vm/runtime_ppc.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2012, 2015 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -31,6 +31,7 @@ #include "code/vmreg.hpp" #include "interpreter/interpreter.hpp" #include "interpreter/interp_masm.hpp" +#include "memory/resourceArea.hpp" #include "nativeInst_ppc.hpp" #include "opto/runtime.hpp" #include "runtime/interfaceSupport.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/cpu/ppc/vm/sharedRuntime_ppc.cpp --- a/hotspot/src/cpu/ppc/vm/sharedRuntime_ppc.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/cpu/ppc/vm/sharedRuntime_ppc.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -31,6 +31,7 @@ #include "frame_ppc.hpp" #include "interpreter/interpreter.hpp" #include "interpreter/interp_masm.hpp" +#include "memory/resourceArea.hpp" #include "oops/compiledICHolder.hpp" #include "prims/jvmtiRedefineClassesTrace.hpp" #include "runtime/sharedRuntime.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/cpu/ppc/vm/stubGenerator_ppc.cpp --- a/hotspot/src/cpu/ppc/vm/stubGenerator_ppc.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/cpu/ppc/vm/stubGenerator_ppc.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -2417,6 +2417,433 @@ return start; } + // Arguments for generated stub (little endian only): + // R3_ARG1 - source byte array address + // R4_ARG2 - destination byte array address + // R5_ARG3 - round key array + address generate_aescrypt_encryptBlock() { + assert(UseAES, "need AES instructions and misaligned SSE support"); + StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock"); + + address start = __ function_entry(); + + Label L_doLast; + + Register from = R3_ARG1; // source array address + Register to = R4_ARG2; // destination array address + Register key = R5_ARG3; // round key array + + Register keylen = R8; + Register temp = R9; + Register keypos = R10; + Register hex = R11; + Register fifteen = R12; + + VectorRegister vRet = VR0; + + VectorRegister vKey1 = VR1; + VectorRegister vKey2 = VR2; + VectorRegister vKey3 = VR3; + VectorRegister vKey4 = VR4; + + VectorRegister fromPerm = VR5; + VectorRegister keyPerm = VR6; + VectorRegister toPerm = VR7; + VectorRegister fSplt = VR8; + + VectorRegister vTmp1 = VR9; + VectorRegister vTmp2 = VR10; + VectorRegister vTmp3 = VR11; + VectorRegister vTmp4 = VR12; + + VectorRegister vLow = VR13; + VectorRegister vHigh = VR14; + + __ li (hex, 16); + __ li (fifteen, 15); + __ vspltisb (fSplt, 0x0f); + + // load unaligned from[0-15] to vsRet + __ lvx (vRet, from); + __ lvx (vTmp1, fifteen, from); + __ lvsl (fromPerm, from); + __ vxor (fromPerm, fromPerm, fSplt); + __ vperm (vRet, vRet, vTmp1, fromPerm); + + // load keylen (44 or 52 or 60) + __ lwz (keylen, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT), key); + + // to load keys + __ lvsr (keyPerm, key); + __ vxor (vTmp2, vTmp2, 
vTmp2); + __ vspltisb (vTmp2, -16); + __ vrld (keyPerm, keyPerm, vTmp2); + __ vrld (keyPerm, keyPerm, vTmp2); + __ vsldoi (keyPerm, keyPerm, keyPerm, -8); + + // load the 1st round key to vKey1 + __ li (keypos, 0); + __ lvx (vKey1, keypos, key); + __ addi (keypos, keypos, 16); + __ lvx (vTmp1, keypos, key); + __ vperm (vKey1, vTmp1, vKey1, keyPerm); + + // 1st round + __ vxor (vRet, vRet, vKey1); + + // load the 2nd round key to vKey1 + __ addi (keypos, keypos, 16); + __ lvx (vTmp2, keypos, key); + __ vperm (vKey1, vTmp2, vTmp1, keyPerm); + + // load the 3rd round key to vKey2 + __ addi (keypos, keypos, 16); + __ lvx (vTmp1, keypos, key); + __ vperm (vKey2, vTmp1, vTmp2, keyPerm); + + // load the 4th round key to vKey3 + __ addi (keypos, keypos, 16); + __ lvx (vTmp2, keypos, key); + __ vperm (vKey3, vTmp2, vTmp1, keyPerm); + + // load the 5th round key to vKey4 + __ addi (keypos, keypos, 16); + __ lvx (vTmp1, keypos, key); + __ vperm (vKey4, vTmp1, vTmp2, keyPerm); + + // 2nd - 5th rounds + __ vcipher (vRet, vRet, vKey1); + __ vcipher (vRet, vRet, vKey2); + __ vcipher (vRet, vRet, vKey3); + __ vcipher (vRet, vRet, vKey4); + + // load the 6th round key to vKey1 + __ addi (keypos, keypos, 16); + __ lvx (vTmp2, keypos, key); + __ vperm (vKey1, vTmp2, vTmp1, keyPerm); + + // load the 7th round key to vKey2 + __ addi (keypos, keypos, 16); + __ lvx (vTmp1, keypos, key); + __ vperm (vKey2, vTmp1, vTmp2, keyPerm); + + // load the 8th round key to vKey3 + __ addi (keypos, keypos, 16); + __ lvx (vTmp2, keypos, key); + __ vperm (vKey3, vTmp2, vTmp1, keyPerm); + + // load the 9th round key to vKey4 + __ addi (keypos, keypos, 16); + __ lvx (vTmp1, keypos, key); + __ vperm (vKey4, vTmp1, vTmp2, keyPerm); + + // 6th - 9th rounds + __ vcipher (vRet, vRet, vKey1); + __ vcipher (vRet, vRet, vKey2); + __ vcipher (vRet, vRet, vKey3); + __ vcipher (vRet, vRet, vKey4); + + // load the 10th round key to vKey1 + __ addi (keypos, keypos, 16); + __ lvx (vTmp2, keypos, key); + __ vperm (vKey1, vTmp2, vTmp1, keyPerm); + + // load the 11th round key to vKey2 + __ addi (keypos, keypos, 16); + __ lvx (vTmp1, keypos, key); + __ vperm (vKey2, vTmp1, vTmp2, keyPerm); + + // if all round keys are loaded, skip next 4 rounds + __ cmpwi (CCR0, keylen, 44); + __ beq (CCR0, L_doLast); + + // 10th - 11th rounds + __ vcipher (vRet, vRet, vKey1); + __ vcipher (vRet, vRet, vKey2); + + // load the 12th round key to vKey1 + __ addi (keypos, keypos, 16); + __ lvx (vTmp2, keypos, key); + __ vperm (vKey1, vTmp2, vTmp1, keyPerm); + + // load the 13th round key to vKey2 + __ addi (keypos, keypos, 16); + __ lvx (vTmp1, keypos, key); + __ vperm (vKey2, vTmp1, vTmp2, keyPerm); + + // if all round keys are loaded, skip next 2 rounds + __ cmpwi (CCR0, keylen, 52); + __ beq (CCR0, L_doLast); + + // 12th - 13th rounds + __ vcipher (vRet, vRet, vKey1); + __ vcipher (vRet, vRet, vKey2); + + // load the 14th round key to vKey1 + __ addi (keypos, keypos, 16); + __ lvx (vTmp2, keypos, key); + __ vperm (vKey1, vTmp2, vTmp1, keyPerm); + + // load the 15th round key to vKey2 + __ addi (keypos, keypos, 16); + __ lvx (vTmp1, keypos, key); + __ vperm (vKey2, vTmp1, vTmp2, keyPerm); + + __ bind(L_doLast); + + // last two rounds + __ vcipher (vRet, vRet, vKey1); + __ vcipherlast (vRet, vRet, vKey2); + + __ neg (temp, to); + __ lvsr (toPerm, temp); + __ vspltisb (vTmp2, -1); + __ vxor (vTmp1, vTmp1, vTmp1); + __ vperm (vTmp2, vTmp2, vTmp1, toPerm); + __ vxor (toPerm, toPerm, fSplt); + __ lvx (vTmp1, to); + __ vperm (vRet, vRet, vRet, toPerm); + __ vsel (vTmp1, 
vTmp1, vRet, vTmp2); + __ lvx (vTmp4, fifteen, to); + __ stvx (vTmp1, to); + __ vsel (vRet, vRet, vTmp4, vTmp2); + __ stvx (vRet, fifteen, to); + + __ blr(); + return start; + } + + // Arguments for generated stub (little endian only): + // R3_ARG1 - source byte array address + // R4_ARG2 - destination byte array address + // R5_ARG3 - K (key) in little endian int array + address generate_aescrypt_decryptBlock() { + assert(UseAES, "need AES instructions and misaligned SSE support"); + StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock"); + + address start = __ function_entry(); + + Label L_doLast; + Label L_do44; + Label L_do52; + Label L_do60; + + Register from = R3_ARG1; // source array address + Register to = R4_ARG2; // destination array address + Register key = R5_ARG3; // round key array + + Register keylen = R8; + Register temp = R9; + Register keypos = R10; + Register hex = R11; + Register fifteen = R12; + + VectorRegister vRet = VR0; + + VectorRegister vKey1 = VR1; + VectorRegister vKey2 = VR2; + VectorRegister vKey3 = VR3; + VectorRegister vKey4 = VR4; + VectorRegister vKey5 = VR5; + + VectorRegister fromPerm = VR6; + VectorRegister keyPerm = VR7; + VectorRegister toPerm = VR8; + VectorRegister fSplt = VR9; + + VectorRegister vTmp1 = VR10; + VectorRegister vTmp2 = VR11; + VectorRegister vTmp3 = VR12; + VectorRegister vTmp4 = VR13; + + VectorRegister vLow = VR14; + VectorRegister vHigh = VR15; + + __ li (hex, 16); + __ li (fifteen, 15); + __ vspltisb (fSplt, 0x0f); + + // load unaligned from[0-15] to vsRet + __ lvx (vRet, from); + __ lvx (vTmp1, fifteen, from); + __ lvsl (fromPerm, from); + __ vxor (fromPerm, fromPerm, fSplt); + __ vperm (vRet, vRet, vTmp1, fromPerm); // align [and byte swap in LE] + + // load keylen (44 or 52 or 60) + __ lwz (keylen, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT), key); + + // to load keys + __ lvsr (keyPerm, key); + __ vxor (vTmp2, vTmp2, vTmp2); + __ vspltisb (vTmp2, -16); + __ vrld (keyPerm, keyPerm, vTmp2); + __ vrld (keyPerm, keyPerm, vTmp2); + __ vsldoi (keyPerm, keyPerm, keyPerm, -8); + + __ cmpwi (CCR0, keylen, 44); + __ beq (CCR0, L_do44); + + __ cmpwi (CCR0, keylen, 52); + __ beq (CCR0, L_do52); + + // load the 15th round key to vKey11 + __ li (keypos, 240); + __ lvx (vTmp1, keypos, key); + __ addi (keypos, keypos, -16); + __ lvx (vTmp2, keypos, key); + __ vperm (vKey1, vTmp1, vTmp2, keyPerm); + + // load the 14th round key to vKey10 + __ addi (keypos, keypos, -16); + __ lvx (vTmp1, keypos, key); + __ vperm (vKey2, vTmp2, vTmp1, keyPerm); + + // load the 13th round key to vKey10 + __ addi (keypos, keypos, -16); + __ lvx (vTmp2, keypos, key); + __ vperm (vKey3, vTmp1, vTmp2, keyPerm); + + // load the 12th round key to vKey10 + __ addi (keypos, keypos, -16); + __ lvx (vTmp1, keypos, key); + __ vperm (vKey4, vTmp2, vTmp1, keyPerm); + + // load the 11th round key to vKey10 + __ addi (keypos, keypos, -16); + __ lvx (vTmp2, keypos, key); + __ vperm (vKey5, vTmp1, vTmp2, keyPerm); + + // 1st - 5th rounds + __ vxor (vRet, vRet, vKey1); + __ vncipher (vRet, vRet, vKey2); + __ vncipher (vRet, vRet, vKey3); + __ vncipher (vRet, vRet, vKey4); + __ vncipher (vRet, vRet, vKey5); + + __ b (L_doLast); + + __ bind (L_do52); + + // load the 13th round key to vKey11 + __ li (keypos, 208); + __ lvx (vTmp1, keypos, key); + __ addi (keypos, keypos, -16); + __ lvx (vTmp2, keypos, key); + __ vperm (vKey1, vTmp1, vTmp2, keyPerm); + + // load the 12th round key to vKey10 + __ addi (keypos, keypos, -16); + __ lvx 
(vTmp1, keypos, key); + __ vperm (vKey2, vTmp2, vTmp1, keyPerm); + + // load the 11th round key to vKey10 + __ addi (keypos, keypos, -16); + __ lvx (vTmp2, keypos, key); + __ vperm (vKey3, vTmp1, vTmp2, keyPerm); + + // 1st - 3rd rounds + __ vxor (vRet, vRet, vKey1); + __ vncipher (vRet, vRet, vKey2); + __ vncipher (vRet, vRet, vKey3); + + __ b (L_doLast); + + __ bind (L_do44); + + // load the 11th round key to vKey11 + __ li (keypos, 176); + __ lvx (vTmp1, keypos, key); + __ addi (keypos, keypos, -16); + __ lvx (vTmp2, keypos, key); + __ vperm (vKey1, vTmp1, vTmp2, keyPerm); + + // 1st round + __ vxor (vRet, vRet, vKey1); + + __ bind (L_doLast); + + // load the 10th round key to vKey10 + __ addi (keypos, keypos, -16); + __ lvx (vTmp1, keypos, key); + __ vperm (vKey1, vTmp2, vTmp1, keyPerm); + + // load the 9th round key to vKey10 + __ addi (keypos, keypos, -16); + __ lvx (vTmp2, keypos, key); + __ vperm (vKey2, vTmp1, vTmp2, keyPerm); + + // load the 8th round key to vKey10 + __ addi (keypos, keypos, -16); + __ lvx (vTmp1, keypos, key); + __ vperm (vKey3, vTmp2, vTmp1, keyPerm); + + // load the 7th round key to vKey10 + __ addi (keypos, keypos, -16); + __ lvx (vTmp2, keypos, key); + __ vperm (vKey4, vTmp1, vTmp2, keyPerm); + + // load the 6th round key to vKey10 + __ addi (keypos, keypos, -16); + __ lvx (vTmp1, keypos, key); + __ vperm (vKey5, vTmp2, vTmp1, keyPerm); + + // last 10th - 6th rounds + __ vncipher (vRet, vRet, vKey1); + __ vncipher (vRet, vRet, vKey2); + __ vncipher (vRet, vRet, vKey3); + __ vncipher (vRet, vRet, vKey4); + __ vncipher (vRet, vRet, vKey5); + + // load the 5th round key to vKey10 + __ addi (keypos, keypos, -16); + __ lvx (vTmp2, keypos, key); + __ vperm (vKey1, vTmp1, vTmp2, keyPerm); + + // load the 4th round key to vKey10 + __ addi (keypos, keypos, -16); + __ lvx (vTmp1, keypos, key); + __ vperm (vKey2, vTmp2, vTmp1, keyPerm); + + // load the 3rd round key to vKey10 + __ addi (keypos, keypos, -16); + __ lvx (vTmp2, keypos, key); + __ vperm (vKey3, vTmp1, vTmp2, keyPerm); + + // load the 2nd round key to vKey10 + __ addi (keypos, keypos, -16); + __ lvx (vTmp1, keypos, key); + __ vperm (vKey4, vTmp2, vTmp1, keyPerm); + + // load the 1st round key to vKey10 + __ addi (keypos, keypos, -16); + __ lvx (vTmp2, keypos, key); + __ vperm (vKey5, vTmp1, vTmp2, keyPerm); + + // last 5th - 1th rounds + __ vncipher (vRet, vRet, vKey1); + __ vncipher (vRet, vRet, vKey2); + __ vncipher (vRet, vRet, vKey3); + __ vncipher (vRet, vRet, vKey4); + __ vncipherlast (vRet, vRet, vKey5); + + __ neg (temp, to); + __ lvsr (toPerm, temp); + __ vspltisb (vTmp2, -1); + __ vxor (vTmp1, vTmp1, vTmp1); + __ vperm (vTmp2, vTmp2, vTmp1, toPerm); + __ vxor (toPerm, toPerm, fSplt); + __ lvx (vTmp1, to); + __ vperm (vRet, vRet, vRet, toPerm); + __ vsel (vTmp1, vTmp1, vRet, vTmp2); + __ lvx (vTmp4, fifteen, to); + __ stvx (vTmp1, to); + __ vsel (vRet, vRet, vTmp4, vTmp2); + __ stvx (vRet, fifteen, to); + + __ blr(); + return start; + } void generate_arraycopy_stubs() { // Note: the disjoint stubs must be generated first, some of @@ -2693,10 +3120,6 @@ // arraycopy stubs used by compilers generate_arraycopy_stubs(); - if (UseAESIntrinsics) { - guarantee(!UseAESIntrinsics, "not yet implemented."); - } - // Safefetch stubs. 
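A small standalone sketch (not HotSpot code) of the key-length convention the two AES stubs above rely on: the keylen value read from the round-key array is 44, 52 or 60 32-bit words, corresponding to AES-128/192/256 and therefore to 10, 12 or 14 cipher rounds, which is what the L_do44/L_do52/L_do60 branches are keyed on.

// Illustrative only: mapping the expanded-key length (in 32-bit words) that the
// stubs read from the round-key array to the number of AES rounds they run.
#include <cassert>
#include <cstdio>

static int aes_rounds_from_keylen_words(int keylen_words) {
  // 44 words -> 10 rounds (AES-128), 52 -> 12 (AES-192), 60 -> 14 (AES-256)
  assert(keylen_words == 44 || keylen_words == 52 || keylen_words == 60);
  return keylen_words / 4 - 1;
}

int main() {
  const int keylens[] = {44, 52, 60};
  for (int k : keylens) {
    std::printf("keylen = %d words -> %d rounds\n", k, aes_rounds_from_keylen_words(k));
  }
  return 0;
}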
generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry, &StubRoutines::_safefetch32_fault_pc, @@ -2719,6 +3142,12 @@ StubRoutines::_montgomerySquare = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_square); } + + if (UseAESIntrinsics) { + StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock(); + StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock(); + } + } public: diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/cpu/ppc/vm/templateInterpreterGenerator_ppc.cpp --- a/hotspot/src/cpu/ppc/vm/templateInterpreterGenerator_ppc.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/cpu/ppc/vm/templateInterpreterGenerator_ppc.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -2211,7 +2211,7 @@ __ ld(R6_ARG4, tsize*Interpreter::stackElementSize, R15_esp); __ ld(R5_ARG3, Interpreter::stackElementSize, R15_esp); __ mflr(R31); - __ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), /* unused */ R4_ARG2, R5_ARG3, R6_ARG4, false); + __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), /* unused */ R4_ARG2, R5_ARG3, R6_ARG4, false); __ mtlr(R31); __ pop(state); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/cpu/ppc/vm/vm_version_ppc.cpp --- a/hotspot/src/cpu/ppc/vm/vm_version_ppc.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/cpu/ppc/vm/vm_version_ppc.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -122,7 +122,7 @@ (has_fcfids() ? " fcfids" : ""), (has_vand() ? " vand" : ""), (has_lqarx() ? " lqarx" : ""), - (has_vcipher() ? " vcipher" : ""), + (has_vcipher() ? " aes" : ""), (has_vpmsumb() ? " vpmsumb" : ""), (has_tcheck() ? " tcheck" : ""), (has_mfdscr() ? " mfdscr" : "") @@ -186,6 +186,28 @@ } // The AES intrinsic stubs require AES instruction support. +#if defined(VM_LITTLE_ENDIAN) + if (has_vcipher()) { + if (FLAG_IS_DEFAULT(UseAES)) { + UseAES = true; + } + } else if (UseAES) { + if (!FLAG_IS_DEFAULT(UseAES)) + warning("AES instructions are not available on this CPU"); + FLAG_SET_DEFAULT(UseAES, false); + } + + if (UseAES && has_vcipher()) { + if (FLAG_IS_DEFAULT(UseAESIntrinsics)) { + UseAESIntrinsics = true; + } + } else if (UseAESIntrinsics) { + if (!FLAG_IS_DEFAULT(UseAESIntrinsics)) + warning("AES intrinsics are not available on this CPU"); + FLAG_SET_DEFAULT(UseAESIntrinsics, false); + } + +#else if (UseAES) { warning("AES instructions are not available on this CPU"); FLAG_SET_DEFAULT(UseAES, false); @@ -195,6 +217,7 @@ warning("AES intrinsics are not available on this CPU"); FLAG_SET_DEFAULT(UseAESIntrinsics, false); } +#endif if (UseAESCTRIntrinsics) { warning("AES/CTR intrinsics are not available on this CPU"); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/cpu/sparc/vm/c2_globals_sparc.hpp --- a/hotspot/src/cpu/sparc/vm/c2_globals_sparc.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/cpu/sparc/vm/c2_globals_sparc.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -53,6 +53,7 @@ define_pd_global(bool, ResizeTLAB, true); define_pd_global(intx, LoopUnrollLimit, 60); // Design center runs on 1.3.1 define_pd_global(intx, LoopPercentProfileLimit, 10); +define_pd_global(intx, PostLoopMultiversioning, false); define_pd_global(intx, MinJumpTableSize, 5); // Peephole and CISC spilling both break the graph, and so makes the diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/cpu/sparc/vm/methodHandles_sparc.cpp --- a/hotspot/src/cpu/sparc/vm/methodHandles_sparc.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/cpu/sparc/vm/methodHandles_sparc.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * 
Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,6 +28,7 @@ #include "interpreter/interpreter.hpp" #include "interpreter/interp_masm.hpp" #include "memory/allocation.inline.hpp" +#include "memory/resourceArea.hpp" #include "prims/methodHandles.hpp" #define __ _masm-> diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/cpu/sparc/vm/runtime_sparc.cpp --- a/hotspot/src/cpu/sparc/vm/runtime_sparc.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/cpu/sparc/vm/runtime_sparc.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,6 +28,7 @@ #include "classfile/systemDictionary.hpp" #include "code/vmreg.hpp" #include "interpreter/interpreter.hpp" +#include "memory/resourceArea.hpp" #include "nativeInst_sparc.hpp" #include "opto/runtime.hpp" #include "runtime/interfaceSupport.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp --- a/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,6 +28,7 @@ #include "code/icBuffer.hpp" #include "code/vtableStubs.hpp" #include "interpreter/interpreter.hpp" +#include "memory/resourceArea.hpp" #include "oops/compiledICHolder.hpp" #include "prims/jvmtiRedefineClassesTrace.hpp" #include "runtime/sharedRuntime.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/cpu/sparc/vm/templateInterpreterGenerator_sparc.cpp --- a/hotspot/src/cpu/sparc/vm/templateInterpreterGenerator_sparc.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/cpu/sparc/vm/templateInterpreterGenerator_sparc.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1966,7 +1966,7 @@ // Pass a 0 (not used in sparc) and the top of stack to the bytecode tracer __ mov( Otos_l2, G3_scratch ); - __ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), G0, Otos_l1, G3_scratch); + __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), G0, Otos_l1, G3_scratch); __ mov(Lscratch, O7); // restore return address __ pop(state); __ retl(); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/cpu/x86/vm/assembler_x86.cpp --- a/hotspot/src/cpu/x86/vm/assembler_x86.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/cpu/x86/vm/assembler_x86.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -3147,8 +3147,7 @@ void Assembler::vpackuswb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(UseAVX > 0, "some form of AVX must be enabled"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); - int nds_enc = nds->is_valid() ? 
nds->encoding() : 0; - int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); + int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); emit_int8(0x67); emit_int8((unsigned char)(0xC0 | encode)); } @@ -3156,7 +3155,7 @@ void Assembler::vpermq(XMMRegister dst, XMMRegister src, int imm8, int vector_len) { assert(VM_Version::supports_avx2(), ""); InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); - int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); + int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); emit_int8(0x00); emit_int8(0xC0 | encode); emit_int8(imm8); @@ -3199,8 +3198,7 @@ void Assembler::vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); + int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); emit_int8(0x74); emit_int8((unsigned char)(0xC0 | encode)); } @@ -3210,8 +3208,7 @@ assert(VM_Version::supports_avx512bw(), ""); InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - int encode = vex_prefix_and_encode(kdst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); + int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); emit_int8(0x74); emit_int8((unsigned char)(0xC0 | encode)); } @@ -3222,9 +3219,8 @@ InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; int dst_enc = kdst->encoding(); - vex_prefix(src, nds_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); + vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); emit_int8(0x74); emit_operand(as_Register(dst_enc), src); } @@ -3242,8 +3238,7 @@ void Assembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false); - int nds_enc = nds->is_valid() ? 
nds->encoding() : 0; - int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); + int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); emit_int8(0x75); emit_int8((unsigned char)(0xC0 | encode)); } @@ -3253,8 +3248,7 @@ assert(VM_Version::supports_avx512bw(), ""); InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - int encode = vex_prefix_and_encode(kdst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); + int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); emit_int8(0x75); emit_int8((unsigned char)(0xC0 | encode)); } @@ -3265,9 +3259,8 @@ InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); attributes.set_is_evex_instruction(); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; int dst_enc = kdst->encoding(); - vex_prefix(src, nds_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); + vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); emit_int8(0x75); emit_operand(as_Register(dst_enc), src); } @@ -3285,8 +3278,7 @@ void Assembler::vpcmpeqd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); + int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); emit_int8(0x76); emit_int8((unsigned char)(0xC0 | encode)); } @@ -3296,8 +3288,7 @@ assert(VM_Version::supports_evex(), ""); InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); attributes.set_is_evex_instruction(); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - int encode = vex_prefix_and_encode(kdst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); + int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); emit_int8(0x76); emit_int8((unsigned char)(0xC0 | encode)); } @@ -3308,9 +3299,8 @@ InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit); attributes.set_is_evex_instruction(); - int nds_enc = nds->is_valid() ? 
nds->encoding() : 0; int dst_enc = kdst->encoding(); - vex_prefix(src, nds_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); + vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); emit_int8(0x76); emit_operand(as_Register(dst_enc), src); } @@ -3328,8 +3318,7 @@ void Assembler::vpcmpeqq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); + int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); emit_int8(0x29); emit_int8((unsigned char)(0xC0 | encode)); } @@ -3339,8 +3328,7 @@ assert(VM_Version::supports_evex(), ""); InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); attributes.set_is_evex_instruction(); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - int encode = vex_prefix_and_encode(kdst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); + int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); emit_int8(0x29); emit_int8((unsigned char)(0xC0 | encode)); } @@ -3352,9 +3340,8 @@ InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); attributes.set_is_evex_instruction(); attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; int dst_enc = kdst->encoding(); - vex_prefix(src, nds_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); + vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); emit_int8(0x29); emit_operand(as_Register(dst_enc), src); } @@ -3988,7 +3975,7 @@ void Assembler::pblendw(XMMRegister dst, XMMRegister src, int imm8) { assert(VM_Version::supports_sse4_1(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false); - int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); + int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); emit_int8((unsigned char)0x0E); emit_int8((unsigned char)(0xC0 | encode)); emit_int8(imm8); @@ -4395,8 +4382,7 @@ InstructionMark im(this); InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); - int nds_enc = nds->is_valid() ? 
nds->encoding() : 0; - vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); + vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); emit_int8(0x58); emit_operand(dst, src); } @@ -4404,8 +4390,7 @@ void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); + int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); emit_int8(0x58); emit_int8((unsigned char)(0xC0 | encode)); } @@ -4415,8 +4400,7 @@ InstructionMark im(this); InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); + vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); emit_int8(0x58); emit_operand(dst, src); } @@ -4424,8 +4408,7 @@ void Assembler::vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); + int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); emit_int8(0x58); emit_int8((unsigned char)(0xC0 | encode)); } @@ -4435,8 +4418,7 @@ InstructionMark im(this); InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); + vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); emit_int8(0x5E); emit_operand(dst, src); } @@ -4444,8 +4426,7 @@ void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); - int nds_enc = nds->is_valid() ? 
nds->encoding() : 0; - int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); + int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); emit_int8(0x5E); emit_int8((unsigned char)(0xC0 | encode)); } @@ -4455,8 +4436,7 @@ InstructionMark im(this); InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); + vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); emit_int8(0x5E); emit_operand(dst, src); } @@ -4464,8 +4444,7 @@ void Assembler::vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); + int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); emit_int8(0x5E); emit_int8((unsigned char)(0xC0 | encode)); } @@ -4475,8 +4454,7 @@ InstructionMark im(this); InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); + vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); emit_int8(0x59); emit_operand(dst, src); } @@ -4484,8 +4462,7 @@ void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); + int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); emit_int8(0x59); emit_int8((unsigned char)(0xC0 | encode)); } @@ -4495,8 +4472,7 @@ InstructionMark im(this); InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); - int nds_enc = nds->is_valid() ? 
nds->encoding() : 0; - vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); + vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); emit_int8(0x59); emit_operand(dst, src); } @@ -4504,8 +4480,7 @@ void Assembler::vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); + int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); emit_int8(0x59); emit_int8((unsigned char)(0xC0 | encode)); } @@ -4515,8 +4490,7 @@ InstructionMark im(this); InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); + vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); emit_int8(0x5C); emit_operand(dst, src); } @@ -4524,8 +4498,7 @@ void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); + int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); emit_int8(0x5C); emit_int8((unsigned char)(0xC0 | encode)); } @@ -4535,8 +4508,7 @@ InstructionMark im(this); InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); + vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); emit_int8(0x5C); emit_operand(dst, src); } @@ -4544,8 +4516,7 @@ void Assembler::vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); - int nds_enc = nds->is_valid() ? 
nds->encoding() : 0; - int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); + int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); emit_int8(0x5C); emit_int8((unsigned char)(0xC0 | encode)); } @@ -4584,8 +4555,7 @@ void Assembler::vaddpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); + int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); emit_int8(0x58); emit_int8((unsigned char)(0xC0 | encode)); } @@ -4593,8 +4563,7 @@ void Assembler::vaddps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); + int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); emit_int8(0x58); emit_int8((unsigned char)(0xC0 | encode)); } @@ -4604,8 +4573,7 @@ InstructionMark im(this); InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); + vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); emit_int8(0x58); emit_operand(dst, src); } @@ -4615,8 +4583,7 @@ InstructionMark im(this); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); + vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); emit_int8(0x58); emit_operand(dst, src); } @@ -4640,8 +4607,7 @@ void Assembler::vsubpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); - int nds_enc = nds->is_valid() ? 
nds->encoding() : 0; - int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); + int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); emit_int8(0x5C); emit_int8((unsigned char)(0xC0 | encode)); } @@ -4649,8 +4615,7 @@ void Assembler::vsubps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); + int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); emit_int8(0x5C); emit_int8((unsigned char)(0xC0 | encode)); } @@ -4660,8 +4625,7 @@ InstructionMark im(this); InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); + vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); emit_int8(0x5C); emit_operand(dst, src); } @@ -4671,8 +4635,7 @@ InstructionMark im(this); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); + vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); emit_int8(0x5C); emit_operand(dst, src); } @@ -4706,8 +4669,7 @@ void Assembler::vmulpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); + int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); emit_int8(0x59); emit_int8((unsigned char)(0xC0 | encode)); } @@ -4715,8 +4677,7 @@ void Assembler::vmulps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); - int nds_enc = nds->is_valid() ? 
nds->encoding() : 0; - int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); + int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); emit_int8(0x59); emit_int8((unsigned char)(0xC0 | encode)); } @@ -4726,8 +4687,7 @@ InstructionMark im(this); InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); + vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); emit_int8(0x59); emit_operand(dst, src); } @@ -4737,8 +4697,7 @@ InstructionMark im(this); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); + vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); emit_int8(0x59); emit_operand(dst, src); } @@ -4762,8 +4721,7 @@ void Assembler::vdivpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); + int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); emit_int8(0x5E); emit_int8((unsigned char)(0xC0 | encode)); } @@ -4771,8 +4729,7 @@ void Assembler::vdivps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); + int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); emit_int8(0x5E); emit_int8((unsigned char)(0xC0 | encode)); } @@ -4782,8 +4739,7 @@ InstructionMark im(this); InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit); - int nds_enc = nds->is_valid() ? 
nds->encoding() : 0; - vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); + vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); emit_int8(0x5E); emit_operand(dst, src); } @@ -4793,8 +4749,7 @@ InstructionMark im(this); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); + vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); emit_int8(0x5E); emit_operand(dst, src); } @@ -4802,8 +4757,7 @@ void Assembler::vsqrtpd(XMMRegister dst, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); - int nds_enc = 0; - int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); + int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); emit_int8(0x51); emit_int8((unsigned char)(0xC0 | encode)); } @@ -4857,8 +4811,7 @@ void Assembler::vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(vector_len, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); + int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); emit_int8(0x54); emit_int8((unsigned char)(0xC0 | encode)); } @@ -4866,8 +4819,7 @@ void Assembler::vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); + int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); emit_int8(0x54); emit_int8((unsigned char)(0xC0 | encode)); } @@ -4877,8 +4829,7 @@ InstructionMark im(this); InstructionAttr attributes(vector_len, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true); attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit); - int nds_enc = nds->is_valid() ? 
nds->encoding() : 0; - vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); + vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); emit_int8(0x54); emit_operand(dst, src); } @@ -4888,8 +4839,7 @@ InstructionMark im(this); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true); attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); + vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); emit_int8(0x54); emit_operand(dst, src); } @@ -4949,8 +4899,7 @@ void Assembler::vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(vector_len, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); + int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); emit_int8(0x57); emit_int8((unsigned char)(0xC0 | encode)); } @@ -4958,8 +4907,7 @@ void Assembler::vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); + int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); emit_int8(0x57); emit_int8((unsigned char)(0xC0 | encode)); } @@ -4969,8 +4917,7 @@ InstructionMark im(this); InstructionAttr attributes(vector_len, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true); attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); + vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); emit_int8(0x57); emit_operand(dst, src); } @@ -4980,8 +4927,7 @@ InstructionMark im(this); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true); attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit); - int nds_enc = nds->is_valid() ? 
nds->encoding() : 0; - vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); + vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); emit_int8(0x57); emit_operand(dst, src); } @@ -4991,8 +4937,7 @@ assert(VM_Version::supports_avx() && (vector_len == 0) || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); + int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); emit_int8(0x01); emit_int8((unsigned char)(0xC0 | encode)); } @@ -5001,8 +4946,7 @@ assert(VM_Version::supports_avx() && (vector_len == 0) || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); + int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); emit_int8(0x02); emit_int8((unsigned char)(0xC0 | encode)); } @@ -5035,7 +4979,7 @@ NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionMark im(this); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); - simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); + simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); emit_int8((unsigned char)0xFE); emit_operand(dst, src); } @@ -5067,8 +5011,7 @@ void Assembler::vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); + int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); emit_int8((unsigned char)0xFC); emit_int8((unsigned char)(0xC0 | encode)); } @@ -5076,8 +5019,7 @@ void Assembler::vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); - int nds_enc = nds->is_valid() ? 
nds->encoding() : 0; - int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); + int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); emit_int8((unsigned char)0xFD); emit_int8((unsigned char)(0xC0 | encode)); } @@ -5085,8 +5027,7 @@ void Assembler::vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); + int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); emit_int8((unsigned char)0xFE); emit_int8((unsigned char)(0xC0 | encode)); } @@ -5094,8 +5035,7 @@ void Assembler::vpaddq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); + int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); emit_int8((unsigned char)0xD4); emit_int8((unsigned char)(0xC0 | encode)); } @@ -5105,8 +5045,7 @@ InstructionMark im(this); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); + vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); emit_int8((unsigned char)0xFC); emit_operand(dst, src); } @@ -5116,8 +5055,7 @@ InstructionMark im(this); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); + vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); emit_int8((unsigned char)0xFD); emit_operand(dst, src); } @@ -5127,8 +5065,7 @@ InstructionMark im(this); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit); - int nds_enc = nds->is_valid() ? 
nds->encoding() : 0; - vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); + vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); emit_int8((unsigned char)0xFE); emit_operand(dst, src); } @@ -5138,8 +5075,7 @@ InstructionMark im(this); InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); + vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); emit_int8((unsigned char)0xD4); emit_operand(dst, src); } @@ -5178,8 +5114,7 @@ void Assembler::vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); + int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); emit_int8((unsigned char)0xF8); emit_int8((unsigned char)(0xC0 | encode)); } @@ -5187,8 +5122,7 @@ void Assembler::vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); + int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); emit_int8((unsigned char)0xF9); emit_int8((unsigned char)(0xC0 | encode)); } @@ -5196,8 +5130,7 @@ void Assembler::vpsubd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); + int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); emit_int8((unsigned char)0xFA); emit_int8((unsigned char)(0xC0 | encode)); } @@ -5205,8 +5138,7 @@ void Assembler::vpsubq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); - int nds_enc = nds->is_valid() ? 
nds->encoding() : 0; - int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); + int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); emit_int8((unsigned char)0xFB); emit_int8((unsigned char)(0xC0 | encode)); } @@ -5216,8 +5148,7 @@ InstructionMark im(this); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); + vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); emit_int8((unsigned char)0xF8); emit_operand(dst, src); } @@ -5227,8 +5158,7 @@ InstructionMark im(this); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); + vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); emit_int8((unsigned char)0xF9); emit_operand(dst, src); } @@ -5238,8 +5168,7 @@ InstructionMark im(this); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); + vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); emit_int8((unsigned char)0xFA); emit_operand(dst, src); } @@ -5249,8 +5178,7 @@ InstructionMark im(this); InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); + vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); emit_int8((unsigned char)0xFB); emit_operand(dst, src); } @@ -5274,8 +5202,7 @@ void Assembler::vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); - int nds_enc = nds->is_valid() ? 
nds->encoding() : 0; - int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); + int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); emit_int8((unsigned char)0xD5); emit_int8((unsigned char)(0xC0 | encode)); } @@ -5283,8 +5210,7 @@ void Assembler::vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); + int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); emit_int8(0x40); emit_int8((unsigned char)(0xC0 | encode)); } @@ -5292,8 +5218,7 @@ void Assembler::vpmullq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(UseAVX > 2, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); + int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); emit_int8(0x40); emit_int8((unsigned char)(0xC0 | encode)); } @@ -5303,8 +5228,7 @@ InstructionMark im(this); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); + vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); emit_int8((unsigned char)0xD5); emit_operand(dst, src); } @@ -5314,8 +5238,7 @@ InstructionMark im(this); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); + vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); emit_int8(0x40); emit_operand(dst, src); } @@ -5325,8 +5248,7 @@ InstructionMark im(this); InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true); attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit); - int nds_enc = nds->is_valid() ? 
nds->encoding() : 0; - vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); + vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); emit_int8(0x40); emit_operand(dst, src); } @@ -5638,8 +5560,7 @@ void Assembler::vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); + int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); emit_int8((unsigned char)0xDB); emit_int8((unsigned char)(0xC0 | encode)); } @@ -5649,8 +5570,7 @@ InstructionMark im(this); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); + vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); emit_int8((unsigned char)0xDB); emit_operand(dst, src); } @@ -5674,8 +5594,7 @@ void Assembler::vpor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); + int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); emit_int8((unsigned char)0xEB); emit_int8((unsigned char)(0xC0 | encode)); } @@ -5685,8 +5604,7 @@ InstructionMark im(this); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); + vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); emit_int8((unsigned char)0xEB); emit_operand(dst, src); } @@ -5702,8 +5620,7 @@ void Assembler::vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); - int nds_enc = nds->is_valid() ? 
nds->encoding() : 0; - int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); + int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); emit_int8((unsigned char)0xEF); emit_int8((unsigned char)(0xC0 | encode)); } @@ -5713,20 +5630,96 @@ InstructionMark im(this); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); + vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); emit_int8((unsigned char)0xEF); emit_operand(dst, src); } +// vinserti forms + +void Assembler::vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) { + assert(VM_Version::supports_avx2(), ""); + assert(imm8 <= 0x01, "imm8: %u", imm8); + int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_256bit; + InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); + int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); + emit_int8(0x38); + emit_int8((unsigned char)(0xC0 | encode)); + // 0x00 - insert into lower 128 bits + // 0x01 - insert into upper 128 bits + emit_int8(imm8 & 0x01); +} + +void Assembler::vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) { + assert(VM_Version::supports_avx2(), ""); + assert(dst != xnoreg, "sanity"); + assert(imm8 <= 0x01, "imm8: %u", imm8); + int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_256bit; + InstructionMark im(this); + InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); + attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit); + vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); + emit_int8(0x38); + emit_operand(dst, src); + // 0x00 - insert into lower 128 bits + // 0x01 - insert into upper 128 bits + emit_int8(imm8 & 0x01); +} + +void Assembler::vinserti32x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) { + assert(VM_Version::supports_evex(), ""); + assert(imm8 <= 0x03, "imm8: %u", imm8); + InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); + int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); + emit_int8(0x38); + emit_int8((unsigned char)(0xC0 | encode)); + // 0x00 - insert into q0 128 bits (0..127) + // 0x01 - insert into q1 128 bits (128..255) + // 0x02 - insert into q2 128 bits (256..383) + // 0x03 - insert into q3 128 bits (384..511) + emit_int8(imm8 & 0x03); +} + +void Assembler::vinserti32x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) { + assert(VM_Version::supports_avx(), ""); + assert(dst != xnoreg, "sanity"); + assert(imm8 <= 0x03, "imm8: %u", imm8); + int vector_len = VM_Version::supports_evex() ? 
AVX_512bit : AVX_256bit; + InstructionMark im(this); + InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); + attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit); + vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); + emit_int8(0x18); + emit_operand(dst, src); + // 0x00 - insert into q0 128 bits (0..127) + // 0x01 - insert into q1 128 bits (128..255) + // 0x02 - insert into q2 128 bits (256..383) + // 0x03 - insert into q3 128 bits (384..511) + emit_int8(imm8 & 0x03); +} + +void Assembler::vinserti64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) { + assert(VM_Version::supports_evex(), ""); + assert(imm8 <= 0x01, "imm8: %u", imm8); + InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); + int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); + emit_int8(0x38); + emit_int8((unsigned char)(0xC0 | encode)); + // 0x00 - insert into lower 256 bits + // 0x01 - insert into upper 256 bits + emit_int8(imm8 & 0x01); +} + + +// vinsertf forms + void Assembler::vinsertf128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) { assert(VM_Version::supports_avx(), ""); assert(imm8 <= 0x01, "imm8: %u", imm8); int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_256bit; InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); + int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); emit_int8(0x18); emit_int8((unsigned char)(0xC0 | encode)); // 0x00 - insert into lower 128 bits @@ -5734,33 +5727,19 @@ emit_int8(imm8 & 0x01); } -void Assembler::vinsertf64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) { - assert(VM_Version::supports_evex(), ""); - assert(imm8 <= 0x01, "imm8: %u", imm8); - InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); - emit_int8(0x1A); - emit_int8((unsigned char)(0xC0 | encode)); - // 0x00 - insert into lower 256 bits - // 0x01 - insert into upper 256 bits - emit_int8(imm8 & 0x01); -} - -void Assembler::vinsertf64x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) { - assert(VM_Version::supports_evex(), ""); +void Assembler::vinsertf128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) { + assert(VM_Version::supports_avx(), ""); assert(dst != xnoreg, "sanity"); assert(imm8 <= 0x01, "imm8: %u", imm8); - InstructionMark im(this); - InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); - int nds_enc = nds->is_valid() ? 
nds->encoding() : 0; - attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_64bit); - // swap src<->dst for encoding - vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); - emit_int8(0x1A); - emit_operand(dst, src); - // 0x00 - insert into lower 256 bits - // 0x01 - insert into upper 256 bits + int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_256bit; + InstructionMark im(this); + InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); + attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit); + vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); + emit_int8(0x18); + emit_operand(dst, src); + // 0x00 - insert into lower 128 bits + // 0x01 - insert into upper 128 bits emit_int8(imm8 & 0x01); } @@ -5768,8 +5747,7 @@ assert(VM_Version::supports_evex(), ""); assert(imm8 <= 0x03, "imm8: %u", imm8); InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); + int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); emit_int8(0x18); emit_int8((unsigned char)(0xC0 | encode)); // 0x00 - insert into q0 128 bits (0..127) @@ -5784,12 +5762,10 @@ assert(dst != xnoreg, "sanity"); assert(imm8 <= 0x03, "imm8: %u", imm8); int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit; - int nds_enc = nds->is_valid() ? nds->encoding() : 0; InstructionMark im(this); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit); - // swap src<->dst for encoding - vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); + vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); emit_int8(0x18); emit_operand(dst, src); // 0x00 - insert into q0 128 bits (0..127) @@ -5799,98 +5775,36 @@ emit_int8(imm8 & 0x03); } -void Assembler::vinsertf128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) { - assert(VM_Version::supports_avx(), ""); - assert(dst != xnoreg, "sanity"); - assert(imm8 <= 0x01, "imm8: %u", imm8); - int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_256bit; - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - InstructionMark im(this); - InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); - attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit); - // swap src<->dst for encoding - vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); - emit_int8(0x18); - emit_operand(dst, src); - // 0x00 - insert into lower 128 bits - // 0x01 - insert into upper 128 bits - emit_int8(imm8 & 0x01); -} - -void Assembler::vextractf128(XMMRegister dst, XMMRegister src, uint8_t imm8) { - assert(VM_Version::supports_avx(), ""); - assert(imm8 <= 0x01, "imm8: %u", imm8); - int vector_len = VM_Version::supports_avx512novl() ? 
AVX_512bit : AVX_256bit; - InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); - int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); - emit_int8(0x19); - emit_int8((unsigned char)(0xC0 | encode)); - // 0x00 - extract from lower 128 bits - // 0x01 - extract from upper 128 bits - emit_int8(imm8 & 0x01); -} - -void Assembler::vextractf128(Address dst, XMMRegister src, uint8_t imm8) { - assert(VM_Version::supports_avx(), ""); - assert(src != xnoreg, "sanity"); - assert(imm8 <= 0x01, "imm8: %u", imm8); - int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_256bit; - InstructionMark im(this); - InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); - attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit); - vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); - emit_int8(0x19); - emit_operand(src, dst); - // 0x00 - extract from lower 128 bits - // 0x01 - extract from upper 128 bits - emit_int8(imm8 & 0x01); -} - -void Assembler::vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) { - assert(VM_Version::supports_avx2(), ""); - assert(imm8 <= 0x01, "imm8: %u", imm8); - int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_256bit; - InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); - emit_int8(0x38); - emit_int8((unsigned char)(0xC0 | encode)); - // 0x00 - insert into lower 128 bits - // 0x01 - insert into upper 128 bits - emit_int8(imm8 & 0x01); -} - -void Assembler::vinserti64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) { +void Assembler::vinsertf64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) { assert(VM_Version::supports_evex(), ""); assert(imm8 <= 0x01, "imm8: %u", imm8); InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); - emit_int8(0x38); + int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); + emit_int8(0x1A); emit_int8((unsigned char)(0xC0 | encode)); // 0x00 - insert into lower 256 bits // 0x01 - insert into upper 256 bits emit_int8(imm8 & 0x01); } -void Assembler::vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) { - assert(VM_Version::supports_avx2(), ""); +void Assembler::vinsertf64x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) { + assert(VM_Version::supports_evex(), ""); assert(dst != xnoreg, "sanity"); assert(imm8 <= 0x01, "imm8: %u", imm8); - int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_256bit; - int nds_enc = nds->is_valid() ? 
nds->encoding() : 0; - InstructionMark im(this); - InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); - attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit); - // swap src<->dst for encoding - vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); - emit_int8(0x38); - emit_operand(dst, src); - // 0x00 - insert into lower 128 bits - // 0x01 - insert into upper 128 bits + InstructionMark im(this); + InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); + attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_64bit); + vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); + emit_int8(0x1A); + emit_operand(dst, src); + // 0x00 - insert into lower 256 bits + // 0x01 - insert into upper 256 bits emit_int8(imm8 & 0x01); } + +// vextracti forms + void Assembler::vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) { assert(VM_Version::supports_avx(), ""); assert(imm8 <= 0x01, "imm8: %u", imm8); @@ -5920,16 +5834,36 @@ emit_int8(imm8 & 0x01); } -void Assembler::vextracti64x4(XMMRegister dst, XMMRegister src, uint8_t imm8) { - assert(VM_Version::supports_evex(), ""); - assert(imm8 <= 0x01, "imm8: %u", imm8); - InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); +void Assembler::vextracti32x4(XMMRegister dst, XMMRegister src, uint8_t imm8) { + assert(VM_Version::supports_avx(), ""); + assert(imm8 <= 0x03, "imm8: %u", imm8); + int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit; + InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); - emit_int8(0x3B); - emit_int8((unsigned char)(0xC0 | encode)); - // 0x00 - extract from lower 256 bits - // 0x01 - extract from upper 256 bits - emit_int8(imm8 & 0x01); + emit_int8(0x39); + emit_int8((unsigned char)(0xC0 | encode)); + // 0x00 - extract from bits 127:0 + // 0x01 - extract from bits 255:128 + // 0x02 - extract from bits 383:256 + // 0x03 - extract from bits 511:384 + emit_int8(imm8 & 0x03); +} + +void Assembler::vextracti32x4(Address dst, XMMRegister src, uint8_t imm8) { + assert(VM_Version::supports_evex(), ""); + assert(src != xnoreg, "sanity"); + assert(imm8 <= 0x03, "imm8: %u", imm8); + InstructionMark im(this); + InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); + attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit); + vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); + emit_int8(0x39); + emit_operand(src, dst); + // 0x00 - extract from bits 127:0 + // 0x01 - extract from bits 255:128 + // 0x02 - extract from bits 383:256 + // 0x03 - extract from bits 511:384 + emit_int8(imm8 & 0x03); } void Assembler::vextracti64x2(XMMRegister dst, XMMRegister src, uint8_t imm8) { @@ -5946,30 +5880,47 @@ emit_int8(imm8 & 0x03); } -void Assembler::vextractf64x4(XMMRegister dst, XMMRegister src, uint8_t imm8) { +void Assembler::vextracti64x4(XMMRegister dst, XMMRegister src, uint8_t imm8) { assert(VM_Version::supports_evex(), 
""); assert(imm8 <= 0x01, "imm8: %u", imm8); InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); - emit_int8(0x1B); + emit_int8(0x3B); emit_int8((unsigned char)(0xC0 | encode)); // 0x00 - extract from lower 256 bits // 0x01 - extract from upper 256 bits emit_int8(imm8 & 0x01); } -void Assembler::vextractf64x4(Address dst, XMMRegister src, uint8_t imm8) { - assert(VM_Version::supports_evex(), ""); + +// vextractf forms + +void Assembler::vextractf128(XMMRegister dst, XMMRegister src, uint8_t imm8) { + assert(VM_Version::supports_avx(), ""); + assert(imm8 <= 0x01, "imm8: %u", imm8); + int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_256bit; + InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); + int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); + emit_int8(0x19); + emit_int8((unsigned char)(0xC0 | encode)); + // 0x00 - extract from lower 128 bits + // 0x01 - extract from upper 128 bits + emit_int8(imm8 & 0x01); +} + +void Assembler::vextractf128(Address dst, XMMRegister src, uint8_t imm8) { + assert(VM_Version::supports_avx(), ""); assert(src != xnoreg, "sanity"); assert(imm8 <= 0x01, "imm8: %u", imm8); - InstructionMark im(this); - InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); - attributes.set_address_attributes(/* tuple_type */ EVEX_T4,/* input_size_in_bits */ EVEX_64bit); + int vector_len = VM_Version::supports_avx512novl() ? 
AVX_512bit : AVX_256bit; + InstructionMark im(this); + InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); + attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit); vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); - emit_int8(0x1B); + emit_int8(0x19); emit_operand(src, dst); - // 0x00 - extract from lower 256 bits - // 0x01 - extract from upper 256 bits + // 0x00 - extract from lower 128 bits + // 0x01 - extract from upper 128 bits emit_int8(imm8 & 0x01); } @@ -6019,7 +5970,43 @@ emit_int8(imm8 & 0x03); } -// duplicate 4-bytes integer data from src into 8 locations in dest +void Assembler::vextractf64x4(XMMRegister dst, XMMRegister src, uint8_t imm8) { + assert(VM_Version::supports_evex(), ""); + assert(imm8 <= 0x01, "imm8: %u", imm8); + InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); + int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); + emit_int8(0x1B); + emit_int8((unsigned char)(0xC0 | encode)); + // 0x00 - extract from lower 256 bits + // 0x01 - extract from upper 256 bits + emit_int8(imm8 & 0x01); +} + +void Assembler::vextractf64x4(Address dst, XMMRegister src, uint8_t imm8) { + assert(VM_Version::supports_evex(), ""); + assert(src != xnoreg, "sanity"); + assert(imm8 <= 0x01, "imm8: %u", imm8); + InstructionMark im(this); + InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false); + attributes.set_address_attributes(/* tuple_type */ EVEX_T4,/* input_size_in_bits */ EVEX_64bit); + vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); + emit_int8(0x1B); + emit_operand(src, dst); + // 0x00 - extract from lower 256 bits + // 0x01 - extract from upper 256 bits + emit_int8(imm8 & 0x01); +} + + +// legacy word/dword replicate +void Assembler::vpbroadcastw(XMMRegister dst, XMMRegister src) { + assert(VM_Version::supports_avx2(), ""); + InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); + int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); + emit_int8(0x79); + emit_int8((unsigned char)(0xC0 | encode)); +} + void Assembler::vpbroadcastd(XMMRegister dst, XMMRegister src) { assert(VM_Version::supports_avx2(), ""); InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); @@ -6028,16 +6015,10 @@ emit_int8((unsigned char)(0xC0 | encode)); } -// duplicate 2-bytes integer data from src into 16 locations in dest -void Assembler::vpbroadcastw(XMMRegister dst, XMMRegister src) { - assert(VM_Version::supports_avx2(), ""); - InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); - int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); - emit_int8(0x79); - emit_int8((unsigned char)(0xC0 | encode)); -} - -// duplicate 1-byte integer data from src into 16||32|64 locations in dest : requires AVX512BW and AVX512VL + +// xmm/mem sourced byte/word/dword/qword replicate + +// duplicate 1-byte integer data from src into programmed locations in dest : requires 
AVX512BW and AVX512VL void Assembler::evpbroadcastb(XMMRegister dst, XMMRegister src, int vector_len) { assert(VM_Version::supports_evex(), ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); @@ -6053,12 +6034,12 @@ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_8bit); // swap src<->dst for encoding - vex_prefix(src, dst->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); + vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); emit_int8(0x78); emit_operand(dst, src); } -// duplicate 2-byte integer data from src into 8|16||32 locations in dest : requires AVX512BW and AVX512VL +// duplicate 2-byte integer data from src into programmed locations in dest : requires AVX512BW and AVX512VL void Assembler::evpbroadcastw(XMMRegister dst, XMMRegister src, int vector_len) { assert(VM_Version::supports_evex(), ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); @@ -6074,12 +6055,12 @@ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_16bit); // swap src<->dst for encoding - vex_prefix(src, dst->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); + vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); emit_int8(0x79); emit_operand(dst, src); } -// duplicate 4-byte integer data from src into 4|8|16 locations in dest : requires AVX512VL +// duplicate 4-byte integer data from src into programmed locations in dest : requires AVX512VL void Assembler::evpbroadcastd(XMMRegister dst, XMMRegister src, int vector_len) { assert(VM_Version::supports_evex(), ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); @@ -6095,12 +6076,12 @@ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); // swap src<->dst for encoding - vex_prefix(src, dst->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); + vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); emit_int8(0x58); emit_operand(dst, src); } -// duplicate 8-byte integer data from src into 4|8|16 locations in dest : requires AVX512VL +// duplicate 8-byte integer data from src into programmed locations in dest : requires AVX512VL void Assembler::evpbroadcastq(XMMRegister dst, XMMRegister src, int vector_len) { assert(VM_Version::supports_evex(), ""); InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); @@ -6116,12 +6097,15 @@ InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); // swap src<->dst for encoding - vex_prefix(src, dst->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, 
&attributes); + vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); emit_int8(0x59); emit_operand(dst, src); } -// duplicate single precision fp from src into 4|8|16 locations in dest : requires AVX512VL + +// scalar single/double precision replicate + +// duplicate single precision data from src into programmed locations in dest : requires AVX512VL void Assembler::evpbroadcastss(XMMRegister dst, XMMRegister src, int vector_len) { assert(VM_Version::supports_evex(), ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); @@ -6142,7 +6126,7 @@ emit_operand(dst, src); } -// duplicate double precision fp from src into 2|4|8 locations in dest : requires AVX512VL +// duplicate double precision data from src into programmed locations in dest : requires AVX512VL void Assembler::evpbroadcastsd(XMMRegister dst, XMMRegister src, int vector_len) { assert(VM_Version::supports_evex(), ""); InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); @@ -6163,7 +6147,10 @@ emit_operand(dst, src); } -// duplicate 1-byte integer data from src into 16||32|64 locations in dest : requires AVX512BW and AVX512VL + +// gpr source broadcast forms + +// duplicate 1-byte integer data from src into programmed locations in dest : requires AVX512BW and AVX512VL void Assembler::evpbroadcastb(XMMRegister dst, Register src, int vector_len) { assert(VM_Version::supports_evex(), ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); @@ -6176,7 +6163,7 @@ emit_int8((unsigned char)(0xC0 | encode)); } -// duplicate 2-byte integer data from src into 8|16||32 locations in dest : requires AVX512BW and AVX512VL +// duplicate 2-byte integer data from src into programmed locations in dest : requires AVX512BW and AVX512VL void Assembler::evpbroadcastw(XMMRegister dst, Register src, int vector_len) { assert(VM_Version::supports_evex(), ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); @@ -6189,7 +6176,7 @@ emit_int8((unsigned char)(0xC0 | encode)); } -// duplicate 4-byte integer data from src into 4|8|16 locations in dest : requires AVX512VL +// duplicate 4-byte integer data from src into programmed locations in dest : requires AVX512VL void Assembler::evpbroadcastd(XMMRegister dst, Register src, int vector_len) { assert(VM_Version::supports_evex(), ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); @@ -6202,7 +6189,7 @@ emit_int8((unsigned char)(0xC0 | encode)); } -// duplicate 8-byte integer data from src into 4|8|16 locations in dest : requires AVX512VL +// duplicate 8-byte integer data from src into programmed locations in dest : requires AVX512VL void Assembler::evpbroadcastq(XMMRegister dst, Register src, int vector_len) { assert(VM_Version::supports_evex(), ""); InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); @@ -6215,6 +6202,7 @@ emit_int8((unsigned char)(0xC0 | encode)); } + // Carry-Less Multiplication Quadword void Assembler::pclmulqdq(XMMRegister dst, XMMRegister src, int mask) { assert(VM_Version::supports_clmul(), ""); @@ -6229,8 +6217,7 @@ void Assembler::vpclmulqdq(XMMRegister dst, XMMRegister nds, 
XMMRegister src, int mask) { assert(VM_Version::supports_avx() && VM_Version::supports_clmul(), ""); InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); + int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); emit_int8(0x44); emit_int8((unsigned char)(0xC0 | encode)); emit_int8((unsigned char)mask); @@ -6972,8 +6959,7 @@ assert(VM_Version::supports_avx(), ""); assert(!VM_Version::supports_evex(), ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false); - int nds_enc = nds->is_valid() ? nds->encoding() : 0; - int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); + int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); emit_int8((unsigned char)0x4B); emit_int8((unsigned char)(0xC0 | encode)); int src2_enc = src2->encoding(); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/cpu/x86/vm/assembler_x86.hpp --- a/hotspot/src/cpu/x86/vm/assembler_x86.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/cpu/x86/vm/assembler_x86.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -1977,39 +1977,43 @@ void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len); - // 128bit copy from/to 256bit (YMM) vector registers - void vinsertf128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8); + // vinserti forms void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8); - void vextractf128(XMMRegister dst, XMMRegister src, uint8_t imm8); - void vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8); - void vinsertf128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8); void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8); - void vextractf128(Address dst, XMMRegister src, uint8_t imm8); + void vinserti32x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8); + void vinserti32x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8); + void vinserti64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8); + + // vinsertf forms + void vinsertf128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8); + void vinsertf128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8); + void vinsertf32x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8); + void vinsertf32x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8); + void vinsertf64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8); + void vinsertf64x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8); + + // vextracti forms + void vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8); void vextracti128(Address dst, XMMRegister src, uint8_t imm8); - - // 256bit copy from/to 512bit (ZMM) vector registers - void vinserti64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8); - void vinsertf64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8); + void vextracti32x4(XMMRegister dst, XMMRegister src, uint8_t imm8); + void 
vextracti32x4(Address dst, XMMRegister src, uint8_t imm8); + void vextracti64x2(XMMRegister dst, XMMRegister src, uint8_t imm8); void vextracti64x4(XMMRegister dst, XMMRegister src, uint8_t imm8); + + // vextractf forms + void vextractf128(XMMRegister dst, XMMRegister src, uint8_t imm8); + void vextractf128(Address dst, XMMRegister src, uint8_t imm8); + void vextractf32x4(XMMRegister dst, XMMRegister src, uint8_t imm8); + void vextractf32x4(Address dst, XMMRegister src, uint8_t imm8); + void vextractf64x2(XMMRegister dst, XMMRegister src, uint8_t imm8); void vextractf64x4(XMMRegister dst, XMMRegister src, uint8_t imm8); void vextractf64x4(Address dst, XMMRegister src, uint8_t imm8); - void vinsertf64x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8); - - // 128bit copy from/to 256bit (YMM) or 512bit (ZMM) vector registers - void vextracti64x2(XMMRegister dst, XMMRegister src, uint8_t imm8); - void vextractf64x2(XMMRegister dst, XMMRegister src, uint8_t imm8); - void vextractf32x4(XMMRegister dst, XMMRegister src, uint8_t imm8); - void vextractf32x4(Address dst, XMMRegister src, uint8_t imm8); - void vinsertf32x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8); - void vinsertf32x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8); - - // duplicate 4-bytes integer data from src into 8 locations in dest + + // legacy xmm sourced word/dword replicate + void vpbroadcastw(XMMRegister dst, XMMRegister src); void vpbroadcastd(XMMRegister dst, XMMRegister src); - // duplicate 2-bytes integer data from src into 16 locations in dest - void vpbroadcastw(XMMRegister dst, XMMRegister src); - - // duplicate n-bytes integer data from src into vector_len locations in dest + // xmm/mem sourced byte/word/dword/qword replicate void evpbroadcastb(XMMRegister dst, XMMRegister src, int vector_len); void evpbroadcastb(XMMRegister dst, Address src, int vector_len); void evpbroadcastw(XMMRegister dst, XMMRegister src, int vector_len); @@ -2019,11 +2023,13 @@ void evpbroadcastq(XMMRegister dst, XMMRegister src, int vector_len); void evpbroadcastq(XMMRegister dst, Address src, int vector_len); + // scalar single/double precision replicate void evpbroadcastss(XMMRegister dst, XMMRegister src, int vector_len); void evpbroadcastss(XMMRegister dst, Address src, int vector_len); void evpbroadcastsd(XMMRegister dst, XMMRegister src, int vector_len); void evpbroadcastsd(XMMRegister dst, Address src, int vector_len); + // gpr sourced byte/word/dword/qword replicate void evpbroadcastb(XMMRegister dst, Register src, int vector_len); void evpbroadcastw(XMMRegister dst, Register src, int vector_len); void evpbroadcastd(XMMRegister dst, Register src, int vector_len); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp --- a/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -312,7 +312,7 @@ Register OSR_buf = osrBufferPointer()->as_pointer_register(); { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below"); int monitor_offset = BytesPerWord * method()->max_locals() + - (2 * BytesPerWord) * (number_of_locks - 1); + (BasicObjectLock::size() * BytesPerWord) * (number_of_locks - 1); // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in // the OSR buffer using 2 word entries: first the lock and then // the oop. diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/cpu/x86/vm/c2_globals_x86.hpp --- a/hotspot/src/cpu/x86/vm/c2_globals_x86.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/cpu/x86/vm/c2_globals_x86.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -47,6 +47,7 @@ define_pd_global(intx, FreqInlineSize, 325); define_pd_global(intx, MinJumpTableSize, 10); define_pd_global(intx, LoopPercentProfileLimit, 30); +define_pd_global(intx, PostLoopMultiversioning, true); #ifdef AMD64 define_pd_global(intx, INTPRESSURE, 13); define_pd_global(intx, FLOATPRESSURE, 14); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/cpu/x86/vm/frame_x86.hpp --- a/hotspot/src/cpu/x86/vm/frame_x86.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/cpu/x86/vm/frame_x86.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -54,44 +54,6 @@ // <- sender sp // ------------------------------ Asm interpreter ---------------------------------------- -// ------------------------------ C++ interpreter ---------------------------------------- -// -// Layout of C++ interpreter frame: (While executing in BytecodeInterpreter::run) -// -// <- SP (current esp/rsp) -// [local variables ] BytecodeInterpreter::run local variables -// ... BytecodeInterpreter::run local variables -// [local variables ] BytecodeInterpreter::run local variables -// [old frame pointer ] fp [ BytecodeInterpreter::run's ebp/rbp ] -// [return pc ] (return to frame manager) -// [interpreter_state* ] (arg to BytecodeInterpreter::run) -------------- -// [expression stack ] <- last_Java_sp | -// [... ] * <- interpreter_state.stack | -// [expression stack ] * <- interpreter_state.stack_base | -// [monitors ] \ | -// ... | monitor block size | -// [monitors ] / <- interpreter_state.monitor_base | -// [struct interpretState ] <-----------------------------------------| -// [return pc ] (return to callee of frame manager [1] -// [locals and parameters ] -// <- sender sp - -// [1] When the C++ interpreter calls a new method it returns to the frame -// manager which allocates a new frame on the stack. In that case there -// is no real callee of this newly allocated frame. The frame manager is -// aware of the additional frame(s) and will pop them as nested calls -// complete. However, to make it look good in the debugger the frame -// manager actually installs a dummy pc pointing to RecursiveInterpreterActivation -// with a fake interpreter_state* parameter to make it easy to debug -// nested calls. - -// Note that contrary to the layout for the assembly interpreter the -// expression stack allocated for the C++ interpreter is full sized. 
-// However this is not as bad as it seems as the interpreter frame_manager -// will truncate the unused space on successive method calls. -// -// ------------------------------ C++ interpreter ---------------------------------------- - public: enum { pc_return_offset = 0, diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/cpu/x86/vm/interp_masm_x86.cpp --- a/hotspot/src/cpu/x86/vm/interp_masm_x86.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/cpu/x86/vm/interp_masm_x86.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -296,7 +296,7 @@ Label L; cmpptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD); jcc(Assembler::equal, L); - stop("InterpreterMacroAssembler::call_VM_leaf_base:" + stop("InterpreterMacroAssembler::call_VM_base:" " last_sp != NULL"); bind(L); } @@ -1099,7 +1099,7 @@ movptr(Address(lock_reg, mark_offset), swap_reg); assert(lock_offset == 0, - "displached header must be first word in BasicObjectLock"); + "displaced header must be first word in BasicObjectLock"); if (os::is_MP()) lock(); cmpxchgptr(lock_reg, Address(obj_reg, 0)); @@ -1154,7 +1154,7 @@ // Kills: // rax // c_rarg0, c_rarg1, c_rarg2, c_rarg3, ... (param regs) -// rscratch1, rscratch2 (scratch regs) +// rscratch1 (scratch reg) // rax, rbx, rcx, rdx void InterpreterMacroAssembler::unlock_object(Register lock_reg) { assert(lock_reg == LP64_ONLY(c_rarg1) NOT_LP64(rdx), @@ -1201,7 +1201,7 @@ if (os::is_MP()) lock(); cmpxchgptr(header_reg, Address(obj_reg, 0)); - // zero for recursive case + // zero for simple unlock of a stack-lock case jcc(Assembler::zero, done); // Call the runtime routine for slow case. 
diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp --- a/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1106,7 +1106,7 @@ assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg); assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout"); Address mark_addr (obj_reg, oopDesc::mark_offset_in_bytes()); - Address saved_mark_addr(lock_reg, 0); + NOT_LP64( Address saved_mark_addr(lock_reg, 0); ) if (PrintBiasedLockingStatistics && counters == NULL) { counters = BiasedLocking::counters(); @@ -1695,7 +1695,7 @@ RTMLockingCounters* stack_rtm_counters, Metadata* method_data, bool use_rtm, bool profile_rtm) { - // Ensure the register assignents are disjoint + // Ensure the register assignments are disjoint assert(tmpReg == rax, ""); if (use_rtm) { @@ -2194,8 +2194,8 @@ cmpptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), (int32_t)NULL_WORD); jccb (Assembler::zero, LGoSlowPath); + xorptr(boxReg, boxReg); if ((EmitSync & 16) && os::is_MP()) { - orptr(boxReg, boxReg); xchgptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner))); } else { movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), (int32_t)NULL_WORD); @@ -2227,7 +2227,6 @@ // box is really RAX -- the following CMPXCHG depends on that binding // cmpxchg R,[M] is equivalent to rax = CAS(M,rax,R) - movptr(boxReg, (int32_t)NULL_WORD); if (os::is_MP()) { lock(); } cmpxchgptr(r15_thread, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner))); // There's no successor so we tried to regrab the lock. diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/cpu/x86/vm/macroAssembler_x86.hpp --- a/hotspot/src/cpu/x86/vm/macroAssembler_x86.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/cpu/x86/vm/macroAssembler_x86.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -1216,7 +1216,10 @@ void vpxor(XMMRegister dst, Address src) { Assembler::vpxor(dst, dst, src, true); } void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) { - if (UseAVX > 1) { // vinserti128 is available only in AVX2 + if (UseAVX > 2) { + Assembler::vinserti32x4(dst, dst, src, imm8); + } else if (UseAVX > 1) { + // vinserti128 is available only in AVX2 Assembler::vinserti128(dst, nds, src, imm8); } else { Assembler::vinsertf128(dst, nds, src, imm8); @@ -1224,7 +1227,10 @@ } void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) { - if (UseAVX > 1) { // vinserti128 is available only in AVX2 + if (UseAVX > 2) { + Assembler::vinserti32x4(dst, dst, src, imm8); + } else if (UseAVX > 1) { + // vinserti128 is available only in AVX2 Assembler::vinserti128(dst, nds, src, imm8); } else { Assembler::vinsertf128(dst, nds, src, imm8); @@ -1232,7 +1238,10 @@ } void vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) { - if (UseAVX > 1) { // vextracti128 is available only in AVX2 + if (UseAVX > 2) { + Assembler::vextracti32x4(dst, src, imm8); + } else if (UseAVX > 1) { + // vextracti128 is available only in AVX2 Assembler::vextracti128(dst, src, imm8); } else { Assembler::vextractf128(dst, src, imm8); @@ -1240,7 +1249,10 @@ } void vextracti128(Address dst, XMMRegister src, uint8_t imm8) { - if (UseAVX > 1) { // vextracti128 is available only in AVX2 + if (UseAVX > 2) { + Assembler::vextracti32x4(dst, src, imm8); + } else if (UseAVX > 1) { + // vextracti128 is available only in AVX2 
Assembler::vextracti128(dst, src, imm8); } else { Assembler::vextractf128(dst, src, imm8); @@ -1260,37 +1272,57 @@ void vextracti128_high(Address dst, XMMRegister src) { vextracti128(dst, src, 1); } + void vinsertf128_high(XMMRegister dst, XMMRegister src) { - vinsertf128(dst, dst, src, 1); + if (UseAVX > 2) { + Assembler::vinsertf32x4(dst, dst, src, 1); + } else { + Assembler::vinsertf128(dst, dst, src, 1); + } } + void vinsertf128_high(XMMRegister dst, Address src) { - vinsertf128(dst, dst, src, 1); + if (UseAVX > 2) { + Assembler::vinsertf32x4(dst, dst, src, 1); + } else { + Assembler::vinsertf128(dst, dst, src, 1); + } } + void vextractf128_high(XMMRegister dst, XMMRegister src) { - vextractf128(dst, src, 1); + if (UseAVX > 2) { + Assembler::vextractf32x4(dst, src, 1); + } else { + Assembler::vextractf128(dst, src, 1); + } } + void vextractf128_high(Address dst, XMMRegister src) { - vextractf128(dst, src, 1); + if (UseAVX > 2) { + Assembler::vextractf32x4(dst, src, 1); + } else { + Assembler::vextractf128(dst, src, 1); + } } // 256bit copy to/from high 256 bits of 512bit (ZMM) vector registers void vinserti64x4_high(XMMRegister dst, XMMRegister src) { - vinserti64x4(dst, dst, src, 1); + Assembler::vinserti64x4(dst, dst, src, 1); } void vinsertf64x4_high(XMMRegister dst, XMMRegister src) { - vinsertf64x4(dst, dst, src, 1); + Assembler::vinsertf64x4(dst, dst, src, 1); } void vextracti64x4_high(XMMRegister dst, XMMRegister src) { - vextracti64x4(dst, src, 1); + Assembler::vextracti64x4(dst, src, 1); } void vextractf64x4_high(XMMRegister dst, XMMRegister src) { - vextractf64x4(dst, src, 1); + Assembler::vextractf64x4(dst, src, 1); } void vextractf64x4_high(Address dst, XMMRegister src) { - vextractf64x4(dst, src, 1); + Assembler::vextractf64x4(dst, src, 1); } void vinsertf64x4_high(XMMRegister dst, Address src) { - vinsertf64x4(dst, dst, src, 1); + Assembler::vinsertf64x4(dst, dst, src, 1); } // 128bit copy to/from low 128 bits of 256bit (YMM) vector registers @@ -1306,40 +1338,59 @@ void vextracti128_low(Address dst, XMMRegister src) { vextracti128(dst, src, 0); } + void vinsertf128_low(XMMRegister dst, XMMRegister src) { - vinsertf128(dst, dst, src, 0); + if (UseAVX > 2) { + Assembler::vinsertf32x4(dst, dst, src, 0); + } else { + Assembler::vinsertf128(dst, dst, src, 0); + } } + void vinsertf128_low(XMMRegister dst, Address src) { - vinsertf128(dst, dst, src, 0); + if (UseAVX > 2) { + Assembler::vinsertf32x4(dst, dst, src, 0); + } else { + Assembler::vinsertf128(dst, dst, src, 0); + } } + void vextractf128_low(XMMRegister dst, XMMRegister src) { - vextractf128(dst, src, 0); + if (UseAVX > 2) { + Assembler::vextractf32x4(dst, src, 0); + } else { + Assembler::vextractf128(dst, src, 0); + } } + void vextractf128_low(Address dst, XMMRegister src) { - vextractf128(dst, src, 0); + if (UseAVX > 2) { + Assembler::vextractf32x4(dst, src, 0); + } else { + Assembler::vextractf128(dst, src, 0); + } } // 256bit copy to/from low 256 bits of 512bit (ZMM) vector registers void vinserti64x4_low(XMMRegister dst, XMMRegister src) { - vinserti64x4(dst, dst, src, 0); + Assembler::vinserti64x4(dst, dst, src, 0); } void vinsertf64x4_low(XMMRegister dst, XMMRegister src) { - vinsertf64x4(dst, dst, src, 0); + Assembler::vinsertf64x4(dst, dst, src, 0); } void vextracti64x4_low(XMMRegister dst, XMMRegister src) { - vextracti64x4(dst, src, 0); + Assembler::vextracti64x4(dst, src, 0); } void vextractf64x4_low(XMMRegister dst, XMMRegister src) { - vextractf64x4(dst, src, 0); + Assembler::vextractf64x4(dst, src, 0); } 
void vextractf64x4_low(Address dst, XMMRegister src) { - vextractf64x4(dst, src, 0); + Assembler::vextractf64x4(dst, src, 0); } void vinsertf64x4_low(XMMRegister dst, Address src) { - vinsertf64x4(dst, dst, src, 0); + Assembler::vinsertf64x4(dst, dst, src, 0); } - // Carry-Less Multiplication Quadword void vpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src) { // 0x00 - multiply lower 64 bits [0:63] diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/cpu/x86/vm/methodHandles_x86.cpp --- a/hotspot/src/cpu/x86/vm/methodHandles_x86.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/cpu/x86/vm/methodHandles_x86.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,6 +28,7 @@ #include "interpreter/interpreter.hpp" #include "interpreter/interpreterRuntime.hpp" #include "memory/allocation.inline.hpp" +#include "memory/resourceArea.hpp" #include "prims/methodHandles.hpp" #define __ _masm-> diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/cpu/x86/vm/runtime_x86_32.cpp --- a/hotspot/src/cpu/x86/vm/runtime_x86_32.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/cpu/x86/vm/runtime_x86_32.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,6 +29,7 @@ #include "classfile/systemDictionary.hpp" #include "code/vmreg.hpp" #include "interpreter/interpreter.hpp" +#include "memory/resourceArea.hpp" #include "opto/runtime.hpp" #include "runtime/interfaceSupport.hpp" #include "runtime/sharedRuntime.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp --- a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -29,6 +29,7 @@ #include "code/icBuffer.hpp" #include "code/vtableStubs.hpp" #include "interpreter/interpreter.hpp" +#include "memory/resourceArea.hpp" #include "oops/compiledICHolder.hpp" #include "prims/jvmtiRedefineClassesTrace.hpp" #include "runtime/sharedRuntime.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp --- a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -32,6 +32,7 @@ #include "code/icBuffer.hpp" #include "code/vtableStubs.hpp" #include "interpreter/interpreter.hpp" +#include "memory/resourceArea.hpp" #include "oops/compiledICHolder.hpp" #include "prims/jvmtiRedefineClassesTrace.hpp" #include "runtime/sharedRuntime.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/cpu/x86/vm/templateInterpreterGenerator_x86.cpp --- a/hotspot/src/cpu/x86/vm/templateInterpreterGenerator_x86.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/cpu/x86/vm/templateInterpreterGenerator_x86.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1830,7 +1830,7 @@ __ push(state); // save tosca // pass tosca registers as arguments & call tracer - __ call_VM(noreg, CAST_FROM_FN_PTR(address, 
SharedRuntime::trace_bytecode), rcx, rax, rdx); + __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), rcx, rax, rdx); __ mov(rcx, rax); // make sure return address is not destroyed by pop(state) __ pop(state); // restore tosca @@ -1847,7 +1847,7 @@ __ movflt(xmm3, xmm0); // Pass ftos #endif __ call_VM(noreg, - CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), + CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), c_rarg1, c_rarg2, c_rarg3); __ pop(c_rarg3); __ pop(c_rarg2); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/cpu/zero/vm/methodHandles_zero.cpp --- a/hotspot/src/cpu/zero/vm/methodHandles_zero.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/cpu/zero/vm/methodHandles_zero.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. * Copyright 2009, 2010, 2011 Red Hat, Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -27,6 +27,7 @@ #include "interpreter/cppInterpreterGenerator.hpp" #include "interpreter/interpreter.hpp" #include "memory/allocation.inline.hpp" +#include "memory/resourceArea.hpp" #include "oops/oop.inline.hpp" #include "prims/methodHandles.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/CommandProcessor.java --- a/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/CommandProcessor.java Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/CommandProcessor.java Wed Jul 05 21:35:27 2017 +0200 @@ -549,11 +549,9 @@ }, new Command("buildreplayjars", "buildreplayjars [ all | app | boot ] | [ prefix ]", false) { // This is used to dump jar files of all the classes - // loaded in the core. Everything on the bootclasspath + // loaded in the core. Everything with null classloader // will go in boot.jar and everything else will go in - // app.jar. Then the classes can be loaded by the replay - // jvm using -Xbootclasspath/p:boot.jar -cp app.jar. boot.jar usually - // not needed, unless changed by jvmti. + // app.jar. boot.jar usually not needed, unless changed by jvmti. 
public void doit(Tokens t) { int tcount = t.countTokens(); if (tcount > 2) { diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/SAGetopt.java --- a/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/SAGetopt.java Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/SAGetopt.java Wed Jul 05 21:35:27 2017 +0200 @@ -84,7 +84,11 @@ } else { // Mixed style options --file name - extractOptarg(ca[0]); + try { + extractOptarg(ca[0]); + } catch (ArrayIndexOutOfBoundsException e) { + throw new RuntimeException("Argument is expected for '" + ca[0] + "'"); + } } return ca[0]; diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/SALauncher.java --- a/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/SALauncher.java Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/SALauncher.java Wed Jul 05 21:35:27 2017 +0200 @@ -30,6 +30,7 @@ import sun.jvm.hotspot.tools.JStack; import sun.jvm.hotspot.tools.JMap; import sun.jvm.hotspot.tools.JInfo; +import sun.jvm.hotspot.tools.JSnap; public class SALauncher { @@ -39,6 +40,7 @@ System.out.println(" jstack --help\tto get more information"); System.out.println(" jmap --help\tto get more information"); System.out.println(" jinfo --help\tto get more information"); + System.out.println(" jsnap --help\tto get more information"); return false; } @@ -85,6 +87,11 @@ return commonHelp(); } + private static boolean jsnapHelp() { + System.out.println(" --all\tto print all performance counters"); + return commonHelp(); + } + private static boolean toolHelp(String toolName) { if (toolName.equals("jstack")) { return jstackHelp(); @@ -95,24 +102,62 @@ if (toolName.equals("jmap")) { return jmapHelp(); } + if (toolName.equals("jsnap")) { + return jsnapHelp(); + } if (toolName.equals("hsdb") || toolName.equals("clhsdb")) { return commonHelp(); } return launcherHelp(); } + private static void buildAttachArgs(ArrayList newArgs, + String pid, String exe, String core) { + if ((pid == null) && (exe == null)) { + throw new IllegalArgumentException( + "You have to set --pid or --exe."); + } + + if (pid != null) { // Attach to live process + if (exe != null) { + throw new IllegalArgumentException( + "Unnecessary argument: --exe"); + } else if (core != null) { + throw new IllegalArgumentException( + "Unnecessary argument: --core"); + } else if (!pid.matches("^\\d+$")) { + throw new IllegalArgumentException("Invalid pid: " + pid); + } + + newArgs.add(pid); + } else { + if (exe.length() == 0) { + throw new IllegalArgumentException("You have to set --exe."); + } + + newArgs.add(exe); + + if ((core == null) || (core.length() == 0)) { + throw new IllegalArgumentException("You have to set --core."); + } + + newArgs.add(core); + } + } + private static void runCLHSDB(String[] oldArgs) { SAGetopt sg = new SAGetopt(oldArgs); String[] longOpts = {"exe=", "core=", "pid="}; ArrayList newArgs = new ArrayList(); - String exeORpid = null; + String pid = null; + String exe = null; String core = null; String s = null; while((s = sg.next(null, longOpts)) != null) { if (s.equals("exe")) { - exeORpid = sg.getOptarg(); + exe = sg.getOptarg(); continue; } if (s.equals("core")) { @@ -120,17 +165,12 @@ continue; } if (s.equals("pid")) { - exeORpid = sg.getOptarg(); + pid = sg.getOptarg(); continue; } } - if (exeORpid != null) { - newArgs.add(exeORpid); - if (core != null) { - newArgs.add(core); - } - } + 
buildAttachArgs(newArgs, pid, exe, core); CLHSDB.main(newArgs.toArray(new String[newArgs.size()])); } @@ -139,13 +179,14 @@ String[] longOpts = {"exe=", "core=", "pid="}; ArrayList newArgs = new ArrayList(); - String exeORpid = null; + String pid = null; + String exe = null; String core = null; String s = null; while((s = sg.next(null, longOpts)) != null) { if (s.equals("exe")) { - exeORpid = sg.getOptarg(); + exe = sg.getOptarg(); continue; } if (s.equals("core")) { @@ -153,17 +194,12 @@ continue; } if (s.equals("pid")) { - exeORpid = sg.getOptarg(); + pid = sg.getOptarg(); continue; } } - if (exeORpid != null) { - newArgs.add(exeORpid); - if (core != null) { - newArgs.add(core); - } - } + buildAttachArgs(newArgs, pid, exe, core); HSDB.main(newArgs.toArray(new String[newArgs.size()])); } @@ -173,13 +209,14 @@ "mixed", "locks"}; ArrayList newArgs = new ArrayList(); - String exeORpid = null; + String pid = null; + String exe = null; String core = null; String s = null; while((s = sg.next(null, longOpts)) != null) { if (s.equals("exe")) { - exeORpid = sg.getOptarg(); + exe = sg.getOptarg(); continue; } if (s.equals("core")) { @@ -187,7 +224,7 @@ continue; } if (s.equals("pid")) { - exeORpid = sg.getOptarg(); + pid = sg.getOptarg(); continue; } if (s.equals("mixed")) { @@ -200,13 +237,7 @@ } } - if (exeORpid != null) { - newArgs.add(exeORpid); - if (core != null) { - newArgs.add(core); - } - } - + buildAttachArgs(newArgs, pid, exe, core); JStack.main(newArgs.toArray(new String[newArgs.size()])); } @@ -216,13 +247,14 @@ "heap", "binaryheap", "histo", "clstats", "finalizerinfo"}; ArrayList newArgs = new ArrayList(); - String exeORpid = null; + String pid = null; + String exe = null; String core = null; String s = null; while((s = sg.next(null, longOpts)) != null) { if (s.equals("exe")) { - exeORpid = sg.getOptarg(); + exe = sg.getOptarg(); continue; } if (s.equals("core")) { @@ -230,7 +262,7 @@ continue; } if (s.equals("pid")) { - exeORpid = sg.getOptarg(); + pid = sg.getOptarg(); continue; } if (s.equals("heap")) { @@ -255,13 +287,7 @@ } } - if (exeORpid != null) { - newArgs.add(exeORpid); - if (core != null) { - newArgs.add(core); - } - } - + buildAttachArgs(newArgs, pid, exe, core); JMap.main(newArgs.toArray(new String[newArgs.size()])); } @@ -271,13 +297,14 @@ "flags", "sysprops"}; ArrayList newArgs = new ArrayList(); - String exeORpid = null; + String exe = null; + String pid = null; String core = null; String s = null; while((s = sg.next(null, longOpts)) != null) { if (s.equals("exe")) { - exeORpid = sg.getOptarg(); + exe = sg.getOptarg(); continue; } if (s.equals("core")) { @@ -285,7 +312,7 @@ continue; } if (s.equals("pid")) { - exeORpid = sg.getOptarg(); + pid = sg.getOptarg(); continue; } if (s.equals("flags")) { @@ -298,14 +325,41 @@ } } - if (exeORpid != null) { - newArgs.add(exeORpid); - if (core != null) { - newArgs.add(core); + buildAttachArgs(newArgs, pid, exe, core); + JInfo.main(newArgs.toArray(new String[newArgs.size()])); + } + + private static void runJSNAP(String[] oldArgs) { + SAGetopt sg = new SAGetopt(oldArgs); + String[] longOpts = {"exe=", "core=", "pid=", "all"}; + + ArrayList newArgs = new ArrayList(); + String exe = null; + String pid = null; + String core = null; + String s = null; + + while((s = sg.next(null, longOpts)) != null) { + if (s.equals("exe")) { + exe = sg.getOptarg(); + continue; + } + if (s.equals("core")) { + core = sg.getOptarg(); + continue; + } + if (s.equals("pid")) { + pid = sg.getOptarg(); + continue; + } + if (s.equals("all")) { + 
newArgs.add("-a"); + continue; } } - JInfo.main(newArgs.toArray(new String[newArgs.size()])); + buildAttachArgs(newArgs, pid, exe, core); + JSnap.main(newArgs.toArray(new String[newArgs.size()])); } public static void main(String[] args) { @@ -329,31 +383,43 @@ String[] oldArgs = Arrays.copyOfRange(args, 1, args.length); - // Run SA interactive mode - if (args[0].equals("clhsdb")) { - runCLHSDB(oldArgs); - return; - } + try { + // Run SA interactive mode + if (args[0].equals("clhsdb")) { + runCLHSDB(oldArgs); + return; + } - if (args[0].equals("hsdb")) { - runHSDB(oldArgs); - return; - } + if (args[0].equals("hsdb")) { + runHSDB(oldArgs); + return; + } + + // Run SA tmtools mode + if (args[0].equals("jstack")) { + runJSTACK(oldArgs); + return; + } - // Run SA tmtools mode - if (args[0].equals("jstack")) { - runJSTACK(oldArgs); - return; - } + if (args[0].equals("jmap")) { + runJMAP(oldArgs); + return; + } + + if (args[0].equals("jinfo")) { + runJINFO(oldArgs); + return; + } - if (args[0].equals("jmap")) { - runJMAP(oldArgs); - return; - } + if (args[0].equals("jsnap")) { + runJSNAP(oldArgs); + return; + } - if (args[0].equals("jinfo")) { - runJINFO(oldArgs); - return; + throw new IllegalArgumentException("Unknown tool: " + args[0]); + } catch (Exception e) { + System.err.println(e.getMessage()); + toolHelp(args[0]); } } } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/GCCause.java --- a/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/GCCause.java Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/GCCause.java Wed Jul 05 21:35:27 2017 +0200 @@ -35,6 +35,11 @@ _gc_locker ("GCLocker Initiated GC"), _heap_inspection ("Heap Inspection Initiated GC"), _heap_dump ("Heap Dump Initiated GC"), + _wb_young_gc ("WhiteBox Initiated Young GC"), + _wb_conc_mark ("WhiteBox Initiated Concurrent Mark"), + _wb_full_gc ("WhiteBox Initiated Full GC"), + _update_allocation_context_stats_inc ("Update Allocation Context Stats"), + _update_allocation_context_stats_full ("Update Allocation Context Stats"), _no_gc ("No GC"), _no_cause_specified ("Unknown GCCause"), @@ -42,6 +47,7 @@ _tenured_generation_full ("Tenured Generation Full"), _metadata_GC_threshold ("Metadata GC Threshold"), + _metadata_GC_clear_soft_refs ("Metadata GC Clear Soft References"), _cms_generation_full ("CMS Generation Full"), _cms_initial_mark ("CMS Initial Mark"), @@ -55,7 +61,8 @@ _g1_inc_collection_pause ("G1 Evacuation Pause"), _g1_humongous_allocation ("G1 Humongous Allocation"), - _last_ditch_collection ("Last ditch collection"), + _dcmd_gc_run ("Diagnostic Command"), + _last_gc_cause ("ILLEGAL VALUE - last gc cause - ILLEGAL VALUE"); private final String value; diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/VirtualMachineImpl.java --- a/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/VirtualMachineImpl.java Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/VirtualMachineImpl.java Wed Jul 05 21:35:27 2017 +0200 @@ -780,8 +780,8 @@ return getPath("java.class.path"); } - public List bootClassPath() { - return getPath("sun.boot.class.path"); + public List bootClassPath() { + return Collections.emptyList(); } public String baseDirectory() { diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/MethodCounters.java --- 
a/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/MethodCounters.java Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/MethodCounters.java Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2016 Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -47,8 +47,10 @@ private static synchronized void initialize(TypeDataBase db) throws WrongTypeException { Type type = db.lookupType("MethodCounters"); - interpreterInvocationCountField = new CIntField(type.getCIntegerField("_interpreter_invocation_count"), 0); - interpreterThrowoutCountField = new CIntField(type.getCIntegerField("_interpreter_throwout_count"), 0); + if (VM.getVM().isServerCompiler()) { + interpreterInvocationCountField = new CIntField(type.getCIntegerField("_interpreter_invocation_count"), 0); + interpreterThrowoutCountField = new CIntField(type.getCIntegerField("_interpreter_throwout_count"), 0); + } if (!VM.getVM().isCore()) { invocationCounter = new CIntField(type.getCIntegerField("_invocation_counter"), 0); backedgeCounter = new CIntField(type.getCIntegerField("_backedge_counter"), 0); @@ -61,11 +63,19 @@ private static CIntField backedgeCounter; public int interpreterInvocationCount() { - return (int) interpreterInvocationCountField.getValue(this); + if (interpreterInvocationCountField != null) { + return (int) interpreterInvocationCountField.getValue(this); + } else { + return 0; + } } public int interpreterThrowoutCount() { - return (int) interpreterThrowoutCountField.getValue(this); + if (interpreterThrowoutCountField != null) { + return (int) interpreterThrowoutCountField.getValue(this); + } else { + return 0; + } } public long getInvocationCounter() { if (Assert.ASSERTS_ENABLED) { diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/Threads.java --- a/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/Threads.java Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/Threads.java Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -130,7 +130,7 @@ virtualConstructor.addMapping("CodeCacheSweeperThread", CodeCacheSweeperThread.class); } // for now, use JavaThread itself. 
fix it later with appropriate class if needed - virtualConstructor.addMapping("SurrogateLockerThread", JavaThread.class); + virtualConstructor.addMapping("ReferencePendingListLockerThread", JavaThread.class); virtualConstructor.addMapping("JvmtiAgentThread", JvmtiAgentThread.class); virtualConstructor.addMapping("ServiceThread", ServiceThread.class); } @@ -172,7 +172,7 @@ return thread; } catch (Exception e) { throw new RuntimeException("Unable to deduce type of thread from address " + threadAddr + - " (expected type JavaThread, CompilerThread, ServiceThread, JvmtiAgentThread, SurrogateLockerThread, or CodeCacheSweeperThread)", e); + " (expected type JavaThread, CompilerThread, ServiceThread, JvmtiAgentThread, ReferencePendingListLockerThread, or CodeCacheSweeperThread)", e); } } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/tools/JSnap.java --- a/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/tools/JSnap.java Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/tools/JSnap.java Wed Jul 05 21:35:27 2017 +0200 @@ -25,11 +25,15 @@ package sun.jvm.hotspot.tools; import java.io.*; +import java.util.*; +import java.util.stream.*; import sun.jvm.hotspot.debugger.JVMDebugger; import sun.jvm.hotspot.runtime.*; public class JSnap extends Tool { + private boolean all; + public JSnap() { super(); } @@ -45,7 +49,7 @@ if (prologue.accessible()) { PerfMemory.iterate(new PerfMemory.PerfDataEntryVisitor() { public boolean visit(PerfDataEntry pde) { - if (pde.supported()) { + if (all || pde.supported()) { out.print(pde.name()); out.print('='); out.println(pde.valueAsString()); @@ -62,8 +66,24 @@ } } + @Override + protected void printFlagsUsage() { + System.out.println(" -a\tto print all performance counters"); + super.printFlagsUsage(); + } + public static void main(String[] args) { JSnap js = new JSnap(); + js.all = Arrays.stream(args) + .anyMatch(s -> s.equals("-a")); + + if (js.all) { + args = Arrays.stream(args) + .filter(s -> !s.equals("-a")) + .collect(Collectors.toList()) + .toArray(new String[0]); + } + js.execute(args); } } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/utilities/CompactHashTable.java --- a/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/utilities/CompactHashTable.java Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/utilities/CompactHashTable.java Wed Jul 05 21:35:27 2017 +0200 @@ -81,6 +81,12 @@ } public Symbol probe(byte[] name, long hash) { + + if (bucketCount() == 0) { + // The table is invalid, so don't try to lookup + return null; + } + long symOffset; Symbol sym; Address baseAddress = baseAddressField.getValue(addr); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/utilities/soql/JSJavaVM.java --- a/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/utilities/soql/JSJavaVM.java Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/utilities/soql/JSJavaVM.java Wed Jul 05 21:35:27 2017 +0200 @@ -75,8 +75,6 @@ return vm.getVMRelease(); case FIELD_CLASS_PATH: return getClassPath(); - case FIELD_BOOT_CLASS_PATH: - return getBootClassPath(); case FIELD_USER_DIR: return getUserDir(); case FIELD_UNDEFINED: @@ -143,7 +141,6 @@ addField("type", FIELD_TYPE); addField("version", FIELD_VERSION); addField("classPath", FIELD_CLASS_PATH); - addField("bootClassPath", 
FIELD_BOOT_CLASS_PATH); addField("userDir", FIELD_USER_DIR); } @@ -217,10 +214,6 @@ return vm.getSystemProperty("java.class.path"); } - private String getBootClassPath() { - return vm.getSystemProperty("sun.boot.class.path"); - } - private String getUserDir() { return vm.getSystemProperty("user.dir"); } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/utilities/soql/sa.js --- a/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/utilities/soql/sa.js Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/utilities/soql/sa.js Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2004, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -837,7 +837,7 @@ vmType2Class["JavaThread"] = sapkg.runtime.JavaThread; vmType2Class["CompilerThread"] = sapkg.runtime.CompilerThread; vmType2Class["CodeCacheSweeperThread"] = sapkg.runtime.CodeCacheSweeperThread; -vmType2Class["SurrogateLockerThread"] = sapkg.runtime.JavaThread; +vmType2Class["ReferencePendingListLockerThread"] = sapkg.runtime.JavaThread; vmType2Class["DebuggerThread"] = sapkg.runtime.DebuggerThread; // gc diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotMemoryAccessProviderImpl.java --- a/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotMemoryAccessProviderImpl.java Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotMemoryAccessProviderImpl.java Wed Jul 05 21:35:27 2017 +0200 @@ -23,7 +23,6 @@ package jdk.vm.ci.hotspot; import static jdk.vm.ci.hotspot.UnsafeAccess.UNSAFE; -import jdk.vm.ci.common.JVMCIError; import jdk.vm.ci.hotspot.HotSpotVMConfig.CompressEncoding; import jdk.vm.ci.meta.Constant; import jdk.vm.ci.meta.JavaConstant; @@ -59,7 +58,7 @@ return true; } } else { - throw new JVMCIError("%s", metaspaceObject); + throw new IllegalArgumentException(String.valueOf(metaspaceObject)); } } return false; @@ -75,7 +74,7 @@ return prim.asLong(); } } - throw new JVMCIError("%s", base); + throw new IllegalArgumentException(String.valueOf(base)); } private static long readRawValue(Constant baseConstant, long displacement, int bits) { @@ -91,7 +90,7 @@ case Long.SIZE: return UNSAFE.getLong(base, displacement); default: - throw new JVMCIError("%d", bits); + throw new IllegalArgumentException(String.valueOf(bits)); } } else { long pointer = asRawPointer(baseConstant); @@ -105,7 +104,7 @@ case Long.SIZE: return UNSAFE.getLong(pointer + displacement); default: - throw new JVMCIError("%d", bits); + throw new IllegalArgumentException(String.valueOf(bits)); } } } @@ -178,7 +177,7 @@ case Double: return JavaConstant.forDouble(Double.longBitsToDouble(rawValue)); default: - throw new JVMCIError("Unsupported kind: %s", kind); + throw new IllegalArgumentException("Unsupported kind: " + kind); } } catch (NullPointerException e) { return null; diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.meta/src/jdk/vm/ci/meta/MemoryAccessProvider.java --- a/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.meta/src/jdk/vm/ci/meta/MemoryAccessProvider.java Mon Apr 18 16:18:56 2016 +0100 +++ 
b/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.meta/src/jdk/vm/ci/meta/MemoryAccessProvider.java Wed Jul 05 21:35:27 2017 +0200 @@ -35,8 +35,10 @@ * @param displacement the displacement within the object in bytes * @return the read value encapsulated in a {@link JavaConstant} object, or {@code null} if the * value cannot be read. + * @throws IllegalArgumentException if {@code kind} is {@link JavaKind#Void} or not + * {@linkplain JavaKind#isPrimitive() primitive} kind */ - JavaConstant readUnsafeConstant(JavaKind kind, JavaConstant base, long displacement); + JavaConstant readUnsafeConstant(JavaKind kind, JavaConstant base, long displacement) throws IllegalArgumentException; /** * Reads a primitive value using a base address and a displacement. @@ -46,8 +48,11 @@ * @param displacement the displacement within the object in bytes * @param bits the number of bits to read from memory * @return the read value encapsulated in a {@link JavaConstant} object of {@link JavaKind} kind + * @throws IllegalArgumentException if {@code kind} is {@link JavaKind#Void} or not + * {@linkplain JavaKind#isPrimitive() primitive} kind or {@code bits} is not 8, 16, + * 32 or 64 */ - JavaConstant readPrimitiveConstant(JavaKind kind, Constant base, long displacement, int bits); + JavaConstant readPrimitiveConstant(JavaKind kind, Constant base, long displacement, int bits) throws IllegalArgumentException; /** * Reads a Java {@link Object} value using a base address and a displacement. diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.meta/src/jdk/vm/ci/meta/MethodHandleAccessProvider.java --- a/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.meta/src/jdk/vm/ci/meta/MethodHandleAccessProvider.java Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.meta/src/jdk/vm/ci/meta/MethodHandleAccessProvider.java Wed Jul 05 21:35:27 2017 +0200 @@ -51,6 +51,8 @@ /** * Returns the method handle method intrinsic identifier for the provided method, or * {@code null} if the method is not an intrinsic processed by this interface. + * + * @throws NullPointerException if {@code method} is null */ IntrinsicMethod lookupMethodHandleIntrinsic(ResolvedJavaMethod method); @@ -58,19 +60,27 @@ * Resolves the invocation target for an invocation of {@link IntrinsicMethod#INVOKE_BASIC * MethodHandle.invokeBasic} with the given constant receiver {@link MethodHandle}. Returns * {@code null} if the invocation target is not available at this time. - *
<p>
+ * * The first invocations of a method handle can use an interpreter to lookup the actual invoked * method; frequently executed method handles can use Java bytecode generation to avoid the * interpreter overhead. If the parameter forceBytecodeGeneration is set to true, the VM should * try to generate bytecodes before this method returns. + * + * @returns {@code null} if {@code methodHandle} is not a {@link MethodHandle} or the invocation + * target is not available at this time + * @throws NullPointerException if {@code methodHandle} is null */ ResolvedJavaMethod resolveInvokeBasicTarget(JavaConstant methodHandle, boolean forceBytecodeGeneration); /** * Resolves the invocation target for an invocation of a {@code MethodHandle.linkTo*} method * with the given constant member name. The member name is the last parameter of the - * {@code linkTo*} method. Returns {@code null} if the invocation target is not available at - * this time. + * {@code linkTo*} method. + * + * @returns {@code null} if the invocation target is not available at this time + * @throws NullPointerException if {@code memberName} is null + * @throws IllegalArgumentException if {@code memberName} is not a + * {@code java.lang.invoke.MemberName} */ ResolvedJavaMethod resolveLinkToTarget(JavaConstant memberName); } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/os/aix/vm/os_aix.cpp --- a/hotspot/src/os/aix/vm/os_aix.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/os/aix/vm/os_aix.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -910,8 +910,8 @@ log_info(os, thread)("Thread started (pthread id: " UINTX_FORMAT ", attributes: %s). ", (uintx) tid, os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr)); } else { - log_warning(os, thread)("Failed to start thread - pthread_create failed (%s) for attributes: %s.", - strerror(ret), os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr)); + log_warning(os, thread)("Failed to start thread - pthread_create failed (%d=%s) for attributes: %s.", + ret, os::errno_name(ret), os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr)); } pthread_attr_destroy(&attr); @@ -1178,7 +1178,7 @@ size_t os::lasterror(char *buf, size_t len) { if (errno == 0) return 0; - const char *s = ::strerror(errno); + const char *s = os::strerror(errno); size_t n = ::strlen(s); if (n >= len) { n = len - 1; @@ -1714,14 +1714,14 @@ if (os::Aix::on_aix()) { int rc = ::sem_post(&sig_sem); if (rc == -1 && !warn_only_once) { - trcVerbose("sem_post failed (errno = %d, %s)", errno, strerror(errno)); + trcVerbose("sem_post failed (errno = %d, %s)", errno, os::errno_name(errno)); warn_only_once = true; } } else { guarantee0(p_sig_msem != NULL); int rc = ::msem_unlock(p_sig_msem, 0); if (rc == -1 && !warn_only_once) { - trcVerbose("msem_unlock failed (errno = %d, %s)", errno, strerror(errno)); + trcVerbose("msem_unlock failed (errno = %d, %s)", errno, os::errno_name(errno)); warn_only_once = true; } } @@ -1732,14 +1732,14 @@ if (os::Aix::on_aix()) { int rc = ::sem_wait(&sig_sem); if (rc == -1 && !warn_only_once) { - trcVerbose("sem_wait failed (errno = %d, %s)", errno, strerror(errno)); + trcVerbose("sem_wait failed (errno = %d, %s)", errno, os::errno_name(errno)); warn_only_once = true; } } else { guarantee0(p_sig_msem != NULL); // must init before use int rc = ::msem_lock(p_sig_msem, 0); if (rc == -1 && !warn_only_once) { - trcVerbose("msem_lock failed (errno = %d, %s)", errno, strerror(errno)); + trcVerbose("msem_lock failed (errno = %d, %s)", errno, os::errno_name(errno)); warn_only_once = true; } } @@ -2203,7 +2203,7 
@@ int err) { warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", addr, size, exec, - strerror(err), err); + os::errno_name(err), err); } #endif @@ -2412,7 +2412,7 @@ bool rc = ::mprotect(addr, size, prot) == 0 ? true : false; if (!rc) { - const char* const s_errno = strerror(errno); + const char* const s_errno = os::errno_name(errno); warning("mprotect(" PTR_FORMAT "-" PTR_FORMAT ", 0x%X) failed (%s).", addr, addr + size, prot, s_errno); return false; } @@ -2634,7 +2634,7 @@ if (ret != 0) { trcVerbose("Could not change priority for thread %d to %d (error %d, %s)", - (int)thr, newpri, ret, strerror(ret)); + (int)thr, newpri, ret, os::errno_name(ret)); } return (ret == 0) ? OS_OK : OS_ERR; } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/os/aix/vm/perfMemory_aix.cpp --- a/hotspot/src/os/aix/vm/perfMemory_aix.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/os/aix/vm/perfMemory_aix.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -30,6 +30,7 @@ #include "oops/oop.inline.hpp" #include "os_aix.inline.hpp" #include "runtime/handles.inline.hpp" +#include "runtime/os.hpp" #include "runtime/perfMemory.hpp" #include "services/memTracker.hpp" #include "utilities/exceptions.hpp" @@ -101,7 +102,7 @@ if (result == OS_ERR) { if (PrintMiscellaneous && Verbose) { warning("Could not create Perfdata save file: %s: %s\n", - destfile, strerror(errno)); + destfile, os::strerror(errno)); } } else { int fd = result; @@ -112,7 +113,7 @@ if (result == OS_ERR) { if (PrintMiscellaneous && Verbose) { warning("Could not write Perfdata save file: %s: %s\n", - destfile, strerror(errno)); + destfile, os::strerror(errno)); } break; } @@ -124,7 +125,7 @@ result = ::close(fd); if (PrintMiscellaneous && Verbose) { if (result == OS_ERR) { - warning("Could not close %s: %s\n", destfile, strerror(errno)); + warning("Could not close %s: %s\n", destfile, os::strerror(errno)); } } } @@ -397,7 +398,7 @@ if (errno == ELOOP) { warning("directory %s is a symlink and is not secure\n", dirname); } else { - warning("could not open directory %s: %s\n", dirname, strerror(errno)); + warning("could not open directory %s: %s\n", dirname, os::strerror(errno)); } } return dirp; @@ -507,7 +508,7 @@ RESTARTABLE(::fstat(fd, &statbuf), result); if (result == OS_ERR) { if (PrintMiscellaneous && Verbose) { - warning("fstat failed on %s: %s\n", filename, strerror(errno)); + warning("fstat failed on %s: %s\n", filename, os::strerror(errno)); } return false; } @@ -543,7 +544,7 @@ if (PrintMiscellaneous && Verbose) { if (result != 0) { warning("Could not retrieve passwd entry: %s\n", - strerror(result)); + os::strerror(result)); } else if (p == NULL) { // this check is added to protect against an observed problem @@ -557,7 +558,7 @@ // Bug Id 89052 was opened with RedHat. 
// warning("Could not retrieve passwd entry: %s\n", - strerror(errno)); + os::strerror(errno)); } else { warning("Could not determine user name: %s\n", @@ -593,7 +594,7 @@ "Process not found"); } else /* EPERM */ { - THROW_MSG_0(vmSymbols::java_io_IOException(), strerror(errno)); + THROW_MSG_0(vmSymbols::java_io_IOException(), os::strerror(errno)); } } @@ -746,7 +747,7 @@ if (PrintMiscellaneous && Verbose && result == OS_ERR) { if (errno != ENOENT) { warning("Could not unlink shared memory backing" - " store file %s : %s\n", path, strerror(errno)); + " store file %s : %s\n", path, os::strerror(errno)); } } } @@ -849,7 +850,7 @@ // if (PrintMiscellaneous && Verbose) { warning("could not create directory %s: %s\n", - dirname, strerror(errno)); + dirname, os::strerror(errno)); } return false; } @@ -900,7 +901,7 @@ if (errno == ELOOP) { warning("file %s is a symlink and is not secure\n", filename); } else { - warning("could not create file %s: %s\n", filename, strerror(errno)); + warning("could not create file %s: %s\n", filename, os::strerror(errno)); } } // Close the directory and reset the current working directory. @@ -924,7 +925,7 @@ RESTARTABLE(::ftruncate(fd, (off_t)0), result); if (result == OS_ERR) { if (PrintMiscellaneous && Verbose) { - warning("could not truncate shared memory file: %s\n", strerror(errno)); + warning("could not truncate shared memory file: %s\n", os::strerror(errno)); } ::close(fd); return -1; @@ -933,7 +934,7 @@ RESTARTABLE(::ftruncate(fd, (off_t)size), result); if (result == OS_ERR) { if (PrintMiscellaneous && Verbose) { - warning("could not set shared memory file size: %s\n", strerror(errno)); + warning("could not set shared memory file size: %s\n", os::strerror(errno)); } ::close(fd); return -1; @@ -968,7 +969,7 @@ "Permission denied"); } else { - THROW_MSG_0(vmSymbols::java_io_IOException(), strerror(errno)); + THROW_MSG_0(vmSymbols::java_io_IOException(), os::strerror(errno)); } } int fd = result; @@ -1041,7 +1042,7 @@ if (mapAddress == MAP_FAILED) { if (PrintMiscellaneous && Verbose) { - warning("mmap failed - %s\n", strerror(errno)); + warning("mmap failed - %s\n", os::strerror(errno)); } remove_file(filename); FREE_C_HEAP_ARRAY(char, filename); @@ -1109,7 +1110,7 @@ RESTARTABLE(::fstat(fd, &statbuf), result); if (result == OS_ERR) { if (PrintMiscellaneous && Verbose) { - warning("fstat failed: %s\n", strerror(errno)); + warning("fstat failed: %s\n", os::strerror(errno)); } THROW_MSG_0(vmSymbols::java_io_IOException(), "Could not determine PerfMemory size"); @@ -1231,7 +1232,7 @@ if (mapAddress == MAP_FAILED) { if (PrintMiscellaneous && Verbose) { - warning("mmap failed: %s\n", strerror(errno)); + warning("mmap failed: %s\n", os::strerror(errno)); } THROW_MSG(vmSymbols::java_lang_OutOfMemoryError(), "Could not map PerfMemory"); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/os/bsd/vm/os_bsd.cpp --- a/hotspot/src/os/bsd/vm/os_bsd.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/os/bsd/vm/os_bsd.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -789,7 +789,7 @@ (uintx) tid, os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr)); } else { log_warning(os, thread)("Failed to start thread - pthread_create failed (%s) for attributes: %s.", - strerror(ret), os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr)); + os::errno_name(ret), os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr)); } pthread_attr_destroy(&attr); @@ -1122,7 +1122,7 @@ size_t os::lasterror(char *buf, size_t len) { if (errno == 0) return 0; - const char *s = ::strerror(errno); + const 
char *s = os::strerror(errno); size_t n = ::strlen(s); if (n >= len) { n = len - 1; @@ -2141,7 +2141,7 @@ int err) { warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", addr, size, exec, - strerror(err), err); + os::errno_name(err), err); } // NOTE: Bsd kernel does not really reserve the pages for us. @@ -3422,7 +3422,7 @@ Bsd::set_page_size(getpagesize()); if (Bsd::page_size() == -1) { - fatal("os_bsd.cpp: os::init: sysconf failed (%s)", strerror(errno)); + fatal("os_bsd.cpp: os::init: sysconf failed (%s)", os::strerror(errno)); } init_page_sizes((size_t) Bsd::page_size()); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/os/bsd/vm/perfMemory_bsd.cpp --- a/hotspot/src/os/bsd/vm/perfMemory_bsd.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/os/bsd/vm/perfMemory_bsd.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,6 +29,7 @@ #include "oops/oop.inline.hpp" #include "os_bsd.inline.hpp" #include "runtime/handles.inline.hpp" +#include "runtime/os.hpp" #include "runtime/perfMemory.hpp" #include "services/memTracker.hpp" #include "utilities/exceptions.hpp" @@ -100,7 +101,7 @@ if (result == OS_ERR) { if (PrintMiscellaneous && Verbose) { warning("Could not create Perfdata save file: %s: %s\n", - destfile, strerror(errno)); + destfile, os::strerror(errno)); } } else { int fd = result; @@ -111,7 +112,7 @@ if (result == OS_ERR) { if (PrintMiscellaneous && Verbose) { warning("Could not write Perfdata save file: %s: %s\n", - destfile, strerror(errno)); + destfile, os::strerror(errno)); } break; } @@ -123,7 +124,7 @@ result = ::close(fd); if (PrintMiscellaneous && Verbose) { if (result == OS_ERR) { - warning("Could not close %s: %s\n", destfile, strerror(errno)); + warning("Could not close %s: %s\n", destfile, os::strerror(errno)); } } } @@ -309,7 +310,7 @@ if (errno == ELOOP) { warning("directory %s is a symlink and is not secure\n", dirname); } else { - warning("could not open directory %s: %s\n", dirname, strerror(errno)); + warning("could not open directory %s: %s\n", dirname, os::strerror(errno)); } } return dirp; @@ -420,7 +421,7 @@ RESTARTABLE(::fstat(fd, &statbuf), result); if (result == OS_ERR) { if (PrintMiscellaneous && Verbose) { - warning("fstat failed on %s: %s\n", filename, strerror(errno)); + warning("fstat failed on %s: %s\n", filename, os::strerror(errno)); } return false; } @@ -459,7 +460,7 @@ if (PrintMiscellaneous && Verbose) { if (result != 0) { warning("Could not retrieve passwd entry: %s\n", - strerror(result)); + os::strerror(result)); } else if (p == NULL) { // this check is added to protect against an observed problem @@ -473,7 +474,7 @@ // Bug Id 89052 was opened with RedHat. 
// warning("Could not retrieve passwd entry: %s\n", - strerror(errno)); + os::strerror(errno)); } else { warning("Could not determine user name: %s\n", @@ -509,7 +510,7 @@ "Process not found"); } else /* EPERM */ { - THROW_MSG_0(vmSymbols::java_io_IOException(), strerror(errno)); + THROW_MSG_0(vmSymbols::java_io_IOException(), os::strerror(errno)); } } @@ -652,7 +653,7 @@ if (PrintMiscellaneous && Verbose && result == OS_ERR) { if (errno != ENOENT) { warning("Could not unlink shared memory backing" - " store file %s : %s\n", path, strerror(errno)); + " store file %s : %s\n", path, os::strerror(errno)); } } } @@ -762,7 +763,7 @@ // if (PrintMiscellaneous && Verbose) { warning("could not create directory %s: %s\n", - dirname, strerror(errno)); + dirname, os::strerror(errno)); } return false; } @@ -804,7 +805,7 @@ if (errno == ELOOP) { warning("file %s is a symlink and is not secure\n", filename); } else { - warning("could not create file %s: %s\n", filename, strerror(errno)); + warning("could not create file %s: %s\n", filename, os::strerror(errno)); } } // close the directory and reset the current working directory @@ -828,7 +829,7 @@ RESTARTABLE(::ftruncate(fd, (off_t)0), result); if (result == OS_ERR) { if (PrintMiscellaneous && Verbose) { - warning("could not truncate shared memory file: %s\n", strerror(errno)); + warning("could not truncate shared memory file: %s\n", os::strerror(errno)); } ::close(fd); return -1; @@ -837,7 +838,7 @@ RESTARTABLE(::ftruncate(fd, (off_t)size), result); if (result == OS_ERR) { if (PrintMiscellaneous && Verbose) { - warning("could not set shared memory file size: %s\n", strerror(errno)); + warning("could not set shared memory file size: %s\n", os::strerror(errno)); } ::close(fd); return -1; @@ -887,7 +888,7 @@ "Permission denied", OS_ERR); } else { - THROW_MSG_(vmSymbols::java_io_IOException(), strerror(errno), OS_ERR); + THROW_MSG_(vmSymbols::java_io_IOException(), os::strerror(errno), OS_ERR); } } int fd = result; @@ -961,7 +962,7 @@ if (mapAddress == MAP_FAILED) { if (PrintMiscellaneous && Verbose) { - warning("mmap failed - %s\n", strerror(errno)); + warning("mmap failed - %s\n", os::strerror(errno)); } remove_file(filename); FREE_C_HEAP_ARRAY(char, filename); @@ -1025,7 +1026,7 @@ RESTARTABLE(::fstat(fd, &statbuf), result); if (result == OS_ERR) { if (PrintMiscellaneous && Verbose) { - warning("fstat failed: %s\n", strerror(errno)); + warning("fstat failed: %s\n", os::strerror(errno)); } THROW_MSG_0(vmSymbols::java_io_IOException(), "Could not determine PerfMemory size"); @@ -1136,7 +1137,7 @@ if (mapAddress == MAP_FAILED) { if (PrintMiscellaneous && Verbose) { - warning("mmap failed: %s\n", strerror(errno)); + warning("mmap failed: %s\n", os::strerror(errno)); } THROW_MSG(vmSymbols::java_lang_OutOfMemoryError(), "Could not map PerfMemory"); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/os/linux/vm/os_linux.cpp --- a/hotspot/src/os/linux/vm/os_linux.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/os/linux/vm/os_linux.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -594,15 +594,7 @@ // _expand_stack_to() assumes its frame size is less than page size, which // should always be true if the function is not inlined. 
-#if __GNUC__ < 3 // gcc 2.x does not support noinline attribute - #define NOINLINE -#else - #define NOINLINE __attribute__ ((noinline)) -#endif - -static void _expand_stack_to(address bottom) NOINLINE; - -static void _expand_stack_to(address bottom) { +static void NOINLINE _expand_stack_to(address bottom) { address sp; size_t size; volatile char *p; @@ -769,7 +761,7 @@ (uintx) tid, os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr)); } else { log_warning(os, thread)("Failed to start thread - pthread_create failed (%s) for attributes: %s.", - strerror(ret), os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr)); + os::errno_name(ret), os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr)); } pthread_attr_destroy(&attr); @@ -890,6 +882,13 @@ assert(osthread != NULL, "osthread not set"); if (Thread::current()->osthread() == osthread) { +#ifdef ASSERT + sigset_t current; + sigemptyset(&current); + pthread_sigmask(SIG_SETMASK, NULL, &current); + assert(!sigismember(&current, SR_signum), "SR signal should not be blocked!"); +#endif + // Restore caller's signal mask sigset_t sigmask = osthread->caller_sigmask(); pthread_sigmask(SIG_SETMASK, &sigmask, NULL); @@ -1395,7 +1394,7 @@ size_t os::lasterror(char *buf, size_t len) { if (errno == 0) return 0; - const char *s = ::strerror(errno); + const char *s = os::strerror(errno); size_t n = ::strlen(s); if (n >= len) { n = len - 1; @@ -2601,7 +2600,7 @@ int err) { warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", p2i(addr), size, exec, - strerror(err), err); + os::strerror(err), err); } static void warn_fail_commit_memory(char* addr, size_t size, @@ -2609,7 +2608,7 @@ int err) { warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT ", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", p2i(addr), size, - alignment_hint, exec, strerror(err), err); + alignment_hint, exec, os::strerror(err), err); } // NOTE: Linux kernel does not really reserve the pages for us. @@ -3912,7 +3911,8 @@ // after sigsuspend. int old_errno = errno; - Thread* thread = Thread::current(); + Thread* thread = Thread::current_or_null_safe(); + assert(thread != NULL, "Missing current thread in SR_handler"); OSThread* osthread = thread->osthread(); assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread"); @@ -3924,7 +3924,7 @@ os::SuspendResume::State state = osthread->sr.suspended(); if (state == os::SuspendResume::SR_SUSPENDED) { sigset_t suspend_set; // signals for sigsuspend() - + sigemptyset(&suspend_set); // get current set of blocked signals and unblock resume signal pthread_sigmask(SIG_BLOCK, NULL, &suspend_set); sigdelset(&suspend_set, SR_signum); @@ -4178,6 +4178,7 @@ // try to honor the signal mask sigset_t oset; + sigemptyset(&oset); pthread_sigmask(SIG_SETMASK, &(actp->sa_mask), &oset); // call into the chained handler @@ -4188,7 +4189,7 @@ } // restore the signal mask - pthread_sigmask(SIG_SETMASK, &oset, 0); + pthread_sigmask(SIG_SETMASK, &oset, NULL); } // Tell jvm's signal handler the signal is taken care of. 
return true; @@ -4615,7 +4616,7 @@ Linux::set_page_size(sysconf(_SC_PAGESIZE)); if (Linux::page_size() == -1) { fatal("os_linux.cpp: os::init: sysconf failed (%s)", - strerror(errno)); + os::strerror(errno)); } init_page_sizes((size_t) Linux::page_size()); @@ -4633,7 +4634,7 @@ int status; pthread_condattr_t* _condattr = os::Linux::condAttr(); if ((status = pthread_condattr_init(_condattr)) != 0) { - fatal("pthread_condattr_init: %s", strerror(status)); + fatal("pthread_condattr_init: %s", os::strerror(status)); } // Only set the clock if CLOCK_MONOTONIC is available if (os::supports_monotonic_clock()) { @@ -4642,7 +4643,7 @@ warning("Unable to use monotonic clock with relative timed-waits" \ " - changes to the time-of-day clock may have adverse affects"); } else { - fatal("pthread_condattr_setclock: %s", strerror(status)); + fatal("pthread_condattr_setclock: %s", os::strerror(status)); } } } @@ -4888,7 +4889,7 @@ log_trace(os)("active_processor_count: " "CPU_ALLOC failed (%s) - using " "online processor count: %d", - strerror(errno), online_cpus); + os::strerror(errno), online_cpus); return online_cpus; } } @@ -4918,7 +4919,7 @@ else { cpu_count = ::sysconf(_SC_NPROCESSORS_ONLN); warning("sched_getaffinity failed (%s)- using online processor count (%d) " - "which may exceed available processors", strerror(errno), cpu_count); + "which may exceed available processors", os::strerror(errno), cpu_count); } if (cpus_p != &cpus) { // can only be true when CPU_ALLOC used @@ -5769,6 +5770,7 @@ // Don't catch signals while blocked; let the running threads have the signals. // (This allows a debugger to break into the running thread.) sigset_t oldsigs; + sigemptyset(&oldsigs); sigset_t* allowdebug_blocked = os::Linux::allowdebug_blocked_signals(); pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs); #endif diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/os/linux/vm/perfMemory_linux.cpp --- a/hotspot/src/os/linux/vm/perfMemory_linux.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/os/linux/vm/perfMemory_linux.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -29,6 +29,7 @@ #include "oops/oop.inline.hpp" #include "os_linux.inline.hpp" #include "runtime/handles.inline.hpp" +#include "runtime/os.hpp" #include "runtime/perfMemory.hpp" #include "services/memTracker.hpp" #include "utilities/exceptions.hpp" @@ -100,7 +101,7 @@ if (result == OS_ERR) { if (PrintMiscellaneous && Verbose) { warning("Could not create Perfdata save file: %s: %s\n", - destfile, strerror(errno)); + destfile, os::strerror(errno)); } } else { int fd = result; @@ -111,7 +112,7 @@ if (result == OS_ERR) { if (PrintMiscellaneous && Verbose) { warning("Could not write Perfdata save file: %s: %s\n", - destfile, strerror(errno)); + destfile, os::strerror(errno)); } break; } @@ -123,7 +124,7 @@ result = ::close(fd); if (PrintMiscellaneous && Verbose) { if (result == OS_ERR) { - warning("Could not close %s: %s\n", destfile, strerror(errno)); + warning("Could not close %s: %s\n", destfile, os::strerror(errno)); } } } @@ -308,7 +309,7 @@ if (errno == ELOOP) { warning("directory %s is a symlink and is not secure\n", dirname); } else { - warning("could not open directory %s: %s\n", dirname, strerror(errno)); + warning("could not open directory %s: %s\n", dirname, os::strerror(errno)); } } return dirp; @@ -419,7 +420,7 @@ RESTARTABLE(::fstat(fd, &statbuf), result); if (result == OS_ERR) { if (PrintMiscellaneous && Verbose) { - warning("fstat failed on %s: %s\n", filename, strerror(errno)); + warning("fstat failed on %s: %s\n", filename, os::strerror(errno)); } return false; } @@ -459,7 +460,7 @@ if (PrintMiscellaneous && Verbose) { if (result != 0) { warning("Could not retrieve passwd entry: %s\n", - strerror(result)); + os::strerror(result)); } else if (p == NULL) { // this check is added to protect against an observed problem @@ -473,7 +474,7 @@ // Bug Id 89052 was opened with RedHat. 
// warning("Could not retrieve passwd entry: %s\n", - strerror(errno)); + os::strerror(errno)); } else { warning("Could not determine user name: %s\n", @@ -509,7 +510,7 @@ "Process not found"); } else /* EPERM */ { - THROW_MSG_0(vmSymbols::java_io_IOException(), strerror(errno)); + THROW_MSG_0(vmSymbols::java_io_IOException(), os::strerror(errno)); } } @@ -664,7 +665,7 @@ if (PrintMiscellaneous && Verbose && result == OS_ERR) { if (errno != ENOENT) { warning("Could not unlink shared memory backing" - " store file %s : %s\n", path, strerror(errno)); + " store file %s : %s\n", path, os::strerror(errno)); } } } @@ -772,7 +773,7 @@ // if (PrintMiscellaneous && Verbose) { warning("could not create directory %s: %s\n", - dirname, strerror(errno)); + dirname, os::strerror(errno)); } return false; } @@ -814,7 +815,7 @@ if (errno == ELOOP) { warning("file %s is a symlink and is not secure\n", filename); } else { - warning("could not create file %s: %s\n", filename, strerror(errno)); + warning("could not create file %s: %s\n", filename, os::strerror(errno)); } } // close the directory and reset the current working directory @@ -838,7 +839,7 @@ RESTARTABLE(::ftruncate(fd, (off_t)0), result); if (result == OS_ERR) { if (PrintMiscellaneous && Verbose) { - warning("could not truncate shared memory file: %s\n", strerror(errno)); + warning("could not truncate shared memory file: %s\n", os::strerror(errno)); } ::close(fd); return -1; @@ -847,7 +848,7 @@ RESTARTABLE(::ftruncate(fd, (off_t)size), result); if (result == OS_ERR) { if (PrintMiscellaneous && Verbose) { - warning("could not set shared memory file size: %s\n", strerror(errno)); + warning("could not set shared memory file size: %s\n", os::strerror(errno)); } ::close(fd); return -1; @@ -897,7 +898,7 @@ "Permission denied", OS_ERR); } else { - THROW_MSG_(vmSymbols::java_io_IOException(), strerror(errno), OS_ERR); + THROW_MSG_(vmSymbols::java_io_IOException(), os::strerror(errno), OS_ERR); } } int fd = result; @@ -970,7 +971,7 @@ if (mapAddress == MAP_FAILED) { if (PrintMiscellaneous && Verbose) { - warning("mmap failed - %s\n", strerror(errno)); + warning("mmap failed - %s\n", os::strerror(errno)); } remove_file(filename); FREE_C_HEAP_ARRAY(char, filename); @@ -1034,7 +1035,7 @@ RESTARTABLE(::fstat(fd, &statbuf), result); if (result == OS_ERR) { if (PrintMiscellaneous && Verbose) { - warning("fstat failed: %s\n", strerror(errno)); + warning("fstat failed: %s\n", os::strerror(errno)); } THROW_MSG_0(vmSymbols::java_io_IOException(), "Could not determine PerfMemory size"); @@ -1151,7 +1152,7 @@ if (mapAddress == MAP_FAILED) { if (PrintMiscellaneous && Verbose) { - warning("mmap failed: %s\n", strerror(errno)); + warning("mmap failed: %s\n", os::strerror(errno)); } THROW_MSG(vmSymbols::java_lang_OutOfMemoryError(), "Could not map PerfMemory"); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/os/posix/vm/os_posix.cpp --- a/hotspot/src/os/posix/vm/os_posix.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/os/posix/vm/os_posix.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1144,7 +1144,8 @@ #define check_with_errno(check_type, cond, msg) \ do { \ int err = errno; \ - check_type(cond, "%s; error='%s' (errno=%d)", msg, strerror(err), err); \ + check_type(cond, "%s; error='%s' (errno=%s)", msg, os::strerror(err), \ + os::errno_name(err)); \ } while (false) #define assert_with_errno(cond, msg) check_with_errno(assert, cond, msg) diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/os/solaris/vm/attachListener_solaris.cpp --- 
a/hotspot/src/os/solaris/vm/attachListener_solaris.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/os/solaris/vm/attachListener_solaris.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -461,7 +461,7 @@ while ((res = ::sema_wait(wakeup())) == EINTR) ; if (res) { - warning("sema_wait failed: %s", strerror(res)); + warning("sema_wait failed: %s", os::strerror(res)); return NULL; } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/os/solaris/vm/os_solaris.cpp --- a/hotspot/src/os/solaris/vm/os_solaris.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/os/solaris/vm/os_solaris.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1009,7 +1009,7 @@ (uintx) tid, describe_thr_create_attributes(buf, sizeof(buf), stack_size, flags)); } else { log_warning(os, thread)("Failed to start thread - thr_create failed (%s) for attributes: %s.", - strerror(status), describe_thr_create_attributes(buf, sizeof(buf), stack_size, flags)); + os::errno_name(status), describe_thr_create_attributes(buf, sizeof(buf), stack_size, flags)); } if (status != 0) { @@ -1354,7 +1354,7 @@ jlong os::javaTimeMillis() { timeval t; if (gettimeofday(&t, NULL) == -1) { - fatal("os::javaTimeMillis: gettimeofday (%s)", strerror(errno)); + fatal("os::javaTimeMillis: gettimeofday (%s)", os::strerror(errno)); } return jlong(t.tv_sec) * 1000 + jlong(t.tv_usec) / 1000; } @@ -1362,7 +1362,7 @@ void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) { timeval t; if (gettimeofday(&t, NULL) == -1) { - fatal("os::javaTimeSystemUTC: gettimeofday (%s)", strerror(errno)); + fatal("os::javaTimeSystemUTC: gettimeofday (%s)", os::strerror(errno)); } seconds = jlong(t.tv_sec); nanos = jlong(t.tv_usec) * 1000; @@ -1892,21 +1892,39 @@ static bool check_addr0(outputStream* st) { jboolean status = false; + const int read_chunk = 200; + int ret = 0; + int nmap = 0; int fd = ::open("/proc/self/map",O_RDONLY); if (fd >= 0) { - prmap_t p; - while (::read(fd, &p, sizeof(p)) > 0) { - if (p.pr_vaddr == 0x0) { - st->print("Warning: Address: 0x%x, Size: %dK, ",p.pr_vaddr, p.pr_size/1024, p.pr_mapname); - st->print("Mapped file: %s, ", p.pr_mapname[0] == '\0' ? "None" : p.pr_mapname); - st->print("Access:"); - st->print("%s",(p.pr_mflags & MA_READ) ? "r" : "-"); - st->print("%s",(p.pr_mflags & MA_WRITE) ? "w" : "-"); - st->print("%s",(p.pr_mflags & MA_EXEC) ? "x" : "-"); - st->cr(); - status = true; + prmap_t *p = NULL; + char *mbuff = (char *) calloc(read_chunk, sizeof(prmap_t)); + if (NULL == mbuff) { + ::close(fd); + return status; + } + while ((ret = ::read(fd, mbuff, read_chunk*sizeof(prmap_t))) > 0) { + //check if read() has not read partial data + if( 0 != ret % sizeof(prmap_t)){ + break; + } + nmap = ret / sizeof(prmap_t); + p = (prmap_t *)mbuff; + for(int i = 0; i < nmap; i++){ + if (p->pr_vaddr == 0x0) { + st->print("Warning: Address: " PTR_FORMAT ", Size: " SIZE_FORMAT "K, ",p->pr_vaddr, p->pr_size/1024); + st->print("Mapped file: %s, ", p->pr_mapname[0] == '\0' ? "None" : p->pr_mapname); + st->print("Access: "); + st->print("%s",(p->pr_mflags & MA_READ) ? "r" : "-"); + st->print("%s",(p->pr_mflags & MA_WRITE) ? "w" : "-"); + st->print("%s",(p->pr_mflags & MA_EXEC) ? 
"x" : "-"); + st->cr(); + status = true; + } + p++; } } + free(mbuff); ::close(fd); } return status; @@ -2142,7 +2160,7 @@ size_t os::lasterror(char *buf, size_t len) { if (errno == 0) return 0; - const char *s = ::strerror(errno); + const char *s = os::strerror(errno); size_t n = ::strlen(s); if (n >= len) { n = len - 1; @@ -2351,7 +2369,7 @@ int err) { warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", addr, bytes, exec, - strerror(err), err); + os::strerror(err), err); } static void warn_fail_commit_memory(char* addr, size_t bytes, @@ -2359,7 +2377,7 @@ int err) { warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT ", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", addr, bytes, - alignment_hint, exec, strerror(err), err); + alignment_hint, exec, os::strerror(err), err); } int os::Solaris::commit_memory_impl(char* addr, size_t bytes, bool exec) { @@ -2740,7 +2758,7 @@ char buf[256]; buf[0] = '\0'; if (addr == NULL) { - jio_snprintf(buf, sizeof(buf), ": %s", strerror(err)); + jio_snprintf(buf, sizeof(buf), ": %s", os::strerror(err)); } warning("attempt_reserve_memory_at: couldn't reserve " SIZE_FORMAT " bytes at " PTR_FORMAT ": reserve_memory_helper returned " PTR_FORMAT @@ -4354,7 +4372,7 @@ page_size = sysconf(_SC_PAGESIZE); if (page_size == -1) { - fatal("os_solaris.cpp: os::init: sysconf failed (%s)", strerror(errno)); + fatal("os_solaris.cpp: os::init: sysconf failed (%s)", os::strerror(errno)); } init_page_sizes((size_t) page_size); @@ -4366,7 +4384,7 @@ int fd = ::open("/dev/zero", O_RDWR); if (fd < 0) { - fatal("os::init: cannot open /dev/zero (%s)", strerror(errno)); + fatal("os::init: cannot open /dev/zero (%s)", os::strerror(errno)); } else { Solaris::set_dev_zero_fd(fd); @@ -5607,7 +5625,7 @@ if (pid < 0) { // fork failed - warning("fork failed: %s", strerror(errno)); + warning("fork failed: %s", os::strerror(errno)); return -1; } else if (pid == 0) { diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/os/solaris/vm/perfMemory_solaris.cpp --- a/hotspot/src/os/solaris/vm/perfMemory_solaris.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/os/solaris/vm/perfMemory_solaris.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -102,7 +102,7 @@ if (result == OS_ERR) { if (PrintMiscellaneous && Verbose) { warning("Could not create Perfdata save file: %s: %s\n", - destfile, strerror(errno)); + destfile, os::strerror(errno)); } } else { @@ -114,7 +114,7 @@ if (result == OS_ERR) { if (PrintMiscellaneous && Verbose) { warning("Could not write Perfdata save file: %s: %s\n", - destfile, strerror(errno)); + destfile, os::strerror(errno)); } break; } @@ -125,7 +125,7 @@ result = ::close(fd); if (PrintMiscellaneous && Verbose) { if (result == OS_ERR) { - warning("Could not close %s: %s\n", destfile, strerror(errno)); + warning("Could not close %s: %s\n", destfile, os::strerror(errno)); } } } @@ -311,7 +311,7 @@ if (errno == ELOOP) { warning("directory %s is a symlink and is not secure\n", dirname); } else { - warning("could not open directory %s: %s\n", dirname, strerror(errno)); + warning("could not open directory %s: %s\n", dirname, os::strerror(errno)); } } return dirp; @@ -422,7 +422,7 @@ RESTARTABLE(::fstat(fd, &statbuf), result); if (result == OS_ERR) { if (PrintMiscellaneous && Verbose) { - warning("fstat failed on %s: %s\n", filename, strerror(errno)); + warning("fstat failed on %s: %s\n", filename, os::strerror(errno)); } return false; } @@ -464,7 +464,7 @@ if (PrintMiscellaneous && Verbose) { if (p == NULL) { warning("Could not retrieve passwd entry: %s\n", - strerror(errno)); + os::strerror(errno)); } else { warning("Could not determine user name: %s\n", @@ -500,7 +500,7 @@ "Process not found"); } else /* EPERM */ { - THROW_MSG_0(vmSymbols::java_io_IOException(), strerror(errno)); + THROW_MSG_0(vmSymbols::java_io_IOException(), os::strerror(errno)); } } @@ -657,7 +657,7 @@ // In this case, the psinfo file for the process id existed, // but we didn't have permission to access it. 
THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), - strerror(errno)); + os::strerror(errno)); } // at this point, we don't know if the process id itself doesn't @@ -703,7 +703,7 @@ if (PrintMiscellaneous && Verbose && result == OS_ERR) { if (errno != ENOENT) { warning("Could not unlink shared memory backing" - " store file %s : %s\n", path, strerror(errno)); + " store file %s : %s\n", path, os::strerror(errno)); } } } @@ -813,7 +813,7 @@ // if (PrintMiscellaneous && Verbose) { warning("could not create directory %s: %s\n", - dirname, strerror(errno)); + dirname, os::strerror(errno)); } return false; } @@ -855,7 +855,7 @@ if (errno == ELOOP) { warning("file %s is a symlink and is not secure\n", filename); } else { - warning("could not create file %s: %s\n", filename, strerror(errno)); + warning("could not create file %s: %s\n", filename, os::strerror(errno)); } } // close the directory and reset the current working directory @@ -879,7 +879,7 @@ RESTARTABLE(::ftruncate(fd, (off_t)0), result); if (result == OS_ERR) { if (PrintMiscellaneous && Verbose) { - warning("could not truncate shared memory file: %s\n", strerror(errno)); + warning("could not truncate shared memory file: %s\n", os::strerror(errno)); } ::close(fd); return -1; @@ -888,7 +888,7 @@ RESTARTABLE(::ftruncate(fd, (off_t)size), result); if (result == OS_ERR) { if (PrintMiscellaneous && Verbose) { - warning("could not set shared memory file size: %s\n", strerror(errno)); + warning("could not set shared memory file size: %s\n", os::strerror(errno)); } ::close(fd); return -1; @@ -916,7 +916,7 @@ "Permission denied", OS_ERR); } else { - THROW_MSG_(vmSymbols::java_io_IOException(), strerror(errno), OS_ERR); + THROW_MSG_(vmSymbols::java_io_IOException(), os::strerror(errno), OS_ERR); } } int fd = result; @@ -990,7 +990,7 @@ if (mapAddress == MAP_FAILED) { if (PrintMiscellaneous && Verbose) { - warning("mmap failed - %s\n", strerror(errno)); + warning("mmap failed - %s\n", os::strerror(errno)); } remove_file(filename); FREE_C_HEAP_ARRAY(char, filename); @@ -1055,7 +1055,7 @@ RESTARTABLE(::fstat(fd, &statbuf), result); if (result == OS_ERR) { if (PrintMiscellaneous && Verbose) { - warning("fstat failed: %s\n", strerror(errno)); + warning("fstat failed: %s\n", os::strerror(errno)); } THROW_MSG_0(vmSymbols::java_io_IOException(), "Could not determine PerfMemory size"); @@ -1172,7 +1172,7 @@ if (mapAddress == MAP_FAILED) { if (PrintMiscellaneous && Verbose) { - warning("mmap failed: %s\n", strerror(errno)); + warning("mmap failed: %s\n", os::strerror(errno)); } THROW_MSG(vmSymbols::java_lang_OutOfMemoryError(), "Could not map PerfMemory"); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/os/solaris/vm/threadCritical_solaris.cpp --- a/hotspot/src/os/solaris/vm/threadCritical_solaris.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/os/solaris/vm/threadCritical_solaris.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -23,6 +23,7 @@ */ #include "precompiled.hpp" +#include "runtime/os.hpp" #include "runtime/thread.inline.hpp" #include "runtime/threadCritical.hpp" @@ -49,7 +50,7 @@ if (global_mut_owner != owner) { if (os::Solaris::mutex_lock(&global_mut)) fatal("ThreadCritical::ThreadCritical: mutex_lock failed (%s)", - strerror(errno)); + os::strerror(errno)); assert(global_mut_count == 0, "must have clean count"); assert(global_mut_owner == -1, "must have clean owner"); } @@ -68,7 +69,7 @@ if (global_mut_count == 0) { global_mut_owner = -1; if (os::Solaris::mutex_unlock(&global_mut)) - fatal("ThreadCritical::~ThreadCritical: mutex_unlock failed (%s)", strerror(errno)); + fatal("ThreadCritical::~ThreadCritical: mutex_unlock failed (%s)", os::strerror(errno)); } } else { assert (Threads::number_of_threads() == 0, "valid only during initialization"); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/os/windows/vm/os_windows.cpp --- a/hotspot/src/os/windows/vm/os_windows.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/os/windows/vm/os_windows.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -642,7 +642,7 @@ thread_id, describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag)); } else { log_warning(os, thread)("Failed to start thread - _beginthreadex failed (%s) for attributes: %s.", - strerror(errno), describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag)); + os::errno_name(errno), describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag)); } if (thread_handle == NULL) { @@ -1898,7 +1898,7 @@ if (errno != 0) { // C runtime error that has no corresponding DOS error code - const char* s = strerror(errno); + const char* s = os::strerror(errno); size_t n = strlen(s); if (n >= len) n = len - 1; strncpy(buf, s, n); @@ -2186,13 +2186,6 @@ // Windows Vista/2008 heap corruption check #define EXCEPTION_HEAP_CORRUPTION 0xC0000374 -#define def_excpt(val) #val, val - -struct siglabel { - char *name; - int number; -}; - // All Visual C++ exceptions thrown from code generated by the Microsoft Visual // C++ compiler contain this error code. Because this is a compiler-generated // error, the code is not listed in the Win32 API header files. @@ -2202,8 +2195,9 @@ #define EXCEPTION_UNCAUGHT_CXX_EXCEPTION 0xE06D7363 - -struct siglabel exceptlabels[] = { +#define def_excpt(val) { #val, (val) } + +static const struct { char* name; uint number; } exceptlabels[] = { def_excpt(EXCEPTION_ACCESS_VIOLATION), def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT), def_excpt(EXCEPTION_BREAKPOINT), @@ -2228,16 +2222,18 @@ def_excpt(EXCEPTION_GUARD_PAGE), def_excpt(EXCEPTION_INVALID_HANDLE), def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION), - def_excpt(EXCEPTION_HEAP_CORRUPTION), + def_excpt(EXCEPTION_HEAP_CORRUPTION) #ifdef _M_IA64 - def_excpt(EXCEPTION_REG_NAT_CONSUMPTION), + , def_excpt(EXCEPTION_REG_NAT_CONSUMPTION) #endif - NULL, 0 }; +#undef def_excpt + const char* os::exception_name(int exception_code, char *buf, size_t size) { - for (int i = 0; exceptlabels[i].name != NULL; i++) { - if (exceptlabels[i].number == exception_code) { + uint code = static_cast<uint>(exception_code); + for (uint i = 0; i < ARRAY_SIZE(exceptlabels); ++i) { + if (exceptlabels[i].number == code) { jio_snprintf(buf, size, "%s", exceptlabels[i].name); return buf; } @@ -2445,7 +2441,7 @@ jio_snprintf(buf, sizeof(buf), "Execution protection violation " "at " INTPTR_FORMAT ", unguarding " INTPTR_FORMAT ": %s", addr, - page_start, (res ?
"success" : strerror(errno))); + page_start, (res ? "success" : os::strerror(errno))); tty->print_raw_cr(buf); } @@ -5638,9 +5634,11 @@ "TERM", SIGTERM, // software term signal from kill "BREAK", SIGBREAK, // Ctrl-Break sequence "ILL", SIGILL}; // illegal instruction - for(int i=0;i 0;) { @@ -105,7 +105,7 @@ if (nbytes == OS_ERR) { if (PrintMiscellaneous && Verbose) { warning("Could not write Perfdata save file: %s: %s\n", - destfile, strerror(errno)); + destfile, os::strerror(errno)); } break; } @@ -117,7 +117,7 @@ int result = ::_close(fd); if (PrintMiscellaneous && Verbose) { if (result == OS_ERR) { - warning("Could not close %s: %s\n", destfile, strerror(errno)); + warning("Could not close %s: %s\n", destfile, os::strerror(errno)); } } } @@ -497,7 +497,7 @@ if (PrintMiscellaneous && Verbose) { if (errno != ENOENT) { warning("Could not unlink shared memory backing" - " store file %s : %s\n", path, strerror(errno)); + " store file %s : %s\n", path, os::strerror(errno)); } } } @@ -1358,7 +1358,7 @@ if (ret_code == OS_ERR) { if (PrintMiscellaneous && Verbose) { warning("Could not get status information from file %s: %s\n", - filename, strerror(errno)); + filename, os::strerror(errno)); } CloseHandle(fmh); CloseHandle(fh); @@ -1553,7 +1553,7 @@ // if (::stat(filename, &statbuf) == OS_ERR) { if (PrintMiscellaneous && Verbose) { - warning("stat %s failed: %s\n", filename, strerror(errno)); + warning("stat %s failed: %s\n", filename, os::strerror(errno)); } THROW_MSG_0(vmSymbols::java_io_IOException(), "Could not determine PerfMemory size"); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/os_cpu/windows_x86/vm/os_windows_x86.cpp --- a/hotspot/src/os_cpu/windows_x86/vm/os_windows_x86.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/os_cpu/windows_x86/vm/os_windows_x86.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -33,6 +33,7 @@ #include "interpreter/interpreter.hpp" #include "jvm_windows.h" #include "memory/allocation.inline.hpp" +#include "memory/resourceArea.hpp" #include "mutex_windows.inline.hpp" #include "nativeInst_x86.hpp" #include "os_share_windows.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/c1/c1_Compilation.cpp --- a/hotspot/src/share/vm/c1/c1_Compilation.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/c1/c1_Compilation.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -35,7 +35,9 @@ #include "code/debugInfoRec.hpp" #include "compiler/compileLog.hpp" #include "compiler/compilerDirectives.hpp" +#include "memory/resourceArea.hpp" #include "runtime/sharedRuntime.hpp" +#include "runtime/timerTrace.hpp" typedef enum { _t_compile, diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/c1/c1_GraphBuilder.cpp --- a/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2015, Oracle and/or its affiliates. 
All rights reserved. + * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -34,6 +34,7 @@ #include "ci/ciMemberName.hpp" #include "compiler/compileBroker.hpp" #include "interpreter/bytecode.hpp" +#include "memory/resourceArea.hpp" #include "oops/oop.inline.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/compilationPolicy.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/c1/c1_IR.cpp --- a/hotspot/src/share/vm/c1/c1_IR.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/c1/c1_IR.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,6 +29,7 @@ #include "c1/c1_IR.hpp" #include "c1/c1_InstructionPrinter.hpp" #include "c1/c1_Optimizer.hpp" +#include "memory/resourceArea.hpp" #include "utilities/bitMap.inline.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/c1/c1_LIRAssembler.cpp --- a/hotspot/src/share/vm/c1/c1_LIRAssembler.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/c1/c1_LIRAssembler.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -556,17 +556,16 @@ leal(op->in_opr(), op->result_opr()); break; - case lir_null_check: - if (GenerateCompilerNullChecks) { - ImplicitNullCheckStub* stub = add_debug_info_for_null_check_here(op->info()); + case lir_null_check: { + ImplicitNullCheckStub* stub = add_debug_info_for_null_check_here(op->info()); - if (op->in_opr()->is_single_cpu()) { - _masm->null_check(op->in_opr()->as_register(), stub->entry()); - } else { - Unimplemented(); - } + if (op->in_opr()->is_single_cpu()) { + _masm->null_check(op->in_opr()->as_register(), stub->entry()); + } else { + Unimplemented(); } break; + } case lir_monaddr: monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr()); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/c1/c1_LIRGenerator.cpp --- a/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -2041,8 +2041,7 @@ // to avoid a fixed interval with an oop during the null check. // Use a copy of the CodeEmitInfo because debug information is // different for null_check and throw. - if (GenerateCompilerNullChecks && - (x->exception()->as_NewInstance() == NULL && x->exception()->as_ExceptionObject() == NULL)) { + if (x->exception()->as_NewInstance() == NULL && x->exception()->as_ExceptionObject() == NULL) { // if the exception object wasn't created using new then it might be null. 
__ null_check(exception_opr, new CodeEmitInfo(info, x->state()->copy(ValueStack::ExceptionState, x->state()->bci()))); } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/c1/c1_LinearScan.cpp --- a/hotspot/src/share/vm/c1/c1_LinearScan.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/c1/c1_LinearScan.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -32,6 +32,7 @@ #include "c1/c1_LinearScan.hpp" #include "c1/c1_ValueStack.hpp" #include "code/vmreg.inline.hpp" +#include "runtime/timerTrace.hpp" #include "utilities/bitMap.inline.hpp" #ifndef PRODUCT diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/c1/c1_Optimizer.cpp --- a/hotspot/src/share/vm/c1/c1_Optimizer.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/c1/c1_Optimizer.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,6 +28,7 @@ #include "c1/c1_ValueMap.hpp" #include "c1/c1_ValueSet.hpp" #include "c1/c1_ValueStack.hpp" +#include "memory/resourceArea.hpp" #include "utilities/bitMap.inline.hpp" #include "compiler/compileLog.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/c1/c1_ValueType.cpp --- a/hotspot/src/share/vm/c1/c1_ValueType.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/c1/c1_ValueType.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,6 +27,7 @@ #include "ci/ciArray.hpp" #include "ci/ciInstance.hpp" #include "ci/ciNullObject.hpp" +#include "memory/resourceArea.hpp" // predefined types diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/c1/c1_globals.hpp --- a/hotspot/src/share/vm/c1/c1_globals.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/c1/c1_globals.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -176,7 +176,7 @@ product(bool, InlineSynchronizedMethods, true, \ "Inline synchronized methods") \ \ - diagnostic(bool, InlineNIOCheckIndex, true, \ + develop(bool, InlineNIOCheckIndex, true, \ "Intrinsify java.nio.Buffer.checkIndex") \ \ develop(bool, CanonicalizeNodes, true, \ diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/ci/ciEnv.cpp --- a/hotspot/src/share/vm/ci/ciEnv.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/ci/ciEnv.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -43,6 +43,7 @@ #include "interpreter/linkResolver.hpp" #include "memory/allocation.inline.hpp" #include "memory/oopFactory.hpp" +#include "memory/resourceArea.hpp" #include "memory/universe.inline.hpp" #include "oops/methodData.hpp" #include "oops/objArrayKlass.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/ci/ciInstanceKlass.cpp --- a/hotspot/src/share/vm/ci/ciInstanceKlass.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/ci/ciInstanceKlass.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,6 +30,7 @@ #include "classfile/systemDictionary.hpp" #include "memory/allocation.hpp" #include "memory/allocation.inline.hpp" +#include "memory/resourceArea.hpp" #include "oops/oop.inline.hpp" #include "oops/fieldStreams.hpp" #include "runtime/fieldDescriptor.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/ci/ciReplay.cpp --- a/hotspot/src/share/vm/ci/ciReplay.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/ci/ciReplay.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,4 +1,5 @@ -/* Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved. +/* + * Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,6 +29,7 @@ #include "ci/ciKlass.hpp" #include "ci/ciUtilities.hpp" #include "compiler/compileBroker.hpp" +#include "gc/shared/referencePendingListLocker.hpp" #include "memory/allocation.inline.hpp" #include "memory/oopFactory.hpp" #include "memory/resourceArea.hpp" @@ -574,7 +576,7 @@ Method* method = parse_method(CHECK); if (had_error()) return; /* just copied from Method, to build interpret data*/ - if (InstanceRefKlass::owns_pending_list_lock((JavaThread*)THREAD)) { + if (ReferencePendingListLocker::is_locked_by_self()) { return; } // To be properly initialized, some profiling in the MDO needs the diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/ci/ciSignature.cpp --- a/hotspot/src/share/vm/ci/ciSignature.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/ci/ciSignature.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,6 +27,7 @@ #include "ci/ciSignature.hpp" #include "ci/ciUtilities.hpp" #include "memory/allocation.inline.hpp" +#include "memory/resourceArea.hpp" #include "oops/oop.inline.hpp" #include "runtime/signature.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/ci/ciType.cpp --- a/hotspot/src/share/vm/ci/ciType.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/ci/ciType.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,6 +27,7 @@ #include "ci/ciType.hpp" #include "ci/ciUtilities.hpp" #include "classfile/systemDictionary.hpp" +#include "memory/resourceArea.hpp" #include "oops/oop.inline.hpp" ciType* ciType::_basic_types[T_CONFLICT+1]; diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/ci/ciTypeFlow.cpp --- a/hotspot/src/share/vm/ci/ciTypeFlow.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/ci/ciTypeFlow.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -35,6 +35,7 @@ #include "interpreter/bytecode.hpp" #include "interpreter/bytecodes.hpp" #include "memory/allocation.inline.hpp" +#include "memory/resourceArea.hpp" #include "oops/oop.inline.hpp" #include "opto/compile.hpp" #include "opto/node.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/classfile/classFileParser.cpp --- a/hotspot/src/share/vm/classfile/classFileParser.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/classfile/classFileParser.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1967,7 +1967,7 @@ loader_data->is_platform_class_loader_data() || loader_data->is_anonymous(); switch (sid) { - case vmSymbols::VM_SYMBOL_ENUM_NAME(sun_reflect_CallerSensitive_signature): { + case vmSymbols::VM_SYMBOL_ENUM_NAME(reflect_CallerSensitive_signature): { if (_location != _in_method) break; // only allow for methods if (!privileged) break; // only allow in privileged code return _method_CallerSensitive; @@ -5372,12 +5372,12 @@ } } - if (log_is_enabled(Info, classresolve)) { + if (log_is_enabled(Debug, classresolve)) { ResourceMark rm; // print out the superclass. const char * from = ik->external_name(); if (ik->java_super() != NULL) { - log_info(classresolve)("%s %s (super)", + log_debug(classresolve)("%s %s (super)", from, ik->java_super()->external_name()); } @@ -5388,7 +5388,7 @@ for (int i = 0; i < length; i++) { const Klass* const k = local_interfaces->at(i); const char * to = k->external_name(); - log_info(classresolve)("%s %s (interface)", from, to); + log_debug(classresolve)("%s %s (interface)", from, to); } } } @@ -5698,15 +5698,16 @@ } if (!is_internal()) { - if (TraceClassLoadingPreorder) { - tty->print("[Loading %s", - _class_name->as_klass_external_name()); - + if (log_is_enabled(Debug, classload, preorder)){ + ResourceMark rm(THREAD); + outputStream* log = Log(classload, preorder)::debug_stream(); + log->print("%s", _class_name->as_klass_external_name()); if (stream->source() != NULL) { - tty->print(" from %s", stream->source()); + log->print(" source: %s", stream->source()); } - tty->print_cr("]"); + log->cr(); } + #if INCLUDE_CDS if (DumpLoadedClassList != NULL && stream->source() != NULL && classlist_file->is_open()) { // Only dump the classes that can be stored into CDS archive diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/classfile/classFileStream.cpp --- a/hotspot/src/share/vm/classfile/classFileStream.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/classfile/classFileStream.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,7 @@ #include "precompiled.hpp" #include "classfile/classFileStream.hpp" #include "classfile/vmSymbols.hpp" +#include "memory/resourceArea.hpp" const bool ClassFileStream::verify = true; const bool ClassFileStream::no_verification = false; diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/classfile/classLoader.cpp --- a/hotspot/src/share/vm/classfile/classLoader.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/classfile/classLoader.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -44,6 +44,7 @@ #include "memory/allocation.inline.hpp" #include "memory/filemap.hpp" #include "memory/oopFactory.hpp" +#include "memory/resourceArea.hpp" #include "memory/universe.inline.hpp" #include "oops/instanceKlass.hpp" #include "oops/instanceRefKlass.hpp" @@ -226,11 +227,12 @@ return NULL; } -ClassPathZipEntry::ClassPathZipEntry(jzfile* zip, const char* zip_name) : ClassPathEntry() { +ClassPathZipEntry::ClassPathZipEntry(jzfile* zip, const char* zip_name, bool is_boot_append) : ClassPathEntry() { _zip = zip; char *copy = NEW_C_HEAP_ARRAY(char, strlen(zip_name)+1, mtClass); strcpy(copy, zip_name); _zip_name = copy; + _is_boot_append = is_boot_append; } ClassPathZipEntry::~ClassPathZipEntry() { @@ -274,11 +276,79 @@ return buffer; } +#if INCLUDE_CDS +u1* ClassPathZipEntry::open_versioned_entry(const char* name, jint* filesize, TRAPS) { + u1* buffer = NULL; + if (!_is_boot_append) { + assert(DumpSharedSpaces, "Should be called only for non-boot entries during dump time"); + // We presume default is multi-release enabled + const char* multi_ver = Arguments::get_property("jdk.util.jar.enableMultiRelease"); + const char* verstr = Arguments::get_property("jdk.util.jar.version"); + bool is_multi_ver = (multi_ver == NULL || + strcmp(multi_ver, "true") == 0 || + strcmp(multi_ver, "force") == 0) && + is_multiple_versioned(THREAD); + // command line version setting + int version = 0; + const int base_version = 8; // JDK8 + int cur_ver = JDK_Version::current().major_version(); + if (verstr != NULL) { + version = atoi(verstr); + if (version < base_version || version > cur_ver) { + is_multi_ver = false; + // print out warning, do not use assertion here since it will continue to look + // for proper version. 
+ warning("JDK%d is not supported in multiple version jars", version); + } + } + + if (is_multi_ver) { + int n; + char entry_name[JVM_MAXPATHLEN]; + if (version > 0) { + n = jio_snprintf(entry_name, sizeof(entry_name), "META-INF/versions/%d/%s", version, name); + entry_name[n] = '\0'; + buffer = open_entry((const char*)entry_name, filesize, false, CHECK_NULL); + if (buffer == NULL) { + warning("Could not find %s in %s, try to find highest version instead", entry_name, _zip_name); + } + } + if (buffer == NULL) { + for (int i = cur_ver; i >= base_version; i--) { + n = jio_snprintf(entry_name, sizeof(entry_name), "META-INF/versions/%d/%s", i, name); + entry_name[n] = '\0'; + buffer = open_entry((const char*)entry_name, filesize, false, CHECK_NULL); + if (buffer != NULL) { + break; + } + } + } + } + } + return buffer; +} + +bool ClassPathZipEntry::is_multiple_versioned(TRAPS) { + assert(DumpSharedSpaces, "called only at dump time"); + jint size; + char* buffer = (char*)open_entry("META-INF/MANIFEST.MF", &size, false, CHECK_false); + if (buffer != NULL) { + if (strstr(buffer, "Multi-Release: true") != NULL) { + return true; + } + } + return false; +} +#endif // INCLUDE_CDS + ClassFileStream* ClassPathZipEntry::open_stream(const char* name, TRAPS) { jint filesize; - const u1* buffer = open_entry(name, &filesize, false, CHECK_NULL); + u1* buffer = open_versioned_entry(name, &filesize, CHECK_NULL); if (buffer == NULL) { - return NULL; + buffer = open_entry(name, &filesize, false, CHECK_NULL); + if (buffer == NULL) { + return NULL; + } } if (UsePerfData) { ClassLoader::perf_sys_classfile_bytes_read()->inc(filesize); @@ -466,7 +536,7 @@ void ClassLoader::trace_class_path(const char* msg, const char* name) { if (log_is_enabled(Info, classpath)) { ResourceMark rm; - outputStream* out = LogHandle(classpath)::info_stream(); + outputStream* out = Log(classpath)::info_stream(); if (msg) { out->print("%s", msg); } @@ -558,7 +628,7 @@ char* path = NEW_RESOURCE_ARRAY(char, end - start + 1); strncpy(path, &class_path[start], end - start); path[end - start] = '\0'; - update_class_path_entry_list(path, false, mark_append_entry, false); + update_class_path_entry_list(path, false, mark_append_entry, false, bootstrap_search); // Check on the state of the boot loader's append path if (mark_append_entry && (_first_append_entry == NULL)) { @@ -582,7 +652,8 @@ } ClassPathEntry* ClassLoader::create_class_path_entry(const char *path, const struct stat* st, - bool throw_exception, TRAPS) { + bool throw_exception, + bool is_boot_append, TRAPS) { JavaThread* thread = JavaThread::current(); ClassPathEntry* new_entry = NULL; if ((st->st_mode & S_IFREG) == S_IFREG) { @@ -611,7 +682,7 @@ zip = (*ZipOpen)(canonical_path, &error_msg); } if (zip != NULL && error_msg == NULL) { - new_entry = new ClassPathZipEntry(zip, path); + new_entry = new ClassPathZipEntry(zip, path, is_boot_append); } else { ResourceMark rm(thread); char *msg; @@ -644,7 +715,7 @@ // Create a class path zip entry for a given path (return NULL if not found // or zip/JAR file cannot be opened) -ClassPathZipEntry* ClassLoader::create_class_path_zip_entry(const char *path) { +ClassPathZipEntry* ClassLoader::create_class_path_zip_entry(const char *path, bool is_boot_append) { // check for a regular file struct stat st; if (os::stat(path, &st) == 0) { @@ -662,7 +733,7 @@ } if (zip != NULL && error_msg == NULL) { // create using canonical path - return new ClassPathZipEntry(zip, canonical_path); + return new ClassPathZipEntry(zip, canonical_path, is_boot_append); } } 
} @@ -720,11 +791,11 @@ } void ClassLoader::add_to_list(const char *apath) { - update_class_path_entry_list((char*)apath, false, false, false); + update_class_path_entry_list((char*)apath, false, false, false, false); } void ClassLoader::prepend_to_list(const char *apath) { - update_class_path_entry_list((char*)apath, false, false, true); + update_class_path_entry_list((char*)apath, false, false, true, false); } // Returns true IFF the file/dir exists and the entry was successfully created. @@ -732,13 +803,14 @@ bool check_for_duplicates, bool mark_append_entry, bool prepend_entry, + bool is_boot_append, bool throw_exception) { struct stat st; if (os::stat(path, &st) == 0) { // File or directory found ClassPathEntry* new_entry = NULL; Thread* THREAD = Thread::current(); - new_entry = create_class_path_entry(path, &st, throw_exception, CHECK_(false)); + new_entry = create_class_path_entry(path, &st, throw_exception, is_boot_append, CHECK_(false)); if (new_entry == NULL) { return false; } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/classfile/classLoader.hpp --- a/hotspot/src/share/vm/classfile/classLoader.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/classfile/classLoader.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -104,16 +104,19 @@ private: jzfile* _zip; // The zip archive const char* _zip_name; // Name of zip archive + bool _is_boot_append; // entry coming from -Xbootclasspath/a public: bool is_jrt() { return false; } bool is_jar_file() const { return true; } const char* name() const { return _zip_name; } JImageFile* jimage() const { return NULL; } - ClassPathZipEntry(jzfile* zip, const char* zip_name); + ClassPathZipEntry(jzfile* zip, const char* zip_name, bool is_boot_append); ~ClassPathZipEntry(); u1* open_entry(const char* name, jint* filesize, bool nul_terminate, TRAPS); + u1* open_versioned_entry(const char* name, jint* filesize, TRAPS) NOT_CDS_RETURN_(NULL); ClassFileStream* open_stream(const char* name, TRAPS); void contents_do(void f(const char* name, void* context), void* context); + bool is_multiple_versioned(TRAPS) NOT_CDS_RETURN_(false); // Debugging NOT_PRODUCT(void compile_the_world(Handle loader, TRAPS);) }; @@ -223,7 +226,8 @@ static void load_zip_library(); static void load_jimage_library(); static ClassPathEntry* create_class_path_entry(const char *path, const struct stat* st, - bool throw_exception, TRAPS); + bool throw_exception, + bool is_boot_append, TRAPS); public: @@ -249,6 +253,7 @@ bool check_for_duplicates, bool mark_append_entry, bool prepend_entry, + bool is_boot_append, bool throw_exception=true); static void print_bootclasspath(); @@ -394,7 +399,7 @@ static void prepend_to_list(ClassPathEntry* new_entry); // creates a class path zip entry (returns NULL if JAR file cannot be opened) - static ClassPathZipEntry* create_class_path_zip_entry(const char *apath); + static ClassPathZipEntry* create_class_path_zip_entry(const char *apath, bool is_boot_append); // add a path to class path list static void add_to_list(const char* apath); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/classfile/classLoaderData.cpp --- a/hotspot/src/share/vm/classfile/classLoaderData.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/classfile/classLoaderData.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -60,6 +60,7 @@ #include "memory/metadataFactory.hpp" #include "memory/metaspaceShared.hpp" #include "memory/oopFactory.hpp" +#include "memory/resourceArea.hpp" #include "oops/objArrayOop.inline.hpp" #include "oops/oop.inline.hpp" #include 
"runtime/atomic.inline.hpp" @@ -357,7 +358,7 @@ if (log_is_enabled(Debug, classloaderdata)) { ResourceMark rm; - outputStream* log = LogHandle(classloaderdata)::debug_stream(); + outputStream* log = Log(classloaderdata)::debug_stream(); log->print(": unload loader data " INTPTR_FORMAT, p2i(this)); log->print(" for instance " INTPTR_FORMAT " of %s", p2i((void *)class_loader()), loader_name()); @@ -717,7 +718,7 @@ } ResourceMark rm; - outputStream* log = LogHandle(classloaderdata)::debug_stream(); + outputStream* log = Log(classloaderdata)::debug_stream(); log->print("create class loader data " INTPTR_FORMAT, p2i(cld)); log->print(" for instance " INTPTR_FORMAT " of %s", p2i((void *)cld->class_loader()), cld->loader_name()); @@ -859,7 +860,7 @@ array->push(curr); if (log_is_enabled(Debug, classloaderdata)) { - outputStream* log = LogHandle(classloaderdata)::debug_stream(); + outputStream* log = Log(classloaderdata)::debug_stream(); log->print("found new CLD: "); curr->print_value_on(log); log->cr(); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/classfile/defaultMethods.cpp --- a/hotspot/src/share/vm/classfile/defaultMethods.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/classfile/defaultMethods.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -437,7 +437,7 @@ _exception_name = vmSymbols::java_lang_IncompatibleClassChangeError(); if (log_is_enabled(Debug, defaultmethods)) { ResourceMark rm; - outputStream* logstream = LogHandle(defaultmethods)::debug_stream(); + outputStream* logstream = Log(defaultmethods)::debug_stream(); _exception_message->print_value_on(logstream); logstream->cr(); } @@ -663,7 +663,7 @@ if (log_is_enabled(Debug, defaultmethods)) { log_debug(defaultmethods)("Slots that need filling:"); ResourceMark rm; - outputStream* logstream = LogHandle(defaultmethods)::debug_stream(); + outputStream* logstream = Log(defaultmethods)::debug_stream(); streamIndentor si(logstream); for (int i = 0; i < slots->length(); ++i) { logstream->indent(); @@ -799,7 +799,7 @@ log_debug(defaultmethods)("%s %s requires default method processing", klass->is_interface() ? 
"Interface" : "Class", klass->name()->as_klass_external_name()); - PrintHierarchy printer(LogHandle(defaultmethods)::debug_stream()); + PrintHierarchy printer(Log(defaultmethods)::debug_stream()); printer.run(klass); } @@ -809,7 +809,7 @@ for (int i = 0; i < empty_slots->length(); ++i) { EmptyVtableSlot* slot = empty_slots->at(i); if (log_is_enabled(Debug, defaultmethods)) { - outputStream* logstream = LogHandle(defaultmethods)::debug_stream(); + outputStream* logstream = Log(defaultmethods)::debug_stream(); streamIndentor si(logstream, 2); logstream->indent().print("Looking for default methods for slot "); slot->print_on(logstream); @@ -917,7 +917,7 @@ if (log_is_enabled(Debug, defaultmethods)) { ResourceMark rm; - outputStream* logstream = LogHandle(defaultmethods)::debug_stream(); + outputStream* logstream = Log(defaultmethods)::debug_stream(); logstream->print("for slot: "); slot->print_on(logstream); logstream->cr(); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/classfile/dictionary.cpp --- a/hotspot/src/share/vm/classfile/dictionary.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/classfile/dictionary.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -29,6 +29,7 @@ #include "classfile/systemDictionary.hpp" #include "classfile/systemDictionaryShared.hpp" #include "memory/iterator.hpp" +#include "memory/resourceArea.hpp" #include "oops/oop.inline.hpp" #include "prims/jvmtiRedefineClassesTrace.hpp" #include "runtime/orderAccess.inline.hpp" @@ -137,7 +138,7 @@ } if (log_is_enabled(Trace, protectiondomain)) { ResourceMark rm; - outputStream* log = LogHandle(protectiondomain)::trace_stream(); + outputStream* log = Log(protectiondomain)::trace_stream(); print_count(log); } } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/classfile/javaClasses.cpp --- a/hotspot/src/share/vm/classfile/javaClasses.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/classfile/javaClasses.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -515,22 +515,6 @@ return result; } -unsigned int java_lang_String::hash_string(oop java_string) { - int length = java_lang_String::length(java_string); - // Zero length string doesn't necessarily hash to zero. 
- if (length == 0) { - return StringTable::hash_string((jchar*) NULL, 0); - } - - typeArrayOop value = java_lang_String::value(java_string); - bool is_latin1 = java_lang_String::is_latin1(java_string); - if (is_latin1) { - return StringTable::hash_string(value->byte_at_addr(0), length); - } else { - return StringTable::hash_string(value->char_at_addr(0), length); - } -} - Symbol* java_lang_String::as_symbol(Handle java_string, TRAPS) { oop obj = java_string(); typeArrayOop value = java_lang_String::value(obj); @@ -1473,6 +1457,12 @@ compute_offset(_ngroups_offset, k, vmSymbols::ngroups_name(), vmSymbols::int_signature()); } + +void java_lang_Throwable::compute_offsets() { + Klass* k = SystemDictionary::Throwable_klass(); + compute_offset(depth_offset, k, vmSymbols::depth_name(), vmSymbols::int_signature()); +} + oop java_lang_Throwable::unassigned_stacktrace() { InstanceKlass* ik = SystemDictionary::Throwable_klass(); address addr = ik->static_field_addr(static_unassigned_stacktrace_offset); @@ -1492,11 +1482,13 @@ throwable->release_obj_field_put(backtrace_offset, value); } - -oop java_lang_Throwable::message(oop throwable) { - return throwable->obj_field(detailMessage_offset); -} - +int java_lang_Throwable::depth(oop throwable) { + return throwable->int_field(depth_offset); +} + +void java_lang_Throwable::set_depth(oop throwable, int value) { + throwable->int_field_put(depth_offset, value); +} oop java_lang_Throwable::message(Handle throwable) { return throwable->obj_field(detailMessage_offset); @@ -1546,10 +1538,12 @@ return method != NULL && (method->constants()->version() == version); } + // This class provides a simple wrapper over the internal structure of // exception backtrace to insulate users of the backtrace from needing // to know what it looks like. 
class BacktraceBuilder: public StackObj { + friend class BacktraceIterator; private: Handle _backtrace; objArrayOop _head; @@ -1560,8 +1554,6 @@ int _index; NoSafepointVerifier _nsv; - public: - enum { trace_methods_offset = java_lang_Throwable::trace_methods_offset, trace_bcis_offset = java_lang_Throwable::trace_bcis_offset, @@ -1594,6 +1586,8 @@ return cprefs; } + public: + // constructor for new backtrace BacktraceBuilder(TRAPS): _methods(NULL), _bcis(NULL), _head(NULL), _mirrors(NULL), _cprefs(NULL) { expand(CHECK); @@ -1679,9 +1673,68 @@ }; +struct BacktraceElement : public StackObj { + int _method_id; + int _bci; + int _version; + int _cpref; + Handle _mirror; + BacktraceElement(Handle mirror, int mid, int version, int bci, int cpref) : + _mirror(mirror), _method_id(mid), _version(version), _bci(bci), _cpref(cpref) {} +}; + +class BacktraceIterator : public StackObj { + int _index; + objArrayHandle _result; + objArrayHandle _mirrors; + typeArrayHandle _methods; + typeArrayHandle _bcis; + typeArrayHandle _cprefs; + + void init(objArrayHandle result, Thread* thread) { + // Get method id, bci, version and mirror from chunk + _result = result; + if (_result.not_null()) { + _methods = typeArrayHandle(thread, BacktraceBuilder::get_methods(_result)); + _bcis = typeArrayHandle(thread, BacktraceBuilder::get_bcis(_result)); + _mirrors = objArrayHandle(thread, BacktraceBuilder::get_mirrors(_result)); + _cprefs = typeArrayHandle(thread, BacktraceBuilder::get_cprefs(_result)); + _index = 0; + } + } + public: + BacktraceIterator(objArrayHandle result, Thread* thread) { + init(result, thread); + assert(_methods.is_null() || _methods->length() == java_lang_Throwable::trace_chunk_size, "lengths don't match"); + } + + BacktraceElement next(Thread* thread) { + BacktraceElement e (Handle(thread, _mirrors->obj_at(_index)), + _methods->short_at(_index), + Backtrace::version_at(_bcis->int_at(_index)), + Backtrace::bci_at(_bcis->int_at(_index)), + _cprefs->short_at(_index)); + _index++; + + if (_index >= java_lang_Throwable::trace_chunk_size) { + int next_offset = java_lang_Throwable::trace_next_offset; + // Get next chunk + objArrayHandle result (thread, objArrayOop(_result->obj_at(next_offset))); + init(result, thread); + } + return e; + } + + bool repeat() { + return _result.not_null() && _mirrors->obj_at(_index) != NULL; + } +}; + + // Print stack trace element to resource allocated buffer -char* java_lang_Throwable::print_stack_element_to_buffer(Handle mirror, - int method_id, int version, int bci, int cpref) { +static void print_stack_element_to_stream(outputStream* st, Handle mirror, int method_id, + int version, int bci, int cpref) { + ResourceMark rm; // Get strings and string lengths InstanceKlass* holder = InstanceKlass::cast(java_lang_Class::as_Klass(mirror())); @@ -1752,13 +1805,6 @@ } } - return buf; -} - -void java_lang_Throwable::print_stack_element(outputStream *st, Handle mirror, - int method_id, int version, int bci, int cpref) { - ResourceMark rm; - char* buf = print_stack_element_to_buffer(mirror, method_id, version, bci, cpref); st->print_cr("%s", buf); } @@ -1767,11 +1813,7 @@ int method_id = method->orig_method_idnum(); int version = method->constants()->version(); int cpref = method->name_index(); - print_stack_element(st, mirror, method_id, version, bci, cpref); -} - -const char* java_lang_Throwable::no_stack_trace_message() { - return "\t<<no stack trace available>>"; + print_stack_element_to_stream(st, mirror, method_id, version, bci, cpref); } /** @@ -1788,32 +1830,17 @@ while (throwable.not_null()) {
objArrayHandle result (THREAD, objArrayOop(backtrace(throwable()))); if (result.is_null()) { - st->print_raw_cr(no_stack_trace_message()); + st->print_raw_cr("\t<<no stack trace available>>"); return; } - - while (result.not_null()) { - // Get method id, bci, version and mirror from chunk - typeArrayHandle methods (THREAD, BacktraceBuilder::get_methods(result)); - typeArrayHandle bcis (THREAD, BacktraceBuilder::get_bcis(result)); - objArrayHandle mirrors (THREAD, BacktraceBuilder::get_mirrors(result)); - typeArrayHandle cprefs (THREAD, BacktraceBuilder::get_cprefs(result)); - - int length = methods()->length(); - for (int index = 0; index < length; index++) { - Handle mirror(THREAD, mirrors->obj_at(index)); - // NULL mirror means end of stack trace - if (mirror.is_null()) goto handle_cause; - int method = methods->short_at(index); - int version = Backtrace::version_at(bcis->int_at(index)); - int bci = Backtrace::bci_at(bcis->int_at(index)); - int cpref = cprefs->short_at(index); - print_stack_element(st, mirror, method, version, bci, cpref); - } - result = objArrayHandle(THREAD, objArrayOop(result->obj_at(trace_next_offset))); + BacktraceIterator iter(result, THREAD); + + while (iter.repeat()) { + BacktraceElement bte = iter.next(THREAD); + print_stack_element_to_stream(st, bte._mirror, bte._method_id, bte._version, bte._bci, bte._cpref); } - handle_cause: { + // Call getCause() which doesn't necessarily return the _cause field. EXCEPTION_MARK; JavaValue cause(T_OBJECT); JavaCalls::call_virtual(&cause, @@ -1865,6 +1892,7 @@ int max_depth = MaxJavaStackTraceDepth; JavaThread* thread = (JavaThread*)THREAD; + BacktraceBuilder bt(CHECK); // If there is no Java frame just return the method that was being called @@ -1872,6 +1900,8 @@ if (!thread->has_last_Java_frame()) { if (max_depth >= 1 && method() != NULL) { bt.push(method(), 0, CHECK); + log_info(stacktrace)("%s, %d", throwable->klass()->external_name(), 1); + set_depth(throwable(), 1); set_backtrace(throwable(), bt.backtrace()); } return; @@ -1979,8 +2009,11 @@ total_count++; } + log_info(stacktrace)("%s, %d", throwable->klass()->external_name(), total_count); + // Put completed stack trace into throwable object set_backtrace(throwable(), bt.backtrace()); + set_depth(throwable(), total_count); } void java_lang_Throwable::fill_in_stack_trace(Handle throwable, const methodHandle& method) { @@ -2034,94 +2067,60 @@ // methods as preallocated errors aren't created by "java" code. // fill in as much stack trace as possible - typeArrayOop methods = BacktraceBuilder::get_methods(backtrace); - int max_chunks = MIN2(methods->length(), (int)MaxJavaStackTraceDepth); int chunk_count = 0; - for (;!st.at_end(); st.next()) { bt.push(st.method(), st.bci(), CHECK); chunk_count++; // Bail-out for deep stacks - if (chunk_count >= max_chunks) break; + if (chunk_count >= trace_chunk_size) break; } + set_depth(throwable(), chunk_count); + log_info(stacktrace)("%s, %d", throwable->klass()->external_name(), chunk_count); // We support the Throwable immutability protocol defined for Java 7.
java_lang_Throwable::set_stacktrace(throwable(), java_lang_Throwable::unassigned_stacktrace()); assert(java_lang_Throwable::unassigned_stacktrace() != NULL, "not initialized"); } - -int java_lang_Throwable::get_stack_trace_depth(oop throwable, TRAPS) { - if (throwable == NULL) { - THROW_0(vmSymbols::java_lang_NullPointerException()); +void java_lang_Throwable::get_stack_trace_elements(Handle throwable, + objArrayHandle stack_trace_array_h, TRAPS) { + + if (throwable.is_null() || stack_trace_array_h.is_null()) { + THROW(vmSymbols::java_lang_NullPointerException()); } - objArrayOop chunk = objArrayOop(backtrace(throwable)); - int depth = 0; - if (chunk != NULL) { - // Iterate over chunks and count full ones - while (true) { - objArrayOop next = objArrayOop(chunk->obj_at(trace_next_offset)); - if (next == NULL) break; - depth += trace_chunk_size; - chunk = next; - } - assert(chunk != NULL && chunk->obj_at(trace_next_offset) == NULL, "sanity check"); - // Count element in remaining partial chunk. NULL value for mirror - // marks the end of the stack trace elements that are saved. - objArrayOop mirrors = BacktraceBuilder::get_mirrors(chunk); - assert(mirrors != NULL, "sanity check"); - for (int i = 0; i < mirrors->length(); i++) { - if (mirrors->obj_at(i) == NULL) break; - depth++; - } - } - return depth; -} - - -oop java_lang_Throwable::get_stack_trace_element(oop throwable, int index, TRAPS) { - if (throwable == NULL) { - THROW_0(vmSymbols::java_lang_NullPointerException()); - } - if (index < 0) { - THROW_(vmSymbols::java_lang_IndexOutOfBoundsException(), NULL); + + assert(stack_trace_array_h->is_objArray(), "Stack trace array should be an array of StackTraceElenent"); + + if (stack_trace_array_h->length() != depth(throwable())) { + THROW(vmSymbols::java_lang_IndexOutOfBoundsException()); } - // Compute how many chunks to skip and index into actual chunk - objArrayOop chunk = objArrayOop(backtrace(throwable)); - int skip_chunks = index / trace_chunk_size; - int chunk_index = index % trace_chunk_size; - while (chunk != NULL && skip_chunks > 0) { - chunk = objArrayOop(chunk->obj_at(trace_next_offset)); - skip_chunks--; - } - if (chunk == NULL) { - THROW_(vmSymbols::java_lang_IndexOutOfBoundsException(), NULL); + + objArrayHandle result(THREAD, objArrayOop(backtrace(throwable()))); + BacktraceIterator iter(result, THREAD); + + int index = 0; + while (iter.repeat()) { + BacktraceElement bte = iter.next(THREAD); + + Handle stack_trace_element(THREAD, stack_trace_array_h->obj_at(index++)); + + if (stack_trace_element.is_null()) { + THROW(vmSymbols::java_lang_NullPointerException()); + } + + InstanceKlass* holder = InstanceKlass::cast(java_lang_Class::as_Klass(bte._mirror())); + methodHandle method (THREAD, holder->method_with_orig_idnum(bte._method_id, bte._version)); + + java_lang_StackTraceElement::fill_in(stack_trace_element, holder, + method, + bte._version, + bte._bci, + bte._cpref, CHECK); } - // Get method id, bci, version, mirror and cpref from chunk - typeArrayOop methods = BacktraceBuilder::get_methods(chunk); - typeArrayOop bcis = BacktraceBuilder::get_bcis(chunk); - objArrayOop mirrors = BacktraceBuilder::get_mirrors(chunk); - typeArrayOop cprefs = BacktraceBuilder::get_cprefs(chunk); - - assert(methods != NULL && bcis != NULL && mirrors != NULL, "sanity check"); - - int method = methods->short_at(chunk_index); - int version = Backtrace::version_at(bcis->int_at(chunk_index)); - int bci = Backtrace::bci_at(bcis->int_at(chunk_index)); - int cpref = cprefs->short_at(chunk_index); - Handle 
mirror(THREAD, mirrors->obj_at(chunk_index)); - - // Chunk can be partial full - if (mirror.is_null()) { - THROW_(vmSymbols::java_lang_IndexOutOfBoundsException(), NULL); - } - oop element = java_lang_StackTraceElement::create(mirror, method, version, bci, cpref, CHECK_0); - return element; -} - -oop java_lang_StackTraceElement::create(Handle mirror, int method_id, - int version, int bci, int cpref, TRAPS) { +} + +oop java_lang_StackTraceElement::create(const methodHandle& method, int bci, TRAPS) { // Allocate java.lang.StackTraceElement instance Klass* k = SystemDictionary::StackTraceElement_klass(); assert(k != NULL, "must be loaded in 1.4+"); @@ -2132,37 +2131,45 @@ Handle element = ik->allocate_instance_handle(CHECK_0); + int cpref = method->name_index(); + int version = method->constants()->version(); + fill_in(element, method->method_holder(), method, version, bci, cpref, CHECK_0); + return element(); +} + +void java_lang_StackTraceElement::fill_in(Handle element, + InstanceKlass* holder, const methodHandle& method, + int version, int bci, int cpref, TRAPS) { + assert(element->is_a(SystemDictionary::StackTraceElement_klass()), "sanity check"); + // Fill in class name ResourceMark rm(THREAD); - InstanceKlass* holder = InstanceKlass::cast(java_lang_Class::as_Klass(mirror())); const char* str = holder->external_name(); - oop classname = StringTable::intern((char*) str, CHECK_0); + oop classname = StringTable::intern((char*) str, CHECK); java_lang_StackTraceElement::set_declaringClass(element(), classname); - Method* method = holder->method_with_orig_idnum(method_id, version); - // The method can be NULL if the requested class version is gone - Symbol* sym = (method != NULL) ? method->name() : holder->constants()->symbol_at(cpref); + Symbol* sym = !method.is_null() ? 
method->name() : holder->constants()->symbol_at(cpref); // Fill in method name - oop methodname = StringTable::intern(sym, CHECK_0); + oop methodname = StringTable::intern(sym, CHECK); java_lang_StackTraceElement::set_methodName(element(), methodname); // Fill in module name and version ModuleEntry* module = holder->module(); if (module->is_named()) { - oop module_name = StringTable::intern(module->name(), CHECK_0); + oop module_name = StringTable::intern(module->name(), CHECK); java_lang_StackTraceElement::set_moduleName(element(), module_name); oop module_version; if (module->version() != NULL) { - module_version = StringTable::intern(module->version(), CHECK_0); + module_version = StringTable::intern(module->version(), CHECK); } else { module_version = NULL; } java_lang_StackTraceElement::set_moduleVersion(element(), module_version); } - if (!version_matches(method, version)) { + if (!version_matches(method(), version)) { // The method was redefined, accurate line number information isn't available java_lang_StackTraceElement::set_fileName(element(), NULL); java_lang_StackTraceElement::set_lineNumber(element(), -1); @@ -2171,20 +2178,12 @@ Symbol* source = Backtrace::get_source_file_name(holder, version); if (ShowHiddenFrames && source == NULL) source = vmSymbols::unknown_class_name(); - oop filename = StringTable::intern(source, CHECK_0); + oop filename = StringTable::intern(source, CHECK); java_lang_StackTraceElement::set_fileName(element(), filename); int line_number = Backtrace::get_line_number(method, bci); java_lang_StackTraceElement::set_lineNumber(element(), line_number); } - return element(); -} - -oop java_lang_StackTraceElement::create(const methodHandle& method, int bci, TRAPS) { - Handle mirror (THREAD, method->method_holder()->java_mirror()); - int method_id = method->orig_method_idnum(); - int cpref = method->name_index(); - return create(mirror, method_id, method->constants()->version(), bci, cpref, THREAD); } Method* java_lang_StackFrameInfo::get_method(Handle stackFrame, InstanceKlass* holder, TRAPS) { @@ -2751,7 +2750,7 @@ field->obj_field_put(type_annotations_offset, value); } -void sun_reflect_ConstantPool::compute_offsets() { +void reflect_ConstantPool::compute_offsets() { Klass* k = SystemDictionary::reflect_ConstantPool_klass(); // This null test can be removed post beta if (k != NULL) { @@ -2895,7 +2894,7 @@ module->address_field_put(_module_entry_offset, (address)module_entry); } -Handle sun_reflect_ConstantPool::create(TRAPS) { +Handle reflect_ConstantPool::create(TRAPS) { assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem"); Klass* k = SystemDictionary::reflect_ConstantPool_klass(); instanceKlassHandle klass (THREAD, k); @@ -2905,14 +2904,14 @@ } -void sun_reflect_ConstantPool::set_cp(oop reflect, ConstantPool* value) { +void reflect_ConstantPool::set_cp(oop reflect, ConstantPool* value) { assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem"); oop mirror = value->pool_holder()->java_mirror(); // Save the mirror to get back the constant pool. 
reflect->obj_field_put(_oop_offset, mirror); } -ConstantPool* sun_reflect_ConstantPool::get_cp(oop reflect) { +ConstantPool* reflect_ConstantPool::get_cp(oop reflect) { assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem"); oop mirror = reflect->obj_field(_oop_offset); @@ -2927,7 +2926,7 @@ return InstanceKlass::cast(k)->constants(); } -void sun_reflect_UnsafeStaticFieldAccessorImpl::compute_offsets() { +void reflect_UnsafeStaticFieldAccessorImpl::compute_offsets() { Klass* k = SystemDictionary::reflect_UnsafeStaticFieldAccessorImpl_klass(); // This null test can be removed post beta if (k != NULL) { @@ -3629,8 +3628,8 @@ GrowableArray* java_lang_Class::_fixup_module_field_list = NULL; int java_lang_Throwable::backtrace_offset; int java_lang_Throwable::detailMessage_offset; -int java_lang_Throwable::cause_offset; int java_lang_Throwable::stackTrace_offset; +int java_lang_Throwable::depth_offset; int java_lang_Throwable::static_unassigned_stacktrace_offset; int java_lang_reflect_AccessibleObject::override_offset; int java_lang_reflect_Method::clazz_offset; @@ -3707,8 +3706,8 @@ int java_lang_AssertionStatusDirectives::deflt_offset; int java_nio_Buffer::_limit_offset; int java_util_concurrent_locks_AbstractOwnableSynchronizer::_owner_offset = 0; -int sun_reflect_ConstantPool::_oop_offset; -int sun_reflect_UnsafeStaticFieldAccessorImpl::_base_offset; +int reflect_ConstantPool::_oop_offset; +int reflect_UnsafeStaticFieldAccessorImpl::_base_offset; // Support for java_lang_StackTraceElement @@ -3841,7 +3840,6 @@ // Throwable Class java_lang_Throwable::backtrace_offset = java_lang_Throwable::hc_backtrace_offset * x + header; java_lang_Throwable::detailMessage_offset = java_lang_Throwable::hc_detailMessage_offset * x + header; - java_lang_Throwable::cause_offset = java_lang_Throwable::hc_cause_offset * x + header; java_lang_Throwable::stackTrace_offset = java_lang_Throwable::hc_stackTrace_offset * x + header; java_lang_Throwable::static_unassigned_stacktrace_offset = java_lang_Throwable::hc_static_unassigned_stacktrace_offset * x; @@ -3894,6 +3892,7 @@ void JavaClasses::compute_offsets() { // java_lang_Class::compute_offsets was called earlier in bootstrap java_lang_ClassLoader::compute_offsets(); + java_lang_Throwable::compute_offsets(); java_lang_Thread::compute_offsets(); java_lang_ThreadGroup::compute_offsets(); java_lang_invoke_MethodHandle::compute_offsets(); @@ -3913,8 +3912,8 @@ java_lang_reflect_Constructor::compute_offsets(); java_lang_reflect_Field::compute_offsets(); java_nio_Buffer::compute_offsets(); - sun_reflect_ConstantPool::compute_offsets(); - sun_reflect_UnsafeStaticFieldAccessorImpl::compute_offsets(); + reflect_ConstantPool::compute_offsets(); + reflect_UnsafeStaticFieldAccessorImpl::compute_offsets(); java_lang_reflect_Parameter::compute_offsets(); java_lang_reflect_Module::compute_offsets(); java_lang_StackFrameInfo::compute_offsets(); @@ -4048,8 +4047,8 @@ CHECK_OFFSET("java/lang/Throwable", java_lang_Throwable, backtrace, "Ljava/lang/Object;"); CHECK_OFFSET("java/lang/Throwable", java_lang_Throwable, detailMessage, "Ljava/lang/String;"); - CHECK_OFFSET("java/lang/Throwable", java_lang_Throwable, cause, "Ljava/lang/Throwable;"); CHECK_OFFSET("java/lang/Throwable", java_lang_Throwable, stackTrace, "[Ljava/lang/StackTraceElement;"); + CHECK_OFFSET("java/lang/Throwable", java_lang_Throwable, depth, "I"); // Boxed primitive objects (java_lang_boxing_object) diff -r 3414aeff4a80 -r ee1b8619eddb 
hotspot/src/share/vm/classfile/javaClasses.hpp --- a/hotspot/src/share/vm/classfile/javaClasses.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/classfile/javaClasses.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -155,11 +155,6 @@ } static unsigned int hash_code(oop java_string); - static unsigned int latin1_hash_code(typeArrayOop value, int len); - - // This is the string hash code used by the StringTable, which may be - // the same as String.hashCode or an alternate hash code. - static unsigned int hash_string(oop java_string); static bool equals(oop java_string, jchar* chars, int len); static bool equals(oop str1, oop str2); @@ -456,6 +451,7 @@ class java_lang_Throwable: AllStatic { friend class BacktraceBuilder; + friend class BacktraceIterator; private: // Offsets @@ -481,16 +477,12 @@ static int backtrace_offset; static int detailMessage_offset; - static int cause_offset; static int stackTrace_offset; + static int depth_offset; static int static_unassigned_stacktrace_offset; - // Printing - static char* print_stack_element_to_buffer(Handle mirror, int method, int version, int bci, int cpref); // StackTrace (programmatic access, new since 1.4) static void clear_stacktrace(oop throwable); - // No stack trace available - static const char* no_stack_trace_message(); // Stacktrace (post JDK 1.7.0 to allow immutability protocol to be followed) static void set_stacktrace(oop throwable, oop st_element_array); static oop unassigned_stacktrace(); @@ -499,19 +491,20 @@ // Backtrace static oop backtrace(oop throwable); static void set_backtrace(oop throwable, oop value); + static int depth(oop throwable); + static void set_depth(oop throwable, int value); // Needed by JVMTI to filter out this internal field. static int get_backtrace_offset() { return backtrace_offset;} static int get_detailMessage_offset() { return detailMessage_offset;} // Message - static oop message(oop throwable); static oop message(Handle throwable); static void set_message(oop throwable, oop value); static Symbol* detail_message(oop throwable); - static void print_stack_element(outputStream *st, Handle mirror, int method, - int version, int bci, int cpref); static void print_stack_element(outputStream *st, const methodHandle& method, int bci); static void print_stack_usage(Handle stream); + static void compute_offsets(); + // Allocate space for backtrace (created but stack trace not filled in) static void allocate_backtrace(Handle throwable, TRAPS); // Fill in current stack trace for throwable with preallocated backtrace (no GC) @@ -520,8 +513,7 @@ static void fill_in_stack_trace(Handle throwable, const methodHandle& method, TRAPS); static void fill_in_stack_trace(Handle throwable, const methodHandle& method = methodHandle()); // Programmatic access to stack trace - static oop get_stack_trace_element(oop throwable, int index, TRAPS); - static int get_stack_trace_depth(oop throwable, TRAPS); + static void get_stack_trace_elements(Handle throwable, objArrayHandle stack_trace, TRAPS); // Printing static void print(Handle throwable, outputStream* st); static void print_stack_trace(Handle throwable, outputStream* st); @@ -807,8 +799,8 @@ friend class JavaClasses; }; -// Interface to sun.reflect.ConstantPool objects -class sun_reflect_ConstantPool { +// Interface to jdk.internal.reflect.ConstantPool objects +class reflect_ConstantPool { private: // Note that to reduce dependencies on the JDK we compute these // offsets at run-time. 
@@ -832,8 +824,8 @@ friend class JavaClasses; }; -// Interface to sun.reflect.UnsafeStaticFieldAccessorImpl objects -class sun_reflect_UnsafeStaticFieldAccessorImpl { +// Interface to jdk.internal.reflect.UnsafeStaticFieldAccessorImpl objects +class reflect_UnsafeStaticFieldAccessorImpl { private: static int _base_offset; static void compute_offsets(); @@ -1333,7 +1325,6 @@ static int fileName_offset; static int lineNumber_offset; - public: // Setters static void set_moduleName(oop element, oop value); static void set_moduleVersion(oop element, oop value); @@ -1342,10 +1333,13 @@ static void set_fileName(oop element, oop value); static void set_lineNumber(oop element, int value); + public: // Create an instance of StackTraceElement - static oop create(Handle mirror, int method, int version, int bci, int cpref, TRAPS); static oop create(const methodHandle& method, int bci, TRAPS); + static void fill_in(Handle element, InstanceKlass* holder, const methodHandle& method, + int version, int bci, int cpref, TRAPS); + // Debugging friend class JavaClasses; }; diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/classfile/loaderConstraints.cpp --- a/hotspot/src/share/vm/classfile/loaderConstraints.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/classfile/loaderConstraints.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -111,13 +111,14 @@ if (klass != NULL && klass->class_loader_data()->is_unloading()) { probe->set_klass(NULL); - if (TraceLoaderConstraints) { + if (log_is_enabled(Info, constraints)) { ResourceMark rm; - tty->print_cr("[Purging class object from constraint for name %s," + outputStream* out = Log(classload, constraints)::info_stream(); + out->print_cr("purging class object from constraint for name %s," " loader list:", probe->name()->as_C_string()); for (int i = 0; i < probe->num_loaders(); i++) { - tty->print_cr("[ [%d]: %s", i, + out->print_cr(" [%d]: %s", i, probe->loader_data(i)->loader_name()); } } @@ -126,9 +127,10 @@ int n = 0; while (n < probe->num_loaders()) { if (probe->loader_data(n)->is_unloading()) { - if (TraceLoaderConstraints) { + if (log_is_enabled(Info, classload, constraints)) { ResourceMark rm; - tty->print_cr("[Purging loader %s from constraint for name %s", + outputStream* out = Log(classload, constraints)::info_stream(); + out->print_cr("purging loader %s from constraint for name %s", probe->loader_data(n)->loader_name(), probe->name()->as_C_string() ); @@ -140,11 +142,12 @@ probe->set_loader_data(n, probe->loader_data(num)); probe->set_loader_data(num, NULL); - if (TraceLoaderConstraints) { + if (log_is_enabled(Info, classload, constraints)) { ResourceMark rm; - tty->print_cr("[New loader list:"); + outputStream* out = Log(classload, constraints)::info_stream(); + out->print_cr("new loader list:"); for (int i = 0; i < probe->num_loaders(); i++) { - tty->print_cr("[ [%d]: %s", i, + out->print_cr(" [%d]: %s", i, probe->loader_data(i)->loader_name()); } } @@ -156,9 +159,10 @@ } // Check whether entry should be purged if (probe->num_loaders() < 2) { - if (TraceLoaderConstraints) { + if (log_is_enabled(Info, classload, constraints)) { ResourceMark rm; - tty->print("[Purging complete constraint for name %s\n", + outputStream* out = 
Log(classload, constraints)::info_stream(); + out->print_cr("purging complete constraint for name %s", probe->name()->as_C_string()); } @@ -227,10 +231,11 @@ p->set_klass(klass); p->set_next(bucket(index)); set_entry(index, p); - if (TraceLoaderConstraints) { + if (log_is_enabled(Info, classload, constraints)) { ResourceMark rm; - tty->print("[Adding new constraint for name: %s, loader[0]: %s," - " loader[1]: %s ]\n", + outputStream* out = Log(classload, constraints)::info_stream(); + out->print_cr("adding new constraint for name: %s, loader[0]: %s," + " loader[1]: %s", class_name->as_C_string(), SystemDictionary::loader_name(class_loader1()), SystemDictionary::loader_name(class_loader2()) @@ -240,10 +245,11 @@ /* constraint already imposed */ if ((*pp1)->klass() == NULL) { (*pp1)->set_klass(klass); - if (TraceLoaderConstraints) { + if (log_is_enabled(Info, classload, constraints)) { ResourceMark rm; - tty->print("[Setting class object in existing constraint for" - " name: %s and loader %s ]\n", + outputStream* out = Log(classload, constraints)::info_stream(); + out->print_cr("setting class object in existing constraint for" + " name: %s and loader %s", class_name->as_C_string(), SystemDictionary::loader_name(class_loader1()) ); @@ -261,8 +267,9 @@ } } - if (failure_code != 0 && TraceLoaderConstraints) { + if (failure_code != 0 && log_is_enabled(Info, classload, constraints)) { ResourceMark rm; + outputStream* out = Log(classload, constraints)::info_stream(); const char* reason = ""; switch(failure_code) { case 1: reason = "the class objects presented by loader[0] and loader[1]" @@ -273,8 +280,8 @@ " the stored class object in the constraint"; break; default: reason = "unknown reason code"; } - tty->print("[Failed to add constraint for name: %s, loader[0]: %s," - " loader[1]: %s, Reason: %s ]\n", + out->print_cr("failed to add constraint for name: %s, loader[0]: %s," + " loader[1]: %s, Reason: %s", class_name->as_C_string(), SystemDictionary::loader_name(class_loader1()), SystemDictionary::loader_name(class_loader2()), @@ -293,10 +300,11 @@ Symbol* name) { LoaderConstraintEntry* p = *(find_loader_constraint(name, loader)); if (p && p->klass() != NULL && p->klass() != k()) { - if (TraceLoaderConstraints) { + if (log_is_enabled(Info, classload, constraints)) { ResourceMark rm; - tty->print("[Constraint check failed for name %s, loader %s: " - "the presented class object differs from that stored ]\n", + outputStream* out = Log(classload, constraints)::info_stream(); + out->print_cr("constraint check failed for name %s, loader %s: " + "the presented class object differs from that stored", name->as_C_string(), SystemDictionary::loader_name(loader())); } @@ -304,10 +312,11 @@ } else { if (p && p->klass() == NULL) { p->set_klass(k()); - if (TraceLoaderConstraints) { + if (log_is_enabled(Info, classload, constraints)) { ResourceMark rm; - tty->print("[Updating constraint for name %s, loader %s, " - "by setting class object ]\n", + outputStream* out = Log(classload, constraints)::info_stream(); + out->print_cr("updating constraint for name %s, loader %s, " + "by setting class object", name->as_C_string(), SystemDictionary::loader_name(loader())); } @@ -353,13 +362,14 @@ int num = p->num_loaders(); p->set_loader(num, loader()); p->set_num_loaders(num + 1); - if (TraceLoaderConstraints) { + if (log_is_enabled(Info, classload, constraints)) { ResourceMark rm; - tty->print("[Extending constraint for name %s by adding loader[%d]: %s %s", + outputStream* out = Log(classload, constraints)::info_stream(); 
+ out->print_cr("extending constraint for name %s by adding loader[%d]: %s %s", p->name()->as_C_string(), num, SystemDictionary::loader_name(loader()), - (p->klass() == NULL ? " and setting class object ]\n" : " ]\n") + (p->klass() == NULL ? " and setting class object" : "") ); } if (p->klass() == NULL) { @@ -392,18 +402,19 @@ p1->set_num_loaders(num + 1); } - if (TraceLoaderConstraints) { + if (log_is_enabled(Info, classload, constraints)) { ResourceMark rm; - tty->print_cr("[Merged constraints for name %s, new loader list:", + outputStream* out = Log(classload, constraints)::info_stream(); + out->print_cr("merged constraints for name %s, new loader list:", p1->name()->as_C_string() ); for (int i = 0; i < p1->num_loaders(); i++) { - tty->print_cr("[ [%d]: %s", i, + out->print_cr(" [%d]: %s", i, p1->loader_data(i)->loader_name()); } if (p1->klass() == NULL) { - tty->print_cr("[... and setting class object]"); + out->print_cr("... and setting class object"); } } @@ -473,7 +484,6 @@ // Called with the system dictionary lock held void LoaderConstraintTable::print() { ResourceMark rm; - assert_locked_or_safepoint(SystemDictionary_lock); tty->print_cr("Java loader constraints (entries=%d)", _loader_constraint_size); for (int cindex = 0; cindex < _loader_constraint_size; cindex++) { diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/classfile/modules.cpp --- a/hotspot/src/share/vm/classfile/modules.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/classfile/modules.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -36,6 +36,7 @@ #include "classfile/symbolTable.hpp" #include "classfile/vmSymbols.hpp" #include "logging/log.hpp" +#include "memory/resourceArea.hpp" #include "oops/instanceKlass.hpp" #include "oops/objArrayKlass.hpp" #include "oops/objArrayOop.inline.hpp" @@ -480,7 +481,7 @@ } if (log_is_enabled(Debug, modules)) { - outputStream* logst = LogHandle(modules)::debug_stream(); + outputStream* logst = Log(modules)::debug_stream(); logst->print("define_module(): creation of module: %s, version: %s, location: %s, ", module_name, module_version != NULL ? module_version : "NULL", module_location != NULL ? 
module_location : "NULL"); @@ -789,7 +790,7 @@ if (log_is_enabled(Debug, modules)) { ResourceMark rm(THREAD); - outputStream* logst = LogHandle(modules)::debug_stream(); + outputStream* logst = Log(modules)::debug_stream(); Klass* klass = java_lang_Class::as_Klass(mirror); oop module_name = java_lang_reflect_Module::name(module); if (module_name != NULL) { diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/classfile/sharedPathsMiscInfo.cpp --- a/hotspot/src/share/vm/classfile/sharedPathsMiscInfo.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/classfile/sharedPathsMiscInfo.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -29,6 +29,7 @@ #include "logging/log.hpp" #include "memory/allocation.inline.hpp" #include "memory/metaspaceShared.hpp" +#include "memory/resourceArea.hpp" #include "runtime/arguments.hpp" #include "utilities/ostream.hpp" @@ -74,7 +75,7 @@ void SharedPathsMiscInfo::print_path(int type, const char* path) { ResourceMark rm; - outputStream* out = LogHandle(classpath)::info_stream(); + outputStream* out = Log(classpath)::info_stream(); switch (type) { case BOOT: out->print("Expecting BOOT path=%s", path); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/classfile/stringTable.cpp --- a/hotspot/src/share/vm/classfile/stringTable.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/classfile/stringTable.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -32,6 +32,7 @@ #include "gc/shared/gcLocker.inline.hpp" #include "memory/allocation.inline.hpp" #include "memory/filemap.hpp" +#include "memory/resourceArea.hpp" #include "oops/oop.inline.hpp" #include "runtime/atomic.inline.hpp" #include "runtime/mutexLocker.hpp" @@ -94,15 +95,27 @@ CompactHashtable StringTable::_shared_table; // Pick hashing algorithm -template -unsigned int StringTable::hash_string(const T* s, int len) { +unsigned int StringTable::hash_string(const jchar* s, int len) { return use_alternate_hashcode() ? AltHashing::murmur3_32(seed(), s, len) : java_lang_String::hash_code(s, len); } -// Explicit instantiation for all supported types. -template unsigned int StringTable::hash_string(const jchar* s, int len); -template unsigned int StringTable::hash_string(const jbyte* s, int len); +unsigned int StringTable::hash_string(oop string) { + EXCEPTION_MARK; + if (string == NULL) { + return hash_string((jchar*)NULL, 0); + } + ResourceMark rm(THREAD); + // All String oops are hashed as unicode + int length; + jchar* chars = java_lang_String::as_unicode_string(string, length, THREAD); + if (chars != NULL) { + return hash_string(chars, length); + } else { + vm_exit_out_of_memory(length, OOM_MALLOC_ERROR, "unable to create Unicode string for verification"); + return 0; + } +} oop StringTable::lookup_shared(jchar* name, int len) { // java_lang_String::hash_code() was used to compute hash values in the shared table. 
Don't @@ -398,7 +411,7 @@ for ( ; p != NULL; p = p->next()) { oop s = p->literal(); guarantee(s != NULL, "interned string is NULL"); - unsigned int h = java_lang_String::hash_string(s); + unsigned int h = hash_string(s); guarantee(p->hash() == h, "broken hash in string table entry"); guarantee(the_table()->hash_to_index(h) == i, "wrong index in string table"); @@ -498,7 +511,7 @@ return _verify_fail_done; } - unsigned int h = java_lang_String::hash_string(str); + unsigned int h = hash_string(str); if (e_ptr->hash() != h) { if (mesg_mode == _verify_with_mesgs) { tty->print_cr("ERROR: broken hash value in entry @ bucket[%d][%d], " diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/classfile/stringTable.hpp --- a/hotspot/src/share/vm/classfile/stringTable.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/classfile/stringTable.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -111,7 +111,8 @@ // Hashing algorithm, used as the hash value used by the // StringTable for bucket selection and comparison (stored in the // HashtableEntry structures). This is used in the String.intern() method. - template static unsigned int hash_string(const T* s, int len); + static unsigned int hash_string(const jchar* s, int len); + static unsigned int hash_string(oop string); // Internal test. static void test_alt_hash() PRODUCT_RETURN; diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/classfile/symbolTable.cpp --- a/hotspot/src/share/vm/classfile/symbolTable.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/classfile/symbolTable.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -32,6 +32,7 @@ #include "gc/shared/gcLocker.inline.hpp" #include "memory/allocation.inline.hpp" #include "memory/filemap.hpp" +#include "memory/resourceArea.hpp" #include "oops/oop.inline.hpp" #include "runtime/atomic.inline.hpp" #include "runtime/mutexLocker.hpp" @@ -160,6 +161,11 @@ // Create a new table and using alternate hash code, populate the new table // with the existing strings. Set flag to use the alternate hash code afterwards. void SymbolTable::rehash_table() { + if (DumpSharedSpaces) { + tty->print_cr("Warning: rehash_table should not be called while dumping archive"); + return; + } + assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); // This should never happen with -Xshare:dump but it might in testing mode. if (DumpSharedSpaces) return; @@ -201,6 +207,11 @@ Symbol* SymbolTable::lookup_shared(const char* name, int len, unsigned int hash) { + if (use_alternate_hashcode()) { + // hash_code parameter may use alternate hashing algorithm but the shared table + // always uses the same original hash code. + hash = hash_shared_symbol(name, len); + } return _shared_table.lookup(name, hash, len); } @@ -234,6 +245,10 @@ java_lang_String::hash_code((const jbyte*)s, len); } +unsigned int SymbolTable::hash_shared_symbol(const char* s, int len) { + return java_lang_String::hash_code((const jbyte*)s, len); +} + // We take care not to be blocking while holding the // SymbolTable_lock. 
Otherwise, the system might deadlock, since the @@ -536,7 +551,7 @@ HashtableEntry* p = the_table()->bucket(i); for ( ; p != NULL; p = p->next()) { Symbol* s = (Symbol*)(p->literal()); - unsigned int fixed_hash = hash_symbol((char*)s->bytes(), s->utf8_length()); + unsigned int fixed_hash = hash_shared_symbol((char*)s->bytes(), s->utf8_length()); assert(fixed_hash == p->hash(), "must not rehash during dumping"); ch_table.add(fixed_hash, s); } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/classfile/symbolTable.hpp --- a/hotspot/src/share/vm/classfile/symbolTable.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/classfile/symbolTable.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -175,6 +175,7 @@ } static unsigned int hash_symbol(const char* s, int len); + static unsigned int hash_shared_symbol(const char* s, int len); static Symbol* lookup(const char* name, int len, TRAPS); // lookup only, won't add. Also calculate hash. diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/classfile/systemDictionary.cpp --- a/hotspot/src/share/vm/classfile/systemDictionary.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/classfile/systemDictionary.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -45,6 +45,7 @@ #include "interpreter/interpreter.hpp" #include "memory/filemap.hpp" #include "memory/oopFactory.hpp" +#include "memory/resourceArea.hpp" #include "oops/instanceKlass.hpp" #include "oops/instanceRefKlass.hpp" #include "oops/klass.inline.hpp" @@ -67,6 +68,7 @@ #include "runtime/signature.hpp" #include "services/classLoadingService.hpp" #include "services/threadService.hpp" +#include "trace/traceMacros.hpp" #include "utilities/macros.hpp" #include "utilities/ticks.hpp" #if INCLUDE_CDS @@ -435,7 +437,7 @@ if (log_is_enabled(Debug, protectiondomain)) { ResourceMark rm; // Print out trace information - outputStream* log = LogHandle(protectiondomain)::debug_stream(); + outputStream* log = Log(protectiondomain)::debug_stream(); log->print_cr("Checking package access"); log->print("class loader: "); class_loader()->print_value_on(log); log->print(" protection domain: "); protection_domain()->print_value_on(log); @@ -1650,6 +1652,8 @@ } + TRACE_KLASS_DEFINITION(k, THREAD); + } // Support parallel classloading @@ -2063,7 +2067,18 @@ int sid = (info >> CEIL_LG_OPTION_LIMIT); Symbol* symbol = vmSymbols::symbol_at((vmSymbols::SID)sid); InstanceKlass** klassp = &_well_known_klasses[id]; - bool must_load = (init_opt < SystemDictionary::Opt); + + bool must_load; +#if INCLUDE_JVMCI + if (EnableJVMCI) { + // If JVMCI is enabled we require its classes to be found. + must_load = (init_opt < SystemDictionary::Opt) || (init_opt == SystemDictionary::Jvmci); + } else +#endif + { + must_load = (init_opt < SystemDictionary::Opt); + } + if ((*klassp) == NULL) { Klass* k; if (must_load) { diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/classfile/systemDictionary.hpp --- a/hotspot/src/share/vm/classfile/systemDictionary.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/classfile/systemDictionary.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -142,13 +142,13 @@ \ /* NOTE: needed too early in bootstrapping process to have checks based on JDK version */ \ /* It's okay if this turns out to be NULL in non-1.4 JDKs. 
*/ \ - do_klass(reflect_MagicAccessorImpl_klass, sun_reflect_MagicAccessorImpl, Opt ) \ - do_klass(reflect_MethodAccessorImpl_klass, sun_reflect_MethodAccessorImpl, Pre ) \ - do_klass(reflect_ConstructorAccessorImpl_klass, sun_reflect_ConstructorAccessorImpl, Pre ) \ - do_klass(reflect_DelegatingClassLoader_klass, sun_reflect_DelegatingClassLoader, Opt ) \ - do_klass(reflect_ConstantPool_klass, sun_reflect_ConstantPool, Opt ) \ - do_klass(reflect_UnsafeStaticFieldAccessorImpl_klass, sun_reflect_UnsafeStaticFieldAccessorImpl, Opt ) \ - do_klass(reflect_CallerSensitive_klass, sun_reflect_CallerSensitive, Opt ) \ + do_klass(reflect_MagicAccessorImpl_klass, reflect_MagicAccessorImpl, Opt ) \ + do_klass(reflect_MethodAccessorImpl_klass, reflect_MethodAccessorImpl, Pre ) \ + do_klass(reflect_ConstructorAccessorImpl_klass, reflect_ConstructorAccessorImpl, Pre ) \ + do_klass(reflect_DelegatingClassLoader_klass, reflect_DelegatingClassLoader, Opt ) \ + do_klass(reflect_ConstantPool_klass, reflect_ConstantPool, Opt ) \ + do_klass(reflect_UnsafeStaticFieldAccessorImpl_klass, reflect_UnsafeStaticFieldAccessorImpl, Opt ) \ + do_klass(reflect_CallerSensitive_klass, reflect_CallerSensitive, Opt ) \ \ /* support for dynamic typing; it's OK if these are NULL in earlier JDKs */ \ do_klass(DirectMethodHandle_klass, java_lang_invoke_DirectMethodHandle, Opt ) \ @@ -241,7 +241,7 @@ Opt, // preload tried; NULL if not present #if INCLUDE_JVMCI - Jvmci, // preload tried; error if not present, use only with JVMCI + Jvmci, // preload tried; error if not present if JVMCI enabled #endif OPTION_LIMIT, CEIL_LG_OPTION_LIMIT = 2 // OPTION_LIMIT <= (1<class_loader()), Handle(THREAD, klass->protection_domain()), true, CHECK_false); - if (log_is_enabled(Info, classresolve)) { + if (log_is_enabled(Debug, classresolve)) { Verifier::trace_class_resolution(obj, klass()); } @@ -80,7 +80,7 @@ Klass* from_class = SystemDictionary::resolve_or_fail( from.name(), Handle(THREAD, klass->class_loader()), Handle(THREAD, klass->protection_domain()), true, CHECK_false); - if (log_is_enabled(Info, classresolve)) { + if (log_is_enabled(Debug, classresolve)) { Verifier::trace_class_resolution(from_class, klass()); } return InstanceKlass::cast(from_class)->is_subclass_of(this_class()); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/classfile/verifier.cpp --- a/hotspot/src/share/vm/classfile/verifier.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/classfile/verifier.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -33,6 +33,7 @@ #include "classfile/vmSymbols.hpp" #include "interpreter/bytecodes.hpp" #include "interpreter/bytecodeStream.hpp" +#include "logging/log.hpp" #include "memory/oopFactory.hpp" #include "memory/resourceArea.hpp" #include "oops/instanceKlass.hpp" @@ -106,9 +107,9 @@ const char* resolve = resolve_class->external_name(); // print in a single call to reduce interleaving between threads if (source_file != NULL) { - log_info(classresolve)("%s %s %s (verification)", verify, resolve, source_file); + log_debug(classresolve)("%s %s %s (verification)", verify, resolve, source_file); } else { - log_info(classresolve)("%s %s (verification)", verify, resolve); + log_debug(classresolve)("%s %s (verification)", verify, resolve); } } @@ -176,9 +177,7 @@ if (can_failover && !HAS_PENDING_EXCEPTION && (exception_name == vmSymbols::java_lang_VerifyError() || exception_name == vmSymbols::java_lang_ClassFormatError())) { - if (VerboseVerification) { - tty->print_cr("Fail over class verification to old verifier for: %s", klassName); 
- } + log_info(verification)("Fail over class verification to old verifier for: %s", klassName); log_info(classinit)("Fail over class verification to old verifier for: %s", klassName); exception_name = inference_verify( klass, message_buffer, message_buffer_len, THREAD); @@ -192,10 +191,10 @@ } if (log_is_enabled(Info, classinit)){ - log_end_verification(LogHandle(classinit)::info_stream(), klassName, exception_name, THREAD); + log_end_verification(Log(classinit)::info_stream(), klassName, exception_name, THREAD); } - if (VerboseVerification){ - log_end_verification(tty, klassName, exception_name, THREAD); + if (log_is_enabled(Info, verification)){ + log_end_verification(Log(verification)::info_stream(), klassName, exception_name, THREAD); } if (HAS_PENDING_EXCEPTION) { @@ -206,7 +205,7 @@ ResourceMark rm(THREAD); instanceKlassHandle kls = SystemDictionary::resolve_or_fail(exception_name, true, CHECK_false); - if (log_is_enabled(Info, classresolve)) { + if (log_is_enabled(Debug, classresolve)) { Verifier::trace_class_resolution(kls(), klass()); } @@ -249,7 +248,7 @@ // As of the fix for 4486457 we disable verification for all of the // dynamically-generated bytecodes associated with the 1.4 // reflection implementation, not just those associated with - // sun/reflect/SerializationConstructorAccessor. + // jdk/internal/reflect/SerializationConstructorAccessor. // NOTE: this is called too early in the bootstrapping process to be // guarded by Universe::is_gte_jdk14x_version(). // Also for lambda generated code, gte jdk8 @@ -269,9 +268,7 @@ } ResourceMark rm(THREAD); - if (VerboseVerification) { - tty->print_cr("Verifying class %s with old format", klass->external_name()); - } + log_info(verification)("Verifying class %s with old format", klass->external_name()); jclass cls = (jclass) JNIHandles::make_local(env, klass->java_mirror()); jint result; @@ -583,10 +580,7 @@ } void ClassVerifier::verify_class(TRAPS) { - if (VerboseVerification) { - tty->print_cr("Verifying class %s with new format", - _klass->external_name()); - } + log_info(verification)("Verifying class %s with new format", _klass->external_name()); Array* methods = _klass->methods(); int num_methods = methods->length(); @@ -606,10 +600,7 @@ } if (was_recursively_verified()){ - if (VerboseVerification){ - tty->print_cr("Recursive verification detected for: %s", - _klass->external_name()); - } + log_info(verification)("Recursive verification detected for: %s", _klass->external_name()); log_info(classinit)("Recursive verification detected for: %s", _klass->external_name()); } @@ -618,9 +609,7 @@ void ClassVerifier::verify_method(const methodHandle& m, TRAPS) { HandleMark hm(THREAD); _method = m; // initialize _method - if (VerboseVerification) { - tty->print_cr("Verifying method %s", m->name_and_sig_as_C_string()); - } + log_info(verification)("Verifying method %s", m->name_and_sig_as_C_string()); // For clang, the only good constant format string is a literal constant format string. 
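The verifier hunks in this file swap the old VerboseVerification flag for the unified logging 'verification' tag while keeping the same guard-then-format shape: test log_is_enabled(...) first, and only then take a ResourceMark and pay for print_on(). A small stand-alone sketch of that shape in plain C++ (not the logging/log.hpp API), for readers unfamiliar with the idiom:

  #include <cstdio>
  #include <string>

  // Toy stand-in for log_is_enabled(Info, verification).
  static bool log_enabled = true;

  // Pretend this is expensive to build (like StackMapFrame::print_on()).
  static std::string render_frame_state(int bci, const char* opcode) {
    return "offset = " + std::to_string(bci) + ", opcode = " + opcode;
  }

  static void verify_bytecode(int bci, const char* opcode) {
    // Guard first so the formatting cost is only paid when logging is on,
    // mirroring the log_is_enabled(...) checks added in the patch.
    if (log_enabled) {
      std::string msg = render_frame_state(bci, opcode);
      std::printf("[info][verification] %s\n", msg.c_str());
    }
    // ... actual verification work would happen here ...
  }

  int main() {
    verify_bytecode(0, "aload_0");
    log_enabled = false;
    verify_bytecode(1, "invokespecial");  // no output and no formatting cost
    return 0;
  }

The guard matters here because, when enabled, the per-opcode frame and stackmap printing allocates in the resource area on every bytecode verified.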
#define bad_type_msg "Bad type on operand stack in %s" @@ -667,8 +656,9 @@ StackMapTable stackmap_table(&reader, ¤t_frame, max_locals, max_stack, code_data, code_length, CHECK_VERIFY(this)); - if (VerboseVerification) { - stackmap_table.print_on(tty); + if (log_is_enabled(Info, verification)) { + ResourceMark rm(THREAD); + stackmap_table.print_on(Log(verification)::info_stream()); } RawBytecodeStream bcs(m); @@ -708,12 +698,11 @@ VerificationType type, type2; VerificationType atype; -#ifndef PRODUCT - if (VerboseVerification) { - current_frame.print_on(tty); - tty->print_cr("offset = %d, opcode = %s", bci, Bytecodes::name(opcode)); + if (log_is_enabled(Info, verification)) { + ResourceMark rm(THREAD); + current_frame.print_on(Log(verification)::info_stream()); + log_info(verification)("offset = %d, opcode = %s", bci, Bytecodes::name(opcode)); } -#endif // Make sure wide instruction is in correct format if (bcs.is_wide()) { @@ -2005,7 +1994,7 @@ name, Handle(THREAD, loader), Handle(THREAD, protection_domain), true, THREAD); - if (log_is_enabled(Info, classresolve)) { + if (log_is_enabled(Debug, classresolve)) { instanceKlassHandle cur_class = current_class(); Verifier::trace_class_resolution(kls, cur_class()); } @@ -2533,11 +2522,10 @@ verify_error(ErrorContext::bad_code(bci), "Bad method call from after the start of a try block"); return; - } else if (VerboseVerification) { - ResourceMark rm; - tty->print_cr( - "Survived call to ends_in_athrow(): %s", - current_class()->name()->as_C_string()); + } else if (log_is_enabled(Info, verification)) { + ResourceMark rm(THREAD); + log_info(verification)("Survived call to ends_in_athrow(): %s", + current_class()->name()->as_C_string()); } } } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/classfile/vmSymbols.hpp --- a/hotspot/src/share/vm/classfile/vmSymbols.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/classfile/vmSymbols.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -102,7 +102,6 @@ template(java_security_SecureClassLoader, "java/security/SecureClassLoader") \ template(java_net_URL, "java/net/URL") \ template(java_util_jar_Manifest, "java/util/jar/Manifest") \ - template(impliesCreateAccessControlContext_name, "impliesCreateAccessControlContext") \ template(java_io_OutputStream, "java/io/OutputStream") \ template(java_io_Reader, "java/io/Reader") \ template(java_io_BufferedReader, "java/io/BufferedReader") \ @@ -228,26 +227,20 @@ \ /* Support for reflection based on dynamic bytecode generation (JDK 1.4 and above) */ \ \ - template(sun_reflect_FieldInfo, "sun/reflect/FieldInfo") \ - template(sun_reflect_MethodInfo, "sun/reflect/MethodInfo") \ - template(sun_reflect_MagicAccessorImpl, "sun/reflect/MagicAccessorImpl") \ - template(sun_reflect_MethodAccessorImpl, "sun/reflect/MethodAccessorImpl") \ - template(sun_reflect_ConstructorAccessorImpl, "sun/reflect/ConstructorAccessorImpl") \ - template(sun_reflect_SerializationConstructorAccessorImpl, "sun/reflect/SerializationConstructorAccessorImpl") \ - template(sun_reflect_DelegatingClassLoader, "sun/reflect/DelegatingClassLoader") \ - template(sun_reflect_Reflection, "sun/reflect/Reflection") \ - template(sun_reflect_CallerSensitive, "sun/reflect/CallerSensitive") \ - template(sun_reflect_CallerSensitive_signature, "Lsun/reflect/CallerSensitive;") \ + template(reflect_MagicAccessorImpl, "jdk/internal/reflect/MagicAccessorImpl") \ + template(reflect_MethodAccessorImpl, "jdk/internal/reflect/MethodAccessorImpl") \ + template(reflect_ConstructorAccessorImpl, 
"jdk/internal/reflect/ConstructorAccessorImpl") \ + template(reflect_DelegatingClassLoader, "jdk/internal/reflect/DelegatingClassLoader") \ + template(reflect_Reflection, "jdk/internal/reflect/Reflection") \ + template(reflect_CallerSensitive, "jdk/internal/reflect/CallerSensitive") \ + template(reflect_CallerSensitive_signature, "Ljdk/internal/reflect/CallerSensitive;") \ template(checkedExceptions_name, "checkedExceptions") \ template(clazz_name, "clazz") \ template(exceptionTypes_name, "exceptionTypes") \ template(modifiers_name, "modifiers") \ template(newConstructor_name, "newConstructor") \ - template(newConstructor_signature, "(Lsun/reflect/MethodInfo;)Ljava/lang/reflect/Constructor;") \ template(newField_name, "newField") \ - template(newField_signature, "(Lsun/reflect/FieldInfo;)Ljava/lang/reflect/Field;") \ template(newMethod_name, "newMethod") \ - template(newMethod_signature, "(Lsun/reflect/MethodInfo;)Ljava/lang/reflect/Method;") \ template(invokeBasic_name, "invokeBasic") \ template(linkToVirtual_name, "linkToVirtual") \ template(linkToStatic_name, "linkToStatic") \ @@ -269,9 +262,9 @@ template(executable_name, "executable") \ template(parameter_annotations_name, "parameterAnnotations") \ template(annotation_default_name, "annotationDefault") \ - template(sun_reflect_ConstantPool, "sun/reflect/ConstantPool") \ + template(reflect_ConstantPool, "jdk/internal/reflect/ConstantPool") \ template(ConstantPool_name, "constantPoolOop") \ - template(sun_reflect_UnsafeStaticFieldAccessorImpl, "sun/reflect/UnsafeStaticFieldAccessorImpl")\ + template(reflect_UnsafeStaticFieldAccessorImpl, "jdk/internal/reflect/UnsafeStaticFieldAccessorImpl")\ template(base_name, "base") \ /* Type Annotations (JDK 8 and above) */ \ template(type_annotations_name, "typeAnnotations") \ @@ -327,7 +320,6 @@ template(java_lang_StackFrameInfo, "java/lang/StackFrameInfo") \ template(java_lang_LiveStackFrameInfo, "java/lang/LiveStackFrameInfo") \ template(java_lang_StackStreamFactory_AbstractStackWalker, "java/lang/StackStreamFactory$AbstractStackWalker") \ - template(doStackWalk_name, "doStackWalk") \ template(doStackWalk_signature, "(JIIII)Ljava/lang/Object;") \ template(asPrimitive_name, "asPrimitive") \ template(asPrimitive_int_signature, "(I)Ljava/lang/LiveStackFrame$PrimitiveValue;") \ @@ -378,14 +370,13 @@ template(type_name, "type") \ template(findNative_name, "findNative") \ template(deadChild_name, "deadChild") \ - template(addClass_name, "addClass") \ - template(throwIllegalAccessError_name, "throwIllegalAccessError") \ template(getFromClass_name, "getFromClass") \ template(dispatch_name, "dispatch") \ template(getSystemClassLoader_name, "getSystemClassLoader") \ template(fillInStackTrace_name, "fillInStackTrace") \ template(getCause_name, "getCause") \ template(initCause_name, "initCause") \ + template(depth_name, "depth") \ template(setProperty_name, "setProperty") \ template(getProperty_name, "getProperty") \ template(context_name, "context") \ @@ -473,10 +464,6 @@ template(url_code_signer_array_void_signature, "(Ljava/net/URL;[Ljava/security/CodeSigner;)V") \ template(module_entry_name, "module_entry") \ \ - /* non-intrinsic name/signature pairs: */ \ - template(register_method_name, "register") \ - do_alias(register_method_signature, object_void_signature) \ - \ /* name symbols needed by intrinsics */ \ VM_INTRINSICS_DO(VM_INTRINSIC_IGNORE, VM_SYMBOL_IGNORE, template, VM_SYMBOL_IGNORE, VM_ALIAS_IGNORE) \ \ @@ -877,12 +864,12 @@ do_intrinsic(_Class_cast, java_lang_Class, Class_cast_name, 
object_object_signature, F_R) \ do_name( Class_cast_name, "cast") \ \ - do_intrinsic(_getClassAccessFlags, sun_reflect_Reflection, getClassAccessFlags_name, class_int_signature, F_SN) \ + do_intrinsic(_getClassAccessFlags, reflect_Reflection, getClassAccessFlags_name, class_int_signature, F_SN) \ do_name( getClassAccessFlags_name, "getClassAccessFlags") \ do_intrinsic(_getLength, java_lang_reflect_Array, getLength_name, object_int_signature, F_SN) \ do_name( getLength_name, "getLength") \ \ - do_intrinsic(_getCallerClass, sun_reflect_Reflection, getCallerClass_name, void_class_signature, F_SN) \ + do_intrinsic(_getCallerClass, reflect_Reflection, getCallerClass_name, void_class_signature, F_SN) \ do_name( getCallerClass_name, "getCallerClass") \ \ do_intrinsic(_newArray, java_lang_reflect_Array, newArray_name, newArray_signature, F_SN) \ diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/code/codeBlob.cpp --- a/hotspot/src/share/vm/code/codeBlob.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/code/codeBlob.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -31,6 +31,7 @@ #include "interpreter/bytecode.hpp" #include "memory/allocation.inline.hpp" #include "memory/heap.hpp" +#include "memory/resourceArea.hpp" #include "oops/oop.inline.hpp" #include "prims/forte.hpp" #include "runtime/handles.inline.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/code/codeCache.cpp --- a/hotspot/src/share/vm/code/codeCache.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/code/codeCache.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1042,6 +1042,14 @@ } } +void CodeCache::cleanup_inline_caches() { + assert_locked_or_safepoint(CodeCache_lock); + NMethodIterator iter; + while(iter.next_alive()) { + iter.method()->cleanup_inline_caches(/*clean_all=*/true); + } +} + // Keeps track of time spent for checking dependencies NOT_PRODUCT(static elapsedTimer dependentCheckTime;) diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/code/codeCache.hpp --- a/hotspot/src/share/vm/code/codeCache.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/code/codeCache.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -201,6 +201,7 @@ static bool needs_cache_clean() { return _needs_cache_clean; } static void set_needs_cache_clean(bool v) { _needs_cache_clean = v; } static void clear_inline_caches(); // clear all inline caches + static void cleanup_inline_caches(); // Returns true if an own CodeHeap for the given CodeBlobType is available static bool heap_available(int code_blob_type); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/code/compiledIC.cpp --- a/hotspot/src/share/vm/code/compiledIC.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/code/compiledIC.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -33,6 +33,7 @@ #include "interpreter/linkResolver.hpp" #include "memory/metadataFactory.hpp" #include "memory/oopFactory.hpp" +#include "memory/resourceArea.hpp" #include "oops/method.hpp" #include "oops/oop.inline.hpp" #include "oops/symbol.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/code/dependencies.cpp --- a/hotspot/src/share/vm/code/dependencies.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/code/dependencies.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -30,6 +30,7 @@ #include "classfile/javaClasses.inline.hpp" #include "code/dependencies.hpp" #include "compiler/compileLog.hpp" +#include "memory/resourceArea.hpp" #include "oops/oop.inline.hpp" #include "oops/objArrayKlass.hpp" #include "runtime/handles.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/code/nmethod.cpp --- a/hotspot/src/share/vm/code/nmethod.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/code/nmethod.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -36,6 +36,7 @@ #include "compiler/directivesParser.hpp" #include "compiler/disassembler.hpp" #include "interpreter/bytecode.hpp" +#include "memory/resourceArea.hpp" #include "oops/methodData.hpp" #include "oops/oop.inline.hpp" #include "prims/jvmtiRedefineClassesTrace.hpp" @@ -1138,8 +1139,7 @@ } } - -void nmethod::cleanup_inline_caches() { +void nmethod::cleanup_inline_caches(bool clean_all/*=false*/) { assert_locked_or_safepoint(CompiledIC_lock); // If the method is not entrant or zombie then a JMP is plastered over the @@ -1169,7 +1169,7 @@ if( cb != NULL && cb->is_nmethod() ) { nmethod* nm = (nmethod*)cb; // Clean inline caches pointing to zombie, non-entrant and unloaded methods - if (!nm->is_in_use() || (nm->method()->code() != nm)) ic->set_to_clean(is_alive()); + if (clean_all || !nm->is_in_use() || (nm->method()->code() != nm)) ic->set_to_clean(is_alive()); } break; } @@ -1179,7 +1179,7 @@ if( cb != NULL && cb->is_nmethod() ) { nmethod* nm = (nmethod*)cb; // Clean inline caches pointing to zombie, non-entrant and unloaded methods - if (!nm->is_in_use() || (nm->method()->code() != nm)) csc->set_to_clean(); + if (clean_all || !nm->is_in_use() || (nm->method()->code() != nm)) csc->set_to_clean(); } break; } @@ -1321,7 +1321,7 @@ // Break cycle between nmethod & method if (log_is_enabled(Trace, classunload)) { - outputStream* log = LogHandle(classunload)::trace_stream(); + outputStream* log = Log(classunload)::trace_stream(); log->print_cr("making nmethod " INTPTR_FORMAT " unloadable, Method*(" INTPTR_FORMAT "), cause(" INTPTR_FORMAT ")", diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/code/nmethod.hpp --- a/hotspot/src/share/vm/code/nmethod.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/code/nmethod.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -599,7 +599,7 @@ // Inline cache support void clear_inline_caches(); void clear_ic_stubs(); - void cleanup_inline_caches(); + void cleanup_inline_caches(bool clean_all = false); bool inlinecache_check_contains(address addr) const { return (addr >= code_begin() && addr < verified_entry_point()); } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/compiler/compileBroker.cpp --- a/hotspot/src/share/vm/compiler/compileBroker.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/compiler/compileBroker.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -32,8 +32,10 @@ #include "compiler/compileLog.hpp" #include "compiler/compilerOracle.hpp" #include "compiler/directivesParser.hpp" +#include 
"gc/shared/referencePendingListLocker.hpp" #include "interpreter/linkResolver.hpp" #include "memory/allocation.inline.hpp" +#include "memory/resourceArea.hpp" #include "oops/methodData.hpp" #include "oops/method.hpp" #include "oops/oop.inline.hpp" @@ -48,6 +50,7 @@ #include "runtime/os.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/sweeper.hpp" +#include "runtime/timerTrace.hpp" #include "trace/tracing.hpp" #include "utilities/dtrace.hpp" #include "utilities/events.hpp" @@ -386,13 +389,16 @@ task = CompilationPolicy::policy()->select_task(this); } - // Save method pointers across unlock safepoint. The task is removed from - // the compilation queue, which is walked during RedefineClasses. - save_method = methodHandle(task->method()); - save_hot_method = methodHandle(task->hot_method()); + if (task != NULL) { + // Save method pointers across unlock safepoint. The task is removed from + // the compilation queue, which is walked during RedefineClasses. + save_method = methodHandle(task->method()); + save_hot_method = methodHandle(task->hot_method()); - remove(task); - purge_stale_tasks(); // may temporarily release MCQ lock + remove(task); + purge_stale_tasks(); // may temporarily release MCQ lock + } + return task; } @@ -901,7 +907,7 @@ // the pending list lock or a 3-way deadlock may occur // between the reference handler thread, a GC (instigated // by a compiler thread), and compiled method registration. - if (InstanceRefKlass::owns_pending_list_lock(JavaThread::current())) { + if (ReferencePendingListLocker::is_locked_by_self()) { return; } @@ -1781,7 +1787,8 @@ bool is_osr = (osr_bci != standard_entry_bci); bool should_log = (thread->log() != NULL); bool should_break = false; - int task_level = task->comp_level(); + const int task_level = task->comp_level(); + AbstractCompiler* comp = task->compiler(); DirectiveSet* directive; { @@ -1793,7 +1800,7 @@ assert(!method->is_native(), "no longer compile natives"); // Look up matching directives - directive = DirectivesStack::getMatchingDirective(method, compiler(task_level)); + directive = DirectivesStack::getMatchingDirective(method, comp); // Save information about this method in case of failure. set_last_compile(thread, method, is_osr, task_level); @@ -1812,13 +1819,13 @@ int compilable = ciEnv::MethodCompilable; const char* failure_reason = NULL; const char* retry_message = NULL; - AbstractCompiler *comp = compiler(task_level); int system_dictionary_modification_counter; { MutexLocker locker(Compile_lock, thread); system_dictionary_modification_counter = SystemDictionary::number_of_modifications(); } + #if INCLUDE_JVMCI if (UseJVMCICompiler && comp != NULL && comp->is_jvmci()) { JVMCICompiler* jvmci = (JVMCICompiler*) comp; diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/compiler/compileTask.cpp --- a/hotspot/src/share/vm/compiler/compileTask.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/compiler/compileTask.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -27,6 +27,7 @@ #include "compiler/compileLog.hpp" #include "compiler/compileBroker.hpp" #include "compiler/compilerDirectives.hpp" +#include "memory/resourceArea.hpp" CompileTask* CompileTask::_task_free_list = NULL; #ifdef ASSERT @@ -122,6 +123,13 @@ _next = NULL; } +/** + * Returns the compiler for this task. + */ +AbstractCompiler* CompileTask::compiler() { + return CompileBroker::compiler(_comp_level); +} + // ------------------------------------------------------------------ // CompileTask::code/set_code // diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/compiler/compileTask.hpp --- a/hotspot/src/share/vm/compiler/compileTask.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/compiler/compileTask.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -115,6 +115,8 @@ int comp_level() { return _comp_level;} void set_comp_level(int comp_level) { _comp_level = comp_level;} + AbstractCompiler* compiler(); + int num_inlined_bytecodes() const { return _num_inlined_bytecodes; } void set_num_inlined_bytecodes(int n) { _num_inlined_bytecodes = n; } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/compiler/compilerDirectives.cpp --- a/hotspot/src/share/vm/compiler/compilerDirectives.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/compiler/compilerDirectives.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -28,6 +28,7 @@ #include "compiler/abstractCompiler.hpp" #include "compiler/compilerDirectives.hpp" #include "compiler/compilerOracle.hpp" +#include "memory/resourceArea.hpp" CompilerDirectives::CompilerDirectives() :_match(NULL), _next(NULL), _ref_count(0) { _c1_store = new DirectiveSet(this); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/compiler/directivesParser.cpp --- a/hotspot/src/share/vm/compiler/directivesParser.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/compiler/directivesParser.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -26,6 +26,7 @@ #include "compiler/compileBroker.hpp" #include "compiler/directivesParser.hpp" #include "memory/allocation.inline.hpp" +#include "memory/resourceArea.hpp" #include "runtime/os.hpp" #include diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/compiler/disassembler.cpp --- a/hotspot/src/share/vm/compiler/disassembler.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/compiler/disassembler.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -28,6 +28,7 @@ #include "compiler/disassembler.hpp" #include "gc/shared/cardTableModRefBS.hpp" #include "gc/shared/collectedHeap.hpp" +#include "memory/resourceArea.hpp" #include "oops/oop.inline.hpp" #include "runtime/fprofiler.hpp" #include "runtime/handles.inline.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/compiler/methodLiveness.cpp --- a/hotspot/src/share/vm/compiler/methodLiveness.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/compiler/methodLiveness.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,6 +30,8 @@ #include "interpreter/bytecode.hpp" #include "interpreter/bytecodes.hpp" #include "memory/allocation.inline.hpp" +#include "memory/resourceArea.hpp" +#include "runtime/timerTrace.hpp" #include "utilities/bitMap.inline.hpp" // The MethodLiveness class performs a simple liveness analysis on a method diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/compiler/methodMatcher.cpp --- a/hotspot/src/share/vm/compiler/methodMatcher.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/compiler/methodMatcher.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,7 @@ #include "precompiled.hpp" #include "compiler/methodMatcher.hpp" #include "memory/oopFactory.hpp" +#include "memory/resourceArea.hpp" #include "oops/oop.inline.hpp" // The JVM specification defines the allowed characters. 
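A side note on the pattern visible in several of the hunks above: each of these .cpp files gains a direct #include "memory/resourceArea.hpp", presumably because they declare ResourceMarks (or make resource-area allocations) and previously received the header only transitively. A minimal sketch of the idiom those includes support, assuming the usual HotSpot resource-area macros from memory/allocation.hpp; the function name is invented for illustration:

    #include "memory/allocation.hpp"
    #include "memory/resourceArea.hpp"

    static void example_resource_scope() {
      ResourceMark rm;                            // resource-area memory below is released when rm leaves scope
      char* buf = NEW_RESOURCE_ARRAY(char, 256);  // resource-area allocation, no explicit free needed
      buf[0] = '\0';
    }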
diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/cms/compactibleFreeListSpace.cpp --- a/hotspot/src/share/vm/gc/cms/compactibleFreeListSpace.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/cms/compactibleFreeListSpace.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -30,7 +30,6 @@ #include "gc/shared/blockOffsetTable.inline.hpp" #include "gc/shared/collectedHeap.inline.hpp" #include "gc/shared/genCollectedHeap.hpp" -#include "gc/shared/liveRange.hpp" #include "gc/shared/space.inline.hpp" #include "gc/shared/spaceDecorator.hpp" #include "memory/allocation.inline.hpp" @@ -501,7 +500,7 @@ void CompactibleFreeListSpace::reportFreeListStatistics(const char* title) const { assert_lock_strong(&_freelistLock); - LogHandle(gc, freelist, stats) log; + Log(gc, freelist, stats) log; if (!log.is_debug()) { return; } @@ -1931,11 +1930,6 @@ if (blk->_ptr == NULL) { refillLinearAllocBlock(blk); } - if (PrintMiscellaneous && Verbose) { - if (blk->_word_size == 0) { - warning("CompactibleFreeListSpace(prologue):: Linear allocation failure"); - } - } } void @@ -2205,7 +2199,7 @@ } } if (res == 0) { - LogHandle(gc, verify) log; + Log(gc, verify) log; log.error("Livelock: no rank reduction!"); log.error(" Current: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n" " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n", @@ -2379,14 +2373,14 @@ void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const { assert_lock_strong(&_freelistLock); - LogHandle(gc, freelist, census) log; - if (!log.is_debug()) { + LogTarget(Debug, gc, freelist, census) log; + if (!log.is_enabled()) { return; } AdaptiveFreeList total; - log.debug("end sweep# " SIZE_FORMAT, sweep_count); + log.print("end sweep# " SIZE_FORMAT, sweep_count); ResourceMark rm; - outputStream* out = log.debug_stream(); + outputStream* out = log.stream(); AdaptiveFreeList::print_labels_on(out, "size"); size_t total_free = 0; for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { @@ -2408,8 +2402,8 @@ total.set_split_deaths(total.split_deaths() + fl->split_deaths()); } total.print_on(out, "TOTAL"); - log.debug("Total free in indexed lists " SIZE_FORMAT " words", total_free); - log.debug("growth: %8.5f deficit: %8.5f", + log.print("Total free in indexed lists " SIZE_FORMAT " words", total_free); + log.print("growth: %8.5f deficit: %8.5f", (double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/ (total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0), (double)(total.desired() - total.count())/(total.desired() != 0 ? 
(double)total.desired() : 1.0)); @@ -2541,7 +2535,7 @@ _blocks_to_claim[i].sample( MAX2(CMSOldPLABMin, MIN2(CMSOldPLABMax, - _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills)))); + _global_num_blocks[i]/_global_num_workers[i]/CMSOldPLABNumRefills))); } // Reset counters for next round _global_num_workers[i] = 0; diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp --- a/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -502,7 +502,7 @@ { MutexLockerEx x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag); if (!_markBitMap.allocate(_span)) { - warning("Failed to allocate CMS Bit Map"); + log_warning(gc)("Failed to allocate CMS Bit Map"); return; } assert(_markBitMap.covers(_span), "_markBitMap inconsistency?"); @@ -513,7 +513,7 @@ } if (!_markStack.allocate(MarkStackSize)) { - warning("Failed to allocate CMS Marking Stack"); + log_warning(gc)("Failed to allocate CMS Marking Stack"); return; } @@ -527,8 +527,7 @@ _conc_workers = new YieldingFlexibleWorkGang("CMS Thread", ConcGCThreads, true); if (_conc_workers == NULL) { - warning("GC/CMS: _conc_workers allocation failure: " - "forcing -CMSConcurrentMTEnabled"); + log_warning(gc)("GC/CMS: _conc_workers allocation failure: forcing -CMSConcurrentMTEnabled"); CMSConcurrentMTEnabled = false; } else { _conc_workers->initialize_workers(); @@ -559,7 +558,7 @@ && num_queues > 0) { _task_queues = new OopTaskQueueSet(num_queues); if (_task_queues == NULL) { - warning("task_queues allocation failure."); + log_warning(gc)("task_queues allocation failure."); return; } _hash_seed = NEW_C_HEAP_ARRAY(int, num_queues, mtGC); @@ -567,7 +566,7 @@ for (i = 0; i < num_queues; i++) { PaddedOopTaskQueue *q = new PaddedOopTaskQueue(); if (q == NULL) { - warning("work_queue allocation failure."); + log_warning(gc)("work_queue allocation failure."); return; } _task_queues->register_queue(i, q); @@ -694,7 +693,7 @@ // At a promotion failure dump information on block layout in heap // (cms old generation). void ConcurrentMarkSweepGeneration::promotion_failure_occurred() { - LogHandle(gc, promotion) log; + Log(gc, promotion) log; if (log.is_trace()) { ResourceMark rm; cmsSpace()->dump_at_safepoint_with_locks(collector(), log.trace_stream()); @@ -753,7 +752,7 @@ size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage)); assert(desired_capacity >= capacity(), "invalid expansion size"); size_t expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes); - LogHandle(gc) log; + Log(gc) log; if (log.is_trace()) { size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage)); log.trace("From compute_new_size: "); @@ -1118,7 +1117,7 @@ // ------------------------------------------------------------------ // Print out lots of information which affects the initiation of // a collection. - LogHandle(gc) log; + Log(gc) log; if (log.is_trace() && stats().valid()) { log.trace("CMSCollector shouldConcurrentCollect: "); ResourceMark rm; @@ -1413,7 +1412,7 @@ if (_foregroundGCShouldWait) { // We are going to be waiting for action for the CMS thread; // it had better not be gone (for instance at shutdown)! 
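// Illustrative sketch (assumes the JDK 9 unified logging macros in logging/log.hpp): the
// warning()/LogHandle conversions in the CMS hunks above all converge on this idiom.
//
//   log_warning(gc)("Failed to allocate CMS Bit Map");   // unconditional, tagged warning
//
//   LogTarget(Debug, gc, freelist, census) lt;           // named target; check before formatting
//   if (lt.is_enabled()) {
//     lt.print("end sweep# " SIZE_FORMAT, sweep_count);  // emitted under -Xlog:gc+freelist+census=debug
//   }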
- assert(ConcurrentMarkSweepThread::cmst() != NULL, + assert(ConcurrentMarkSweepThread::cmst() != NULL && !ConcurrentMarkSweepThread::cmst()->has_terminated(), "CMS thread must be running"); // Wait here until the background collector gives us the go-ahead ConcurrentMarkSweepThread::clear_CMS_flag( @@ -1519,7 +1518,7 @@ gch->pre_full_gc_dump(gc_timer); - GCTraceTime(Trace, gc) t("CMS:MSC"); + GCTraceTime(Trace, gc, phases) t("CMS:MSC"); // Temporarily widen the span of the weak reference processing to // the entire heap. @@ -1606,7 +1605,7 @@ } void CMSCollector::print_eden_and_survivor_chunk_arrays() { - LogHandle(gc, heap) log; + Log(gc, heap) log; if (!log.is_trace()) { return; } @@ -2222,7 +2221,7 @@ bool do_bit(size_t offset) { HeapWord* addr = _marks->offsetToHeapWord(offset); if (!_marks->isMarked(addr)) { - LogHandle(gc, verify) log; + Log(gc, verify) log; ResourceMark rm; oop(addr)->print_on(log.error_stream()); log.error(" (" INTPTR_FORMAT " should have been marked)", p2i(addr)); @@ -2235,7 +2234,7 @@ }; bool CMSCollector::verify_after_remark() { - GCTraceTime(Info, gc, verify) tm("Verifying CMS Marking."); + GCTraceTime(Info, gc, phases, verify) tm("Verifying CMS Marking."); MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag); static bool init = false; @@ -2287,17 +2286,16 @@ // all marking, then check if the new marks-vector is // a subset of the CMS marks-vector. verify_after_remark_work_1(); - } else if (CMSRemarkVerifyVariant == 2) { + } else { + guarantee(CMSRemarkVerifyVariant == 2, "Range checking for CMSRemarkVerifyVariant should guarantee 1 or 2"); // In this second variant of verification, we flag an error // (i.e. an object reachable in the new marks-vector not reachable // in the CMS marks-vector) immediately, also indicating the // identify of an object (A) that references the unmarked object (B) -- // presumably, a mutation to A failed to be picked up by preclean/remark? verify_after_remark_work_2(); - } else { - warning("Unrecognized value " UINTX_FORMAT " for CMSRemarkVerifyVariant", - CMSRemarkVerifyVariant); - } + } + return true; } @@ -2349,7 +2347,7 @@ VerifyMarkedClosure vcl(markBitMap()); verification_mark_bm()->iterate(&vcl); if (vcl.failed()) { - LogHandle(gc, verify) log; + Log(gc, verify) log; log.error("Failed marking verification after remark"); ResourceMark rm; gch->print_on(log.error_stream()); @@ -2820,7 +2818,7 @@ // CMS collection cycle. setup_cms_unloading_and_verification_state(); - GCTraceTime(Trace, gc) ts("checkpointRootsInitialWork", _gc_timer_cm); + GCTraceTime(Trace, gc, phases) ts("checkpointRootsInitialWork", _gc_timer_cm); // Reset all the PLAB chunk arrays if necessary. if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) { @@ -3650,7 +3648,7 @@ // XXX FIX ME!!! YSR size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0; while (!(should_abort_preclean() || - ConcurrentMarkSweepThread::should_terminate())) { + ConcurrentMarkSweepThread::cmst()->should_terminate())) { workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2); cumworkdone += workdone; loops++; @@ -4104,8 +4102,6 @@ // expect it to be false and set to true FlagSetting fl(gch->_is_gc_active, false); - GCTraceTime(Trace, gc) tm("Pause Scavenge Before Remark", _gc_timer_cm); - gch->do_collection(true, // full (i.e. 
force, see below) false, // !clear_all_soft_refs 0, // size @@ -4123,7 +4119,7 @@ } void CMSCollector::checkpointRootsFinalWork() { - GCTraceTime(Trace, gc) tm("checkpointRootsFinalWork", _gc_timer_cm); + GCTraceTime(Trace, gc, phases) tm("checkpointRootsFinalWork", _gc_timer_cm); assert(haveFreelistLocks(), "must have free list locks"); assert_lock_strong(bitMapLock()); @@ -4173,10 +4169,10 @@ // the most recent young generation GC, minus those cleaned up by the // concurrent precleaning. if (CMSParallelRemarkEnabled) { - GCTraceTime(Debug, gc) t("Rescan (parallel)", _gc_timer_cm); + GCTraceTime(Debug, gc, phases) t("Rescan (parallel)", _gc_timer_cm); do_remark_parallel(); } else { - GCTraceTime(Debug, gc) t("Rescan (non-parallel)", _gc_timer_cm); + GCTraceTime(Debug, gc, phases) t("Rescan (non-parallel)", _gc_timer_cm); do_remark_non_parallel(); } } @@ -4184,7 +4180,7 @@ verify_overflow_empty(); { - GCTraceTime(Trace, gc) ts("refProcessingWork", _gc_timer_cm); + GCTraceTime(Trace, gc, phases) ts("refProcessingWork", _gc_timer_cm); refProcessingWork(); } verify_work_stacks_empty(); @@ -4907,7 +4903,7 @@ NULL, // space is set further below &_markBitMap, &_markStack, &mrias_cl); { - GCTraceTime(Trace, gc) t("Grey Object Rescan", _gc_timer_cm); + GCTraceTime(Trace, gc, phases) t("Grey Object Rescan", _gc_timer_cm); // Iterate over the dirty cards, setting the corresponding bits in the // mod union table. { @@ -4941,7 +4937,7 @@ Universe::verify(); } { - GCTraceTime(Trace, gc) t("Root Rescan", _gc_timer_cm); + GCTraceTime(Trace, gc, phases) t("Root Rescan", _gc_timer_cm); verify_work_stacks_empty(); @@ -4963,7 +4959,7 @@ } { - GCTraceTime(Trace, gc) t("Visit Unhandled CLDs", _gc_timer_cm); + GCTraceTime(Trace, gc, phases) t("Visit Unhandled CLDs", _gc_timer_cm); verify_work_stacks_empty(); @@ -4982,7 +4978,7 @@ } { - GCTraceTime(Trace, gc) t("Dirty Klass Scan", _gc_timer_cm); + GCTraceTime(Trace, gc, phases) t("Dirty Klass Scan", _gc_timer_cm); verify_work_stacks_empty(); @@ -5186,7 +5182,7 @@ _span, &_markBitMap, &_markStack, &cmsKeepAliveClosure, false /* !preclean */); { - GCTraceTime(Debug, gc) t("Weak Refs Processing", _gc_timer_cm); + GCTraceTime(Debug, gc, phases) t("Reference Processing", _gc_timer_cm); ReferenceProcessorStats stats; if (rp->processing_is_mt()) { @@ -5228,7 +5224,7 @@ if (should_unload_classes()) { { - GCTraceTime(Debug, gc) t("Class Unloading", _gc_timer_cm); + GCTraceTime(Debug, gc, phases) t("Class Unloading", _gc_timer_cm); // Unload classes and purge the SystemDictionary. bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure); @@ -5241,13 +5237,13 @@ } { - GCTraceTime(Debug, gc) t("Scrub Symbol Table", _gc_timer_cm); + GCTraceTime(Debug, gc, phases) t("Scrub Symbol Table", _gc_timer_cm); // Clean up unreferenced symbols in symbol table. SymbolTable::unlink(); } { - GCTraceTime(Debug, gc) t("Scrub String Table", _gc_timer_cm); + GCTraceTime(Debug, gc, phases) t("Scrub String Table", _gc_timer_cm); // Delete entries for dead interned strings. StringTable::unlink(&_is_alive_closure); } @@ -5657,13 +5653,13 @@ ReservedSpace brs(ReservedSpace::allocation_align_size_up( (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1)); if (!brs.is_reserved()) { - warning("CMS bit map allocation failure"); + log_warning(gc)("CMS bit map allocation failure"); return false; } // For now we'll just commit all of the bit map up front. // Later on we'll try to be more parsimonious with swap. 
if (!_virtual_space.initialize(brs, brs.size())) { - warning("CMS bit map backing store failure"); + log_warning(gc)("CMS bit map backing store failure"); return false; } assert(_virtual_space.committed_size() == brs.size(), @@ -5749,11 +5745,11 @@ ReservedSpace rs(ReservedSpace::allocation_align_size_up( size * sizeof(oop))); if (!rs.is_reserved()) { - warning("CMSMarkStack allocation failure"); + log_warning(gc)("CMSMarkStack allocation failure"); return false; } if (!_virtual_space.initialize(rs, rs.size())) { - warning("CMSMarkStack backing store failure"); + log_warning(gc)("CMSMarkStack backing store failure"); return false; } assert(_virtual_space.committed_size() == rs.size(), @@ -5878,7 +5874,7 @@ if (_span.contains(addr)) { _verification_bm->mark(addr); if (!_cms_bm->isMarked(addr)) { - LogHandle(gc, verify) log; + Log(gc, verify) log; ResourceMark rm; oop(addr)->print_on(log.error_stream()); log.error(" (" INTPTR_FORMAT " should have been marked)", p2i(addr)); @@ -6659,7 +6655,7 @@ // Oop lies in _span and isn't yet grey or black _verification_bm->mark(addr); // now grey if (!_cms_bm->isMarked(addr)) { - LogHandle(gc, verify) log; + Log(gc, verify) log; ResourceMark rm; oop(addr)->print_on(log.error_stream()); log.error(" (" INTPTR_FORMAT " should have been marked)", p2i(addr)); @@ -7047,13 +7043,13 @@ } void SweepClosure::print_on(outputStream* st) const { - tty->print_cr("_sp = [" PTR_FORMAT "," PTR_FORMAT ")", - p2i(_sp->bottom()), p2i(_sp->end())); - tty->print_cr("_limit = " PTR_FORMAT, p2i(_limit)); - tty->print_cr("_freeFinger = " PTR_FORMAT, p2i(_freeFinger)); - NOT_PRODUCT(tty->print_cr("_last_fc = " PTR_FORMAT, p2i(_last_fc));) - tty->print_cr("_inFreeRange = %d, _freeRangeInFreeLists = %d, _lastFreeRangeCoalesced = %d", - _inFreeRange, _freeRangeInFreeLists, _lastFreeRangeCoalesced); + st->print_cr("_sp = [" PTR_FORMAT "," PTR_FORMAT ")", + p2i(_sp->bottom()), p2i(_sp->end())); + st->print_cr("_limit = " PTR_FORMAT, p2i(_limit)); + st->print_cr("_freeFinger = " PTR_FORMAT, p2i(_freeFinger)); + NOT_PRODUCT(st->print_cr("_last_fc = " PTR_FORMAT, p2i(_last_fc));) + st->print_cr("_inFreeRange = %d, _freeRangeInFreeLists = %d, _lastFreeRangeCoalesced = %d", + _inFreeRange, _freeRangeInFreeLists, _lastFreeRangeCoalesced); } #ifndef PRODUCT @@ -7066,8 +7062,10 @@ assert(_limit >= _sp->bottom() && _limit <= _sp->end(), "sweep _limit out of bounds"); if (inFreeRange()) { - warning("inFreeRange() should have been reset; dumping state of SweepClosure"); - print(); + Log(gc, sweep) log; + log.error("inFreeRange() should have been reset; dumping state of SweepClosure"); + ResourceMark rm; + print_on(log.error_stream()); ShouldNotReachHere(); } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.hpp diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/cms/concurrentMarkSweepThread.cpp --- a/hotspot/src/share/vm/gc/cms/concurrentMarkSweepThread.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/cms/concurrentMarkSweepThread.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -28,7 +28,7 @@ #include "gc/cms/concurrentMarkSweepThread.hpp" #include "gc/shared/gcId.hpp" #include "gc/shared/genCollectedHeap.hpp" -#include "oops/instanceRefKlass.hpp" +#include "gc/shared/referencePendingListLocker.hpp" #include "oops/oop.inline.hpp" #include "runtime/init.hpp" #include "runtime/interfaceSupport.hpp" @@ -42,16 +42,10 @@ ConcurrentMarkSweepThread* ConcurrentMarkSweepThread::_cmst = NULL; CMSCollector* ConcurrentMarkSweepThread::_collector = NULL; -bool ConcurrentMarkSweepThread::_should_terminate = false; int ConcurrentMarkSweepThread::_CMS_flag = CMS_nil; volatile jint ConcurrentMarkSweepThread::_pending_yields = 0; -SurrogateLockerThread* ConcurrentMarkSweepThread::_slt = NULL; -SurrogateLockerThread::SLT_msg_type - ConcurrentMarkSweepThread::_sltBuffer = SurrogateLockerThread::empty; -Monitor* ConcurrentMarkSweepThread::_sltMonitor = NULL; - ConcurrentMarkSweepThread::ConcurrentMarkSweepThread(CMSCollector* collector) : ConcurrentGCThread() { assert(UseConcMarkSweepGC, "UseConcMarkSweepGC should be set"); @@ -62,88 +56,58 @@ set_name("CMS Main Thread"); - if (os::create_thread(this, os::cgc_thread)) { - // An old comment here said: "Priority should be just less - // than that of VMThread". Since the VMThread runs at - // NearMaxPriority, the old comment was inaccurate, but - // changing the default priority to NearMaxPriority-1 - // could change current behavior, so the default of - // NearMaxPriority stays in place. - // - // Note that there's a possibility of the VMThread - // starving if UseCriticalCMSThreadPriority is on. - // That won't happen on Solaris for various reasons, - // but may well happen on non-Solaris platforms. - int native_prio; - if (UseCriticalCMSThreadPriority) { - native_prio = os::java_to_os_priority[CriticalPriority]; - } else { - native_prio = os::java_to_os_priority[NearMaxPriority]; - } - os::set_native_priority(this, native_prio); - - if (!DisableStartThread) { - os::start_thread(this); - } - } - _sltMonitor = SLT_lock; + // An old comment here said: "Priority should be just less + // than that of VMThread". Since the VMThread runs at + // NearMaxPriority, the old comment was inaccurate, but + // changing the default priority to NearMaxPriority-1 + // could change current behavior, so the default of + // NearMaxPriority stays in place. + // + // Note that there's a possibility of the VMThread + // starving if UseCriticalCMSThreadPriority is on. + // That won't happen on Solaris for various reasons, + // but may well happen on non-Solaris platforms. + create_and_start(UseCriticalCMSThreadPriority ? CriticalPriority : NearMaxPriority); } -void ConcurrentMarkSweepThread::run() { +void ConcurrentMarkSweepThread::run_service() { assert(this == cmst(), "just checking"); - initialize_in_thread(); - // From this time Thread::current() should be working. - assert(this == Thread::current(), "just checking"); if (BindCMSThreadToCPU && !os::bind_to_processor(CPUForCMSThread)) { - warning("Couldn't bind CMS thread to processor " UINTX_FORMAT, CPUForCMSThread); + log_warning(gc)("Couldn't bind CMS thread to processor " UINTX_FORMAT, CPUForCMSThread); } - // Wait until Universe::is_fully_initialized() + { - CMSLoopCountWarn loopX("CMS::run", "waiting for " - "Universe::is_fully_initialized()", 2); MutexLockerEx x(CGC_lock, true); set_CMS_flag(CMS_cms_wants_token); - // Wait until Universe is initialized and all initialization is completed. 
- while (!is_init_completed() && !Universe::is_fully_initialized() && - !_should_terminate) { - CGC_lock->wait(true, 200); - loopX.tick(); - } + assert(is_init_completed() && Universe::is_fully_initialized(), "ConcurrentGCThread::run() should have waited for this."); + // Wait until the surrogate locker thread that will do // pending list locking on our behalf has been created. // We cannot start the SLT thread ourselves since we need // to be a JavaThread to do so. CMSLoopCountWarn loopY("CMS::run", "waiting for SLT installation", 2); - while (_slt == NULL && !_should_terminate) { + while (!ReferencePendingListLocker::is_initialized() && !should_terminate()) { CGC_lock->wait(true, 200); loopY.tick(); } clear_CMS_flag(CMS_cms_wants_token); } - while (!_should_terminate) { + while (!should_terminate()) { sleepBeforeNextCycle(); - if (_should_terminate) break; + if (should_terminate()) break; GCIdMark gc_id_mark; GCCause::Cause cause = _collector->_full_gc_requested ? _collector->_full_gc_cause : GCCause::_cms_concurrent_mark; _collector->collect_in_background(cause); } - assert(_should_terminate, "just checking"); + // Check that the state of any protocol for synchronization // between background (CMS) and foreground collector is "clean" // (i.e. will not potentially block the foreground collector, // requiring action by us). verify_ok_to_terminate(); - // Signal that it is terminated - { - MutexLockerEx mu(Terminator_lock, - Mutex::_no_safepoint_check_flag); - assert(_cmst == this, "Weird!"); - _cmst = NULL; - Terminator_lock->notify(); - } } #ifndef PRODUCT @@ -157,39 +121,24 @@ // create and start a new ConcurrentMarkSweep Thread for given CMS generation ConcurrentMarkSweepThread* ConcurrentMarkSweepThread::start(CMSCollector* collector) { - if (!_should_terminate) { - assert(cmst() == NULL, "start() called twice?"); - ConcurrentMarkSweepThread* th = new ConcurrentMarkSweepThread(collector); - assert(cmst() == th, "Where did the just-created CMS thread go?"); - return th; - } - return NULL; + guarantee(_cmst == NULL, "start() called twice!"); + ConcurrentMarkSweepThread* th = new ConcurrentMarkSweepThread(collector); + assert(_cmst == th, "Where did the just-created CMS thread go?"); + return th; } -void ConcurrentMarkSweepThread::stop() { - // it is ok to take late safepoints here, if needed - { - MutexLockerEx x(Terminator_lock); - _should_terminate = true; - } - { // Now post a notify on CGC_lock so as to nudge - // CMS thread(s) that might be slumbering in - // sleepBeforeNextCycle. - MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag); - CGC_lock->notify_all(); - } - { // Now wait until (all) CMS thread(s) have exited - MutexLockerEx x(Terminator_lock); - while(cmst() != NULL) { - Terminator_lock->wait(); - } - } +void ConcurrentMarkSweepThread::stop_service() { + // Now post a notify on CGC_lock so as to nudge + // CMS thread(s) that might be slumbering in + // sleepBeforeNextCycle. 
+ MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag); + CGC_lock->notify_all(); } void ConcurrentMarkSweepThread::threads_do(ThreadClosure* tc) { assert(tc != NULL, "Null ThreadClosure"); - if (_cmst != NULL) { - tc->do_thread(_cmst); + if (cmst() != NULL && !cmst()->has_terminated()) { + tc->do_thread(cmst()); } assert(Universe::is_fully_initialized(), "Called too early, make sure heap is fully initialized"); @@ -202,8 +151,8 @@ } void ConcurrentMarkSweepThread::print_all_on(outputStream* st) { - if (_cmst != NULL) { - _cmst->print_on(st); + if (cmst() != NULL && !cmst()->has_terminated()) { + cmst()->print_on(st); st->cr(); } if (_collector != NULL) { @@ -278,7 +227,7 @@ void ConcurrentMarkSweepThread::wait_on_cms_lock(long t_millis) { MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag); - if (_should_terminate || _collector->_full_gc_requested) { + if (should_terminate() || _collector->_full_gc_requested) { return; } set_CMS_flag(CMS_cms_wants_token); // to provoke notifies @@ -307,7 +256,7 @@ unsigned int loop_count = 0; - while(!_should_terminate) { + while(!should_terminate()) { double now_time = os::elapsedTime(); long wait_time_millis; @@ -327,7 +276,7 @@ { MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag); - if (_should_terminate || _collector->_full_gc_requested) { + if (should_terminate() || _collector->_full_gc_requested) { return; } set_CMS_flag(CMS_cms_wants_token); // to provoke notifies @@ -358,13 +307,13 @@ // Too many loops warning if(++loop_count == 0) { - warning("wait_on_cms_lock_for_scavenge() has looped %u times", loop_count - 1); + log_warning(gc)("wait_on_cms_lock_for_scavenge() has looped %u times", loop_count - 1); } } } void ConcurrentMarkSweepThread::sleepBeforeNextCycle() { - while (!_should_terminate) { + while (!should_terminate()) { if(CMSWaitDuration >= 0) { // Wait until the next synchronous GC, a concurrent full gc // request or a timeout, whichever is earlier. @@ -381,15 +330,3 @@ // and wait some more } } - -// Note: this method, although exported by the ConcurrentMarkSweepThread, -// which is a non-JavaThread, can only be called by a JavaThread. -// Currently this is done at vm creation time (post-vm-init) by the -// main/Primordial (Java)Thread. -// XXX Consider changing this in the future to allow the CMS thread -// itself to create this thread? -void ConcurrentMarkSweepThread::makeSurrogateLockerThread(TRAPS) { - assert(UseConcMarkSweepGC, "SLT thread needed only for CMS GC"); - assert(_slt == NULL, "SLT already created"); - _slt = SurrogateLockerThread::make(THREAD); -} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/cms/concurrentMarkSweepThread.hpp --- a/hotspot/src/share/vm/gc/cms/concurrentMarkSweepThread.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/cms/concurrentMarkSweepThread.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -37,17 +37,10 @@ friend class VMStructs; friend class ConcurrentMarkSweepGeneration; // XXX should remove friendship friend class CMSCollector; - public: - virtual void run(); private: - static ConcurrentMarkSweepThread* _cmst; - static CMSCollector* _collector; - static SurrogateLockerThread* _slt; - static SurrogateLockerThread::SLT_msg_type _sltBuffer; - static Monitor* _sltMonitor; - - static bool _should_terminate; + static ConcurrentMarkSweepThread* _cmst; + static CMSCollector* _collector; enum CMS_flag_type { CMS_nil = NoBits, @@ -72,13 +65,13 @@ // debugging void verify_ok_to_terminate() const PRODUCT_RETURN; + void run_service(); + void stop_service(); + public: // Constructor ConcurrentMarkSweepThread(CMSCollector* collector); - static void makeSurrogateLockerThread(TRAPS); - static SurrogateLockerThread* slt() { return _slt; } - static void threads_do(ThreadClosure* tc); // Printing @@ -91,8 +84,6 @@ // Create and start the CMS Thread, or stop it on shutdown static ConcurrentMarkSweepThread* start(CMSCollector* collector); - static void stop(); - static bool should_terminate() { return _should_terminate; } // Synchronization using CMS token static void synchronize(bool is_cms_thread); @@ -170,7 +161,7 @@ inline void tick() { _ticks++; if (CMSLoopWarn && _ticks % _threshold == 0) { - warning("%s has looped " INTX_FORMAT " times %s", _src, _ticks, _msg); + log_warning(gc)("%s has looped " INTX_FORMAT " times %s", _src, _ticks, _msg); } } }; diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/cms/parCardTableModRefBS.cpp --- a/hotspot/src/share/vm/gc/cms/parCardTableModRefBS.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/cms/parCardTableModRefBS.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -161,15 +161,6 @@ } } - -// If you want a talkative process_chunk_boundaries, -// then #define NOISY(x) x -#ifdef NOISY -#error "Encountered a global preprocessor flag, NOISY, which might clash with local definition to follow" -#else -#define NOISY(x) -#endif - void CardTableModRefBSForCTRS:: process_chunk_boundaries(Space* sp, @@ -197,10 +188,6 @@ assert(start_chunk_index >= lowest_non_clean_base_chunk_index, "Bounds error."); uintptr_t cur_chunk_index = start_chunk_index - lowest_non_clean_base_chunk_index; - NOISY(tty->print_cr("===========================================================================");) - NOISY(tty->print_cr(" process_chunk_boundary: Called with [" PTR_FORMAT "," PTR_FORMAT ")", - chunk_mr.start(), chunk_mr.end());) - // First, set "our" lowest_non_clean entry, which would be // used by the thread scanning an adjoining left chunk with // a non-array object straddling the mutual boundary. @@ -239,36 +226,18 @@ } } if (first_dirty_card != NULL) { - NOISY(tty->print_cr(" LNC: Found a dirty card at " PTR_FORMAT " in current chunk", - first_dirty_card);) assert(cur_chunk_index < lowest_non_clean_chunk_size, "Bounds error."); assert(lowest_non_clean[cur_chunk_index] == NULL, "Write exactly once : value should be stable hereafter for this round"); lowest_non_clean[cur_chunk_index] = first_dirty_card; - } NOISY(else { - tty->print_cr(" LNC: Found no dirty card in current chunk; leaving LNC entry NULL"); - // In the future, we could have this thread look for a non-NULL value to copy from its - // right neighbor (up to the end of the first object). - if (last_card_of_cur_chunk < last_card_of_first_obj) { - tty->print_cr(" LNC: BEWARE!!! 
first obj straddles past right end of chunk:\n" - " might be efficient to get value from right neighbor?"); - } - }) + } } else { // In this case we can help our neighbor by just asking them // to stop at our first card (even though it may not be dirty). - NOISY(tty->print_cr(" LNC: first block is not a non-array object; setting LNC to first card of current chunk");) assert(lowest_non_clean[cur_chunk_index] == NULL, "Write once : value should be stable hereafter"); jbyte* first_card_of_cur_chunk = byte_for(chunk_mr.start()); lowest_non_clean[cur_chunk_index] = first_card_of_cur_chunk; } - NOISY(tty->print_cr(" process_chunk_boundary: lowest_non_clean[" INTPTR_FORMAT "] = " PTR_FORMAT - " which corresponds to the heap address " PTR_FORMAT, - cur_chunk_index, lowest_non_clean[cur_chunk_index], - (lowest_non_clean[cur_chunk_index] != NULL) - ? addr_for(lowest_non_clean[cur_chunk_index]) - : NULL);) - NOISY(tty->print_cr("---------------------------------------------------------------------------");) // Next, set our own max_to_do, which will strictly/exclusively bound // the highest address that we will scan past the right end of our chunk. @@ -285,8 +254,6 @@ || oop(last_block)->is_objArray() // last_block is an array (precisely marked) || oop(last_block)->is_typeArray()) { max_to_do = chunk_mr.end(); - NOISY(tty->print_cr(" process_chunk_boundary: Last block on this card is not a non-array object;\n" - " max_to_do left at " PTR_FORMAT, max_to_do);) } else { assert(last_block < chunk_mr.end(), "Tautology"); // It is a non-array object that straddles the right boundary of this chunk. @@ -301,9 +268,6 @@ // subsequent cards still in this chunk must have been made // precisely; we can cap processing at the end of our chunk. max_to_do = chunk_mr.end(); - NOISY(tty->print_cr(" process_chunk_boundary: Head of last object on this card is not dirty;\n" - " max_to_do left at " PTR_FORMAT, - max_to_do);) } else { // The last object must be considered dirty, and extends onto the // following chunk. 
Look for a dirty card in that chunk that will @@ -323,8 +287,6 @@ cur <= last_card_of_last_obj; cur++) { const jbyte val = *cur; if (card_will_be_scanned(val)) { - NOISY(tty->print_cr(" Found a non-clean card " PTR_FORMAT " with value 0x%x", - cur, (int)val);) limit_card = cur; break; } else { assert(!card_may_have_been_dirty(val), "Error: card can't be skipped"); @@ -333,10 +295,6 @@ if (limit_card != NULL) { max_to_do = addr_for(limit_card); assert(limit_card != NULL && max_to_do != NULL, "Error"); - NOISY(tty->print_cr(" process_chunk_boundary: Found a dirty card at " PTR_FORMAT - " max_to_do set at " PTR_FORMAT " which is before end of last block in chunk: " - PTR_FORMAT " + " PTR_FORMAT " = " PTR_FORMAT, - limit_card, max_to_do, last_block, last_block_size, (last_block+last_block_size));) } else { // The following is a pessimistic value, because it's possible // that a dirty card on a subsequent chunk has been cleared by @@ -346,10 +304,6 @@ limit_card = last_card_of_last_obj; max_to_do = last_block + last_block_size; assert(limit_card != NULL && max_to_do != NULL, "Error"); - NOISY(tty->print_cr(" process_chunk_boundary: Found no dirty card before end of last block in chunk\n" - " Setting limit_card to " PTR_FORMAT - " and max_to_do " PTR_FORMAT " + " PTR_FORMAT " = " PTR_FORMAT, - limit_card, last_block, last_block_size, max_to_do);) } assert(0 < cur_chunk_index+1 && cur_chunk_index+1 < lowest_non_clean_chunk_size, "Bounds error."); @@ -382,7 +336,6 @@ "[" PTR_FORMAT "," PTR_FORMAT ") -> [" PTR_FORMAT "," PTR_FORMAT ")", p2i(sp->used_region().start()), p2i(sp->used_region().end()), p2i(used.start()), p2i(used.end())); - NOISY(tty->print_cr(" process_chunk_boundary: heap expanded; explicitly bounding last_chunk");) last_chunk_index_to_check = last_chunk_index; } for (uintptr_t lnc_index = cur_chunk_index + 1; @@ -392,9 +345,6 @@ if (lnc_card != NULL) { // we can stop at the first non-NULL entry we find if (lnc_card <= limit_card) { - NOISY(tty->print_cr(" process_chunk_boundary: LNC card " PTR_FORMAT " is lower than limit_card " PTR_FORMAT, - " max_to_do will be lowered to " PTR_FORMAT " from " PTR_FORMAT, - lnc_card, limit_card, addr_for(lnc_card), max_to_do);) limit_card = lnc_card; max_to_do = addr_for(limit_card); assert(limit_card != NULL && max_to_do != NULL, "Error"); @@ -410,9 +360,6 @@ assert(max_to_do != NULL, "OOPS 2!"); } else { max_to_do = used.end(); - NOISY(tty->print_cr(" process_chunk_boundary: Last chunk of this space;\n" - " max_to_do left at " PTR_FORMAT, - max_to_do);) } assert(max_to_do != NULL, "OOPS 3!"); // Now we can set the closure we're using so it doesn't to beyond @@ -421,11 +368,8 @@ #ifndef PRODUCT dcto_cl->set_last_bottom(max_to_do); #endif - NOISY(tty->print_cr("===========================================================================\n");) } -#undef NOISY - void CardTableModRefBSForCTRS:: get_LNC_array_for_space(Space* sp, diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/cms/parNewGeneration.cpp --- a/hotspot/src/share/vm/gc/cms/parNewGeneration.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/cms/parNewGeneration.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -233,11 +233,15 @@ if (word_sz * 100 < ParallelGCBufferWastePct * plab->word_sz()) { // Is small enough; abandon this buffer and start a new one. plab->retire(); - size_t buf_size = plab->word_sz(); + // The minimum size has to be twice SurvivorAlignmentInBytes to + // allow for padding used in the alignment of 1 word. 
A padding + // of 1 is too small for a filler word so the padding size will + // be increased by SurvivorAlignmentInBytes. + size_t min_usable_size = 2 * static_cast(SurvivorAlignmentInBytes >> LogHeapWordSize); + size_t buf_size = MAX2(plab->word_sz(), min_usable_size); HeapWord* buf_space = sp->par_allocate(buf_size); if (buf_space == NULL) { - const size_t min_bytes = - PLAB::min_size() << LogHeapWordSize; + const size_t min_bytes = MAX2(PLAB::min_size(), min_usable_size) << LogHeapWordSize; size_t free_bytes = sp->free(); while(buf_space == NULL && free_bytes >= min_bytes) { buf_size = free_bytes >> LogHeapWordSize; @@ -253,7 +257,10 @@ // Note that we cannot compare buf_size < word_sz below // because of AlignmentReserve (see PLAB::allocate()). assert(obj != NULL || plab->words_remaining() < word_sz, - "Else should have been able to allocate"); + "Else should have been able to allocate requested object size " + SIZE_FORMAT ", PLAB size " SIZE_FORMAT ", SurvivorAlignmentInBytes " + SIZE_FORMAT ", words_remaining " SIZE_FORMAT, + word_sz, buf_size, SurvivorAlignmentInBytes, plab->words_remaining()); // It's conceivable that we may be able to use the // buffer we just grabbed for subsequent small requests // even if not for this one. @@ -391,7 +398,7 @@ } void ParScanThreadStateSet::print_termination_stats() { - LogHandle(gc, task, stats) log; + Log(gc, task, stats) log; if (!log.is_debug()) { return; } @@ -423,7 +430,7 @@ if (!log_develop_is_enabled(Trace, gc, task, stats)) { return; } - LogHandle(gc, task, stats) log; + Log(gc, task, stats) log; ResourceMark rm; outputStream* st = log.trace_stream(); print_taskqueue_stats_hdr(st); @@ -901,7 +908,7 @@ size_policy->minor_collection_begin(); } - GCTraceTime(Trace, gc) t1("ParNew", NULL, gch->gc_cause()); + GCTraceTime(Trace, gc, phases) t1("ParNew", NULL, gch->gc_cause()); age_table()->clear(); to()->clear(SpaceDecorator::Mangle); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/cms/parOopClosures.inline.hpp --- a/hotspot/src/share/vm/gc/cms/parOopClosures.inline.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/cms/parOopClosures.inline.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -82,18 +82,19 @@ if ((HeapWord*)obj < _boundary) { #ifndef PRODUCT if (_g->to()->is_in_reserved(obj)) { - tty->print_cr("Scanning field (" PTR_FORMAT ") twice?", p2i(p)); + Log(gc) log; + log.error("Scanning field (" PTR_FORMAT ") twice?", p2i(p)); GenCollectedHeap* gch = GenCollectedHeap::heap(); Space* sp = gch->space_containing(p); oop obj = oop(sp->block_start(p)); assert((HeapWord*)obj < (HeapWord*)p, "Error"); - tty->print_cr("Object: " PTR_FORMAT, p2i((void *)obj)); - tty->print_cr("-------"); - obj->print(); - tty->print_cr("-----"); - tty->print_cr("Heap:"); - tty->print_cr("-----"); - gch->print(); + log.error("Object: " PTR_FORMAT, p2i((void *)obj)); + log.error("-------"); + obj->print_on(log.error_stream()); + log.error("-----"); + log.error("Heap:"); + log.error("-----"); + gch->print_on(log.error_stream()); ShouldNotReachHere(); } #endif diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/cms/vmCMSOperations.cpp --- a/hotspot/src/share/vm/gc/cms/vmCMSOperations.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/cms/vmCMSOperations.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -38,27 +38,17 @@ // Methods in abstract class VM_CMS_Operation ////////////////////////////////////////////////////////// void VM_CMS_Operation::acquire_pending_list_lock() { - // The caller may block while communicating - // with the SLT thread in order to acquire/release the PLL. - SurrogateLockerThread* slt = ConcurrentMarkSweepThread::slt(); - if (slt != NULL) { - slt->manipulatePLL(SurrogateLockerThread::acquirePLL); - } else { - SurrogateLockerThread::report_missing_slt(); - } + _pending_list_locker.lock(); } void VM_CMS_Operation::release_and_notify_pending_list_lock() { - // The caller may block while communicating - // with the SLT thread in order to acquire/release the PLL. - ConcurrentMarkSweepThread::slt()-> - manipulatePLL(SurrogateLockerThread::releaseAndNotifyPLL); + _pending_list_locker.unlock(); } void VM_CMS_Operation::verify_before_gc() { if (VerifyBeforeGC && GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { - GCTraceTime(Info, gc, verify) tm("Verify Before", _collector->_gc_timer_cm); + GCTraceTime(Info, gc, phases, verify) tm("Verify Before", _collector->_gc_timer_cm); HandleMark hm; FreelistLocker x(_collector); MutexLockerEx y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag); @@ -70,7 +60,7 @@ void VM_CMS_Operation::verify_after_gc() { if (VerifyAfterGC && GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { - GCTraceTime(Info, gc, verify) tm("Verify After", _collector->_gc_timer_cm); + GCTraceTime(Info, gc, phases, verify) tm("Verify After", _collector->_gc_timer_cm); HandleMark hm; FreelistLocker x(_collector); MutexLockerEx y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag); @@ -95,7 +85,7 @@ assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(), "Possible deadlock"); - if (needs_pll()) { + if (needs_pending_list_lock()) { acquire_pending_list_lock(); } // Get the Heap_lock after the pending_list_lock. @@ -103,7 +93,7 @@ if (lost_race()) { assert(_prologue_succeeded == false, "Initialized in c'tor"); Heap_lock->unlock(); - if (needs_pll()) { + if (needs_pending_list_lock()) { release_and_notify_pending_list_lock(); } } else { @@ -120,7 +110,7 @@ // Release the Heap_lock first. Heap_lock->unlock(); - if (needs_pll()) { + if (needs_pending_list_lock()) { release_and_notify_pending_list_lock(); } } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/cms/vmCMSOperations.hpp --- a/hotspot/src/share/vm/gc/cms/vmCMSOperations.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/cms/vmCMSOperations.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -28,6 +28,7 @@ #include "gc/cms/concurrentMarkSweepGeneration.hpp" #include "gc/shared/gcCause.hpp" #include "gc/shared/gcId.hpp" +#include "gc/shared/referencePendingListLocker.hpp" #include "gc/shared/vmGCOperations.hpp" #include "runtime/vm_operations.hpp" @@ -51,6 +52,9 @@ class CMSCollector; class VM_CMS_Operation: public VM_Operation { + private: + ReferencePendingListLocker _pending_list_locker; + protected: CMSCollector* _collector; // associated collector bool _prologue_succeeded; // whether doit_prologue succeeded @@ -73,7 +77,7 @@ virtual const CMSCollector::CollectorState legal_state() const = 0; // Whether the pending list lock needs to be held - virtual const bool needs_pll() const = 0; + virtual const bool needs_pending_list_lock() const = 0; // Execute operations in the context of the caller, // prior to execution of the vm operation itself. @@ -105,7 +109,7 @@ return CMSCollector::InitialMarking; } - virtual const bool needs_pll() const { + virtual const bool needs_pending_list_lock() const { return false; } }; @@ -122,7 +126,7 @@ return CMSCollector::FinalMarking; } - virtual const bool needs_pll() const { + virtual const bool needs_pending_list_lock() const { return true; } }; diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/cms/vmStructs_cms.hpp --- a/hotspot/src/share/vm/gc/cms/vmStructs_cms.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/cms/vmStructs_cms.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2007, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -51,14 +51,12 @@ declare_type(ConcurrentMarkSweepGeneration,CardGeneration) \ declare_type(CompactibleFreeListSpace, CompactibleSpace) \ declare_type(ConcurrentMarkSweepThread, NamedThread) \ - declare_type(SurrogateLockerThread, JavaThread) \ declare_toplevel_type(CMSCollector) \ declare_toplevel_type(CMSBitMap) \ declare_toplevel_type(FreeChunk) \ declare_toplevel_type(Metablock) \ declare_toplevel_type(ConcurrentMarkSweepThread*) \ declare_toplevel_type(ConcurrentMarkSweepGeneration*) \ - declare_toplevel_type(SurrogateLockerThread*) \ declare_toplevel_type(CompactibleFreeListSpace*) \ declare_toplevel_type(CMSCollector*) \ declare_toplevel_type(AFLBinaryTreeDictionary) \ diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/collectionSetChooser.cpp --- a/hotspot/src/share/vm/gc/g1/collectionSetChooser.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/g1/collectionSetChooser.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -145,7 +145,6 @@ verify(); } - void CollectionSetChooser::add_region(HeapRegion* hr) { assert(!hr->is_pinned(), "Pinned region shouldn't be added to the collection set (index %u)", hr->hrm_index()); @@ -210,4 +209,67 @@ _front = 0; _end = 0; _remaining_reclaimable_bytes = 0; +} + +class ParKnownGarbageHRClosure: public HeapRegionClosure { + G1CollectedHeap* _g1h; + CSetChooserParUpdater _cset_updater; + +public: + ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted, + uint chunk_size) : + _g1h(G1CollectedHeap::heap()), + _cset_updater(hrSorted, true /* parallel */, chunk_size) { } + + bool doHeapRegion(HeapRegion* r) { + // Do we have any marking information for this region? + if (r->is_marked()) { + // We will skip any region that's currently used as an old GC + // alloc region (we should not consider those for collection + // before we fill them up). + if (_cset_updater.should_add(r) && !_g1h->is_old_gc_alloc_region(r)) { + _cset_updater.add_region(r); + } + } + return false; + } }; + +class ParKnownGarbageTask: public AbstractGangTask { + CollectionSetChooser* _hrSorted; + uint _chunk_size; + G1CollectedHeap* _g1; + HeapRegionClaimer _hrclaimer; + +public: + ParKnownGarbageTask(CollectionSetChooser* hrSorted, uint chunk_size, uint n_workers) : + AbstractGangTask("ParKnownGarbageTask"), + _hrSorted(hrSorted), _chunk_size(chunk_size), + _g1(G1CollectedHeap::heap()), _hrclaimer(n_workers) {} + + void work(uint worker_id) { + ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size); + _g1->heap_region_par_iterate(&parKnownGarbageCl, worker_id, &_hrclaimer); + } +}; + +uint CollectionSetChooser::calculate_parallel_work_chunk_size(uint n_workers, uint n_regions) const { + assert(n_workers > 0, "Active gc workers should be greater than 0"); + const uint overpartition_factor = 4; + const uint min_chunk_size = MAX2(n_regions / n_workers, 1U); + return MAX2(n_regions / (n_workers * overpartition_factor), min_chunk_size); +} + +void CollectionSetChooser::rebuild(WorkGang* workers, uint n_regions) { + clear(); + + uint n_workers = workers->active_workers(); + + uint chunk_size = calculate_parallel_work_chunk_size(n_workers, n_regions); + prepare_for_par_region_addition(n_workers, n_regions, chunk_size); + + ParKnownGarbageTask par_known_garbage_task(this, chunk_size, n_workers); + workers->run_task(&par_known_garbage_task); + + sort_regions(); +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/collectionSetChooser.hpp --- a/hotspot/src/share/vm/gc/g1/collectionSetChooser.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/g1/collectionSetChooser.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -65,6 +65,9 @@ // The sum of reclaimable bytes over all the regions in the CSet chooser. 
size_t _remaining_reclaimable_bytes; + // Calculate and return chunk size (in number of regions) for parallel + // addition of regions + uint calculate_parallel_work_chunk_size(uint n_workers, uint n_regions) const; public: // Return the current candidate region to be considered for @@ -132,6 +135,8 @@ void clear(); + void rebuild(WorkGang* workers, uint n_regions); + // Return the number of candidate regions that remain to be collected. uint remaining_regions() { return _end - _front; } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/concurrentG1Refine.cpp --- a/hotspot/src/share/vm/gc/g1/concurrentG1Refine.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/g1/concurrentG1Refine.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -27,11 +27,13 @@ #include "gc/g1/concurrentG1RefineThread.hpp" #include "gc/g1/g1CollectedHeap.inline.hpp" #include "gc/g1/g1HotCardCache.hpp" +#include "gc/g1/g1Predictions.hpp" #include "runtime/java.hpp" -ConcurrentG1Refine::ConcurrentG1Refine(G1CollectedHeap* g1h) : +ConcurrentG1Refine::ConcurrentG1Refine(G1CollectedHeap* g1h, const G1Predictions* predictor) : _threads(NULL), _sample_thread(NULL), + _predictor_sigma(predictor->sigma()), _hot_card_cache(g1h) { // Ergonomically select initial concurrent refinement parameters @@ -49,10 +51,12 @@ FLAG_SET_DEFAULT(G1ConcRefinementRedZone, yellow_zone() * 2); } set_red_zone(MAX2(G1ConcRefinementRedZone, yellow_zone())); + } ConcurrentG1Refine* ConcurrentG1Refine::create(G1CollectedHeap* g1h, CardTableEntryClosure* refine_closure, jint* ecode) { - ConcurrentG1Refine* cg1r = new ConcurrentG1Refine(g1h); + G1CollectorPolicy* policy = g1h->g1_policy(); + ConcurrentG1Refine* cg1r = new ConcurrentG1Refine(g1h, &policy->predictor()); if (cg1r == NULL) { *ecode = JNI_ENOMEM; vm_shutdown_during_initialization("Could not create ConcurrentG1Refine"); @@ -155,3 +159,43 @@ _sample_thread->print_on(st); st->cr(); } + +void ConcurrentG1Refine::adjust(double update_rs_time, + double update_rs_processed_buffers, + double goal_ms) { + DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); + + if (G1UseAdaptiveConcRefinement) { + const int k_gy = 3, k_gr = 6; + const double inc_k = 1.1, dec_k = 0.9; + + size_t g = green_zone(); + if (update_rs_time > goal_ms) { + g = (size_t)(g * dec_k); // Can become 0, that's OK. That would mean a mutator-only processing. 
+ } else { + if (update_rs_time < goal_ms && update_rs_processed_buffers > g) { + g = (size_t)MAX2(g * inc_k, g + 1.0); + } + } + // Change the refinement threads params + set_green_zone(g); + set_yellow_zone(g * k_gy); + set_red_zone(g * k_gr); + reinitialize_threads(); + + size_t processing_threshold_delta = MAX2(green_zone() * _predictor_sigma, 1); + size_t processing_threshold = MIN2(green_zone() + processing_threshold_delta, + yellow_zone()); + // Change the barrier params + dcqs.set_process_completed_threshold((int)processing_threshold); + dcqs.set_max_completed_queue((int)red_zone()); + } + + size_t curr_queue_size = dcqs.completed_buffers_num(); + if (curr_queue_size >= yellow_zone()) { + dcqs.set_completed_queue_padding(curr_queue_size); + } else { + dcqs.set_completed_queue_padding(0); + } + dcqs.notify_if_necessary(); +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/concurrentG1Refine.hpp --- a/hotspot/src/share/vm/gc/g1/concurrentG1Refine.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/g1/concurrentG1Refine.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -35,6 +35,7 @@ class ConcurrentG1RefineThread; class G1CollectedHeap; class G1HotCardCache; +class G1Predictions; class G1RegionToSpaceMapper; class G1RemSet; class DirtyCardQueue; @@ -67,13 +68,15 @@ size_t _thread_threshold_step; + double _predictor_sigma; + // We delay the refinement of 'hot' cards using the hot card cache. G1HotCardCache _hot_card_cache; // Reset the threshold step value based of the current zone boundaries. void reset_threshold_step(); - ConcurrentG1Refine(G1CollectedHeap* g1h); + ConcurrentG1Refine(G1CollectedHeap* g1h, const G1Predictions* predictions); public: ~ConcurrentG1Refine(); @@ -85,6 +88,8 @@ void init(G1RegionToSpaceMapper* card_counts_storage); void stop(); + void adjust(double update_rs_time, double update_rs_processed_buffers, double goal_ms); + void reinitialize_threads(); // Iterate over all concurrent refinement threads diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/concurrentG1RefineThread.cpp --- a/hotspot/src/share/vm/gc/g1/concurrentG1RefineThread.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/g1/concurrentG1RefineThread.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -76,9 +76,8 @@ } void ConcurrentG1RefineThread::wait_for_completed_buffers() { - DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag); - while (!_should_terminate && !is_active()) { + while (!should_terminate() && !is_active()) { _monitor->wait(Mutex::_no_safepoint_check_flag); } } @@ -109,22 +108,13 @@ } } -void ConcurrentG1RefineThread::run() { - initialize_in_thread(); - wait_for_universe_init(); - - run_service(); - - terminate(); -} - void ConcurrentG1RefineThread::run_service() { _vtime_start = os::elapsedVTime(); - while (!_should_terminate) { + while (!should_terminate()) { // Wait for work wait_for_completed_buffers(); - if (_should_terminate) { + if (should_terminate()) { break; } @@ -135,7 +125,12 @@ { SuspendibleThreadSetJoiner sts_join; - do { + while (!should_terminate()) { + if (sts_join.should_yield()) { + sts_join.yield(); + continue; // Re-check for termination after yield delay. + } + size_t curr_buffer_num = dcqs.completed_buffers_num(); // If the number of the buffers falls down into the yellow zone, // that means that the transition period after the evacuation pause has ended. 
@@ -147,17 +142,23 @@ if (_next != NULL && !_next->is_active() && curr_buffer_num > _next->_threshold) { _next->activate(); } - } while (dcqs.apply_closure_to_completed_buffer(_refine_closure, - _worker_id + _worker_id_offset, - _deactivation_threshold, - false /* during_pause */)); - deactivate(); - log_debug(gc, refine)("Deactivated %d, off threshold: " SIZE_FORMAT ", current: " SIZE_FORMAT, - _worker_id, _deactivation_threshold, - dcqs.completed_buffers_num()); + // Process the next buffer, if there are enough left. + if (!dcqs.apply_closure_to_completed_buffer(_refine_closure, + _worker_id + _worker_id_offset, + _deactivation_threshold, + false /* during_pause */)) { + break; // Deactivate, number of buffers fell below threshold. + } + } } + deactivate(); + log_debug(gc, refine)("Deactivated %d, off threshold: " SIZE_FORMAT + ", current: " SIZE_FORMAT, + _worker_id, _deactivation_threshold, + dcqs.completed_buffers_num()); + if (os::supports_vtime()) { _vtime_accum = (os::elapsedVTime() - _vtime_start); } else { @@ -168,23 +169,6 @@ log_debug(gc, refine)("Stopping %d", _worker_id); } -void ConcurrentG1RefineThread::stop() { - // it is ok to take late safepoints here, if needed - { - MutexLockerEx mu(Terminator_lock); - _should_terminate = true; - } - - stop_service(); - - { - MutexLockerEx mu(Terminator_lock); - while (!_has_terminated) { - Terminator_lock->wait(); - } - } -} - void ConcurrentG1RefineThread::stop_service() { MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag); _monitor->notify(); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/concurrentG1RefineThread.hpp --- a/hotspot/src/share/vm/gc/g1/concurrentG1RefineThread.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/g1/concurrentG1RefineThread.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -72,7 +72,6 @@ void stop_service(); public: - virtual void run(); // Constructor ConcurrentG1RefineThread(ConcurrentG1Refine* cg1r, ConcurrentG1RefineThread* next, CardTableEntryClosure* refine_closure, @@ -84,9 +83,6 @@ double vtime_accum() { return _vtime_accum; } ConcurrentG1Refine* cg1r() { return _cg1r; } - - // shutdown - void stop(); }; #endif // SHARE_VM_GC_G1_CONCURRENTG1REFINETHREAD_HPP diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/concurrentMarkThread.cpp --- a/hotspot/src/share/vm/gc/g1/concurrentMarkThread.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/g1/concurrentMarkThread.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -25,6 +25,7 @@ #include "precompiled.hpp" #include "classfile/classLoaderData.hpp" #include "gc/g1/concurrentMarkThread.inline.hpp" +#include "gc/g1/g1Analytics.hpp" #include "gc/g1/g1CollectedHeap.inline.hpp" #include "gc/g1/g1CollectorPolicy.hpp" #include "gc/g1/g1MMUTracker.hpp" @@ -41,9 +42,6 @@ // The CM thread is created when the G1 garbage collector is used -SurrogateLockerThread* - ConcurrentMarkThread::_slt = NULL; - ConcurrentMarkThread::ConcurrentMarkThread(G1ConcurrentMark* cm) : ConcurrentGCThread(), _cm(cm), @@ -82,60 +80,59 @@ // Marking pauses can be scheduled flexibly, so we might delay marking to meet MMU. void ConcurrentMarkThread::delay_to_keep_mmu(G1CollectorPolicy* g1_policy, bool remark) { + const G1Analytics* analytics = g1_policy->analytics(); if (g1_policy->adaptive_young_list_length()) { double now = os::elapsedTime(); - double prediction_ms = remark ? g1_policy->predict_remark_time_ms() - : g1_policy->predict_cleanup_time_ms(); + double prediction_ms = remark ? 
analytics->predict_remark_time_ms() + : analytics->predict_cleanup_time_ms(); G1MMUTracker *mmu_tracker = g1_policy->mmu_tracker(); jlong sleep_time_ms = mmu_tracker->when_ms(now, prediction_ms); os::sleep(this, sleep_time_ms, false); } } -class GCConcPhaseTimer : StackObj { +class G1ConcPhaseTimer : public GCTraceConcTimeImpl { G1ConcurrentMark* _cm; public: - GCConcPhaseTimer(G1ConcurrentMark* cm, const char* title) : _cm(cm) { - _cm->register_concurrent_phase_start(title); + G1ConcPhaseTimer(G1ConcurrentMark* cm, const char* title) : + GCTraceConcTimeImpl(title), + _cm(cm) { + _cm->gc_timer_cm()->register_gc_concurrent_start(title); } - ~GCConcPhaseTimer() { - _cm->register_concurrent_phase_end(); + ~G1ConcPhaseTimer() { + _cm->gc_timer_cm()->register_gc_concurrent_end(); } }; -void ConcurrentMarkThread::run() { - initialize_in_thread(); - wait_for_universe_init(); - - run_service(); - - terminate(); -} - void ConcurrentMarkThread::run_service() { _vtime_start = os::elapsedVTime(); G1CollectedHeap* g1h = G1CollectedHeap::heap(); G1CollectorPolicy* g1_policy = g1h->g1_policy(); - while (!_should_terminate) { + while (!should_terminate()) { // wait until started is set. sleepBeforeNextCycle(); - if (_should_terminate) { - _cm->root_regions()->cancel_scan(); + if (should_terminate()) { break; } + GCIdMark gc_id_mark; + + cm()->concurrent_cycle_start(); + assert(GCId::current() != GCId::undefined(), "GC id should have been set up by the initial mark GC."); + + GCTraceConcTime(Info, gc) tt("Concurrent Cycle"); { ResourceMark rm; HandleMark hm; double cycle_start = os::elapsedVTime(); { - GCConcPhaseTimer(_cm, "Concurrent Clearing of Claimed Marks"); + G1ConcPhaseTimer t(_cm, "Concurrent Clear Claimed Marks"); ClassLoaderDataGraph::clear_claimed_marks(); } @@ -148,22 +145,22 @@ // correctness issue. { - GCConcPhaseTimer(_cm, "Concurrent Root Region Scanning"); - _cm->scanRootRegions(); + G1ConcPhaseTimer t(_cm, "Concurrent Scan Root Regions"); + _cm->scan_root_regions(); } // It would be nice to use the GCTraceConcTime class here but // the "end" logging is inside the loop and not at the end of // a scope. Mimicking the same log output as GCTraceConcTime instead. 
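A side note on the timer rewrite above: statements of the form GCConcPhaseTimer(_cm, "...") construct an unnamed temporary, and a temporary is destroyed at the end of that statement, so the old start and end registrations ran back to back; the new code names the G1ConcPhaseTimer (t), which keeps it alive until the enclosing block closes. A rough standalone sketch of the same RAII idea using only the standard library (the class and output below are illustrative, not HotSpot APIs):

    #include <chrono>
    #include <cstdio>

    // Minimal RAII phase timer: constructor marks the start, destructor the end.
    class ScopedPhaseTimer {
      const char* _title;
      std::chrono::steady_clock::time_point _start;
    public:
      explicit ScopedPhaseTimer(const char* title)
          : _title(title), _start(std::chrono::steady_clock::now()) {
        std::printf("start: %s\n", _title);
      }
      ~ScopedPhaseTimer() {
        long long ms = std::chrono::duration_cast<std::chrono::milliseconds>(
                           std::chrono::steady_clock::now() - _start).count();
        std::printf("end:   %s (%lld ms)\n", _title, ms);
      }
    };

    int main() {
      {
        ScopedPhaseTimer t("Concurrent Clear Claimed Marks");  // named: covers the whole scope
        // ... phase work would run here ...
      }                                                        // destructor fires here
      ScopedPhaseTimer("unnamed");  // temporary: start and end fire on this one line
      return 0;
    }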
jlong mark_start = os::elapsed_counter(); - log_info(gc)("Concurrent Mark (%.3fs)", TimeHelper::counter_to_seconds(mark_start)); + log_info(gc, marking)("Concurrent Mark (%.3fs)", TimeHelper::counter_to_seconds(mark_start)); int iter = 0; do { iter++; if (!cm()->has_aborted()) { - GCConcPhaseTimer(_cm, "Concurrent Mark"); - _cm->markFromRoots(); + G1ConcPhaseTimer t(_cm, "Concurrent Mark From Roots"); + _cm->mark_from_roots(); } double mark_end_time = os::elapsedVTime(); @@ -171,18 +168,18 @@ _vtime_mark_accum += (mark_end_time - cycle_start); if (!cm()->has_aborted()) { delay_to_keep_mmu(g1_policy, true /* remark */); - log_info(gc)("Concurrent Mark (%.3fs, %.3fs) %.3fms", - TimeHelper::counter_to_seconds(mark_start), - TimeHelper::counter_to_seconds(mark_end), - TimeHelper::counter_to_millis(mark_end - mark_start)); + log_info(gc, marking)("Concurrent Mark (%.3fs, %.3fs) %.3fms", + TimeHelper::counter_to_seconds(mark_start), + TimeHelper::counter_to_seconds(mark_end), + TimeHelper::counter_to_millis(mark_end - mark_start)); CMCheckpointRootsFinalClosure final_cl(_cm); VM_CGC_Operation op(&final_cl, "Pause Remark", true /* needs_pll */); VMThread::execute(&op); } if (cm()->restart_for_overflow()) { - log_debug(gc)("Restarting conc marking because of MS overflow in remark (restart #%d).", iter); - log_info(gc)("Concurrent Mark restart for overflow"); + log_debug(gc, marking)("Restarting Concurrent Marking because of Mark Stack Overflow in Remark (Iteration #%d).", iter); + log_info(gc, marking)("Concurrent Mark Restart due to overflow"); } } while (cm()->restart_for_overflow()); @@ -216,11 +213,9 @@ // place, it would wait for us to process the regions // reclaimed by cleanup. - GCTraceConcTime(Info, gc) tt("Concurrent Cleanup"); - GCConcPhaseTimer(_cm, "Concurrent Cleanup"); - + G1ConcPhaseTimer t(_cm, "Concurrent Complete Cleanup"); // Now do the concurrent cleanup operation. - _cm->completeCleanup(); + _cm->complete_cleanup(); // Notify anyone who's waiting that there are no more free // regions coming. We have to do this before we join the STS @@ -265,7 +260,7 @@ if (!cm()->has_aborted()) { g1_policy->record_concurrent_mark_cleanup_completed(); } else { - log_info(gc)("Concurrent Mark abort"); + log_info(gc, marking)("Concurrent Mark Abort"); } } @@ -274,8 +269,8 @@ // We may have aborted just before the remark. Do not bother clearing the // bitmap then, as it has been done during mark abort. 
if (!cm()->has_aborted()) { - GCConcPhaseTimer(_cm, "Concurrent Bitmap Clearing"); - _cm->clearNextBitmap(); + G1ConcPhaseTimer t(_cm, "Concurrent Cleanup for Next Mark"); + _cm->cleanup_for_next_mark(); } else { assert(!G1VerifyBitmaps || _cm->nextMarkBitmapIsClear(), "Next mark bitmap must be clear"); } @@ -288,25 +283,11 @@ { SuspendibleThreadSetJoiner sts_join; g1h->increment_old_marking_cycles_completed(true /* concurrent */); - g1h->register_concurrent_cycle_end(); + + cm()->concurrent_cycle_end(); } } -} - -void ConcurrentMarkThread::stop() { - { - MutexLockerEx ml(Terminator_lock); - _should_terminate = true; - } - - stop_service(); - - { - MutexLockerEx ml(Terminator_lock); - while (!_has_terminated) { - Terminator_lock->wait(); - } - } + _cm->root_regions()->cancel_scan(); } void ConcurrentMarkThread::stop_service() { @@ -320,7 +301,7 @@ assert(!in_progress(), "should have been cleared"); MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag); - while (!started() && !_should_terminate) { + while (!started() && !should_terminate()) { CGC_lock->wait(Mutex::_no_safepoint_check_flag); } @@ -328,16 +309,3 @@ set_in_progress(); } } - -// Note: As is the case with CMS - this method, although exported -// by the ConcurrentMarkThread, which is a non-JavaThread, can only -// be called by a JavaThread. Currently this is done at vm creation -// time (post-vm-init) by the main/Primordial (Java)Thread. -// XXX Consider changing this in the future to allow the CM thread -// itself to create this thread? -void ConcurrentMarkThread::makeSurrogateLockerThread(TRAPS) { - assert(UseG1GC, "SLT thread needed only for concurrent GC"); - assert(THREAD->is_Java_thread(), "must be a Java thread"); - assert(_slt == NULL, "SLT already created"); - _slt = SurrogateLockerThread::make(THREAD); -} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/concurrentMarkThread.hpp --- a/hotspot/src/share/vm/gc/g1/concurrentMarkThread.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/g1/concurrentMarkThread.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -38,13 +38,8 @@ double _vtime_start; // Initial virtual time. double _vtime_accum; // Accumulated virtual time. - double _vtime_mark_accum; - public: - virtual void run(); - - private: G1ConcurrentMark* _cm; enum State { @@ -61,15 +56,10 @@ void run_service(); void stop_service(); - static SurrogateLockerThread* _slt; - public: // Constructor ConcurrentMarkThread(G1ConcurrentMark* cm); - static void makeSurrogateLockerThread(TRAPS); - static SurrogateLockerThread* slt() { return _slt; } - // Total virtual time so far for this thread and concurrent marking tasks. double vtime_accum(); // Marking virtual time so far this thread and concurrent marking tasks. @@ -93,9 +83,6 @@ // as the CM thread might take some time to wake up before noticing // that started() is set and set in_progress(). 
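The sleepBeforeNextCycle() loop above is the standard monitor idiom: take the lock, then wait in a loop that re-checks both started() and should_terminate() on every wakeup, since a notification can mean either a new cycle request or shutdown. A rough standalone equivalent using standard C++ primitives (illustrative only; the real code waits on CGC_lock with no safepoint check):

    #include <condition_variable>
    #include <cstdio>
    #include <mutex>
    #include <thread>

    std::mutex              mon;
    std::condition_variable cv;
    bool start_requested = false;   // stands in for started()
    bool stop_requested  = false;   // stands in for should_terminate()

    void service_thread() {
      std::unique_lock<std::mutex> x(mon);
      // Re-check both conditions on every wakeup.
      cv.wait(x, [] { return start_requested || stop_requested; });
      std::printf("%s\n", stop_requested ? "terminating" : "cycle started");
    }

    int main() {
      std::thread t(service_thread);
      {
        std::lock_guard<std::mutex> g(mon);
        start_requested = true;     // request a concurrent cycle
      }
      cv.notify_one();              // wake the waiting service thread
      t.join();
      return 0;
    }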
bool during_cycle() { return !idle(); } - - // shutdown - void stop(); }; #endif // SHARE_VM_GC_G1_CONCURRENTMARKTHREAD_HPP diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/dirtyCardQueue.cpp --- a/hotspot/src/share/vm/gc/g1/dirtyCardQueue.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/g1/dirtyCardQueue.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -110,44 +110,6 @@ } } -bool DirtyCardQueue::apply_closure(CardTableEntryClosure* cl, - bool consume, - uint worker_i) { - bool res = true; - if (_buf != NULL) { - res = apply_closure_to_buffer(cl, _buf, _index, _sz, - consume, - worker_i); - if (res && consume) { - _index = _sz; - } - } - return res; -} - -bool DirtyCardQueue::apply_closure_to_buffer(CardTableEntryClosure* cl, - void** buf, - size_t index, size_t sz, - bool consume, - uint worker_i) { - if (cl == NULL) return true; - size_t limit = byte_index_to_index(sz); - for (size_t i = byte_index_to_index(index); i < limit; ++i) { - jbyte* card_ptr = static_cast(buf[i]); - if (card_ptr != NULL) { - // Set the entry to null, so we don't do it again (via the test - // above) if we reconsider this buffer. - if (consume) { - buf[i] = NULL; - } - if (!cl->do_card_ptr(card_ptr, worker_i)) { - return false; - } - } - } - return true; -} - DirtyCardQueueSet::DirtyCardQueueSet(bool notify_when_complete) : PtrQueueSet(notify_when_complete), _mut_process_closure(NULL), @@ -188,22 +150,57 @@ t->dirty_card_queue().handle_zero_index(); } -bool DirtyCardQueueSet::mut_process_buffer(void** buf) { +bool DirtyCardQueueSet::apply_closure_to_buffer(CardTableEntryClosure* cl, + BufferNode* node, + bool consume, + uint worker_i) { + if (cl == NULL) return true; + bool result = true; + void** buf = BufferNode::make_buffer_from_node(node); + size_t limit = DirtyCardQueue::byte_index_to_index(buffer_size()); + size_t i = DirtyCardQueue::byte_index_to_index(node->index()); + for ( ; i < limit; ++i) { + jbyte* card_ptr = static_cast(buf[i]); + assert(card_ptr != NULL, "invariant"); + if (!cl->do_card_ptr(card_ptr, worker_i)) { + result = false; // Incomplete processing. 
+ break; + } + } + if (consume) { + size_t new_index = DirtyCardQueue::index_to_byte_index(i); + assert(new_index <= buffer_size(), "invariant"); + node->set_index(new_index); + } + return result; +} + +#ifndef ASSERT +#define assert_fully_consumed(node, buffer_size) +#else +#define assert_fully_consumed(node, buffer_size) \ + do { \ + size_t _afc_index = (node)->index(); \ + size_t _afc_size = (buffer_size); \ + assert(_afc_index == _afc_size, \ + "Buffer was not fully consumed as claimed: index: " \ + SIZE_FORMAT ", size: " SIZE_FORMAT, \ + _afc_index, _afc_size); \ + } while (0) +#endif // ASSERT + +bool DirtyCardQueueSet::mut_process_buffer(BufferNode* node) { guarantee(_free_ids != NULL, "must be"); - // claim a par id - uint worker_i = _free_ids->claim_par_id(); + uint worker_i = _free_ids->claim_par_id(); // temporarily claim an id + bool result = apply_closure_to_buffer(_mut_process_closure, node, true, worker_i); + _free_ids->release_par_id(worker_i); // release the id - bool b = DirtyCardQueue::apply_closure_to_buffer(_mut_process_closure, buf, 0, - _sz, true, worker_i); - if (b) { + if (result) { + assert_fully_consumed(node, buffer_size()); Atomic::inc(&_processed_buffers_mut); } - - // release the id - _free_ids->release_par_id(worker_i); - - return b; + return result; } @@ -239,49 +236,31 @@ if (nd == NULL) { return false; } else { - void** buf = BufferNode::make_buffer_from_node(nd); - size_t index = nd->index(); - if (DirtyCardQueue::apply_closure_to_buffer(cl, - buf, index, _sz, - true, worker_i)) { + if (apply_closure_to_buffer(cl, nd, true, worker_i)) { + assert_fully_consumed(nd, buffer_size()); // Done with fully processed buffer. - deallocate_buffer(buf); + deallocate_buffer(nd); Atomic::inc(&_processed_buffers_rs_thread); - return true; } else { // Return partially processed buffer to the queue. 
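The index handling above is what makes refinement resumable: processing starts at the node's saved index and, when consume is set, the index is advanced to the first entry that was not successfully processed, so a buffer returned to the queue is picked up later exactly where it was left. A small standalone model of that scheme (the Buffer type and closure here are illustrative, not the HotSpot BufferNode/CardTableEntryClosure API):

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    struct Buffer {
      std::vector<int> entries;
      size_t index;                      // first entry still to be processed
    };

    // Apply cl to the remaining entries. Returns true if all of them were
    // processed, false if the closure asked to stop early. With consume=true
    // the index is updated either way, so a later call resumes at the right spot.
    template <typename Closure>
    bool apply_to_buffer(Buffer& buf, Closure cl, bool consume) {
      size_t i = buf.index;
      bool complete = true;
      for (; i < buf.entries.size(); ++i) {
        if (!cl(buf.entries[i])) { complete = false; break; }
      }
      if (consume) buf.index = i;
      return complete;
    }

    int main() {
      Buffer b{{1, 2, 3, 4, 5}, 0};
      // Stop at the first entry >= 4, mimicking a refinement thread backing off.
      bool done = apply_to_buffer(b, [](int e) { return e < 4; }, true);
      std::printf("done=%d resume_at=%zu\n", done, b.index);  // done=0 resume_at=3
      return 0;
    }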
- enqueue_complete_buffer(buf, index); - return false; + guarantee(!during_pause, "Should never stop early"); + enqueue_complete_buffer(nd); } - } -} - -void DirtyCardQueueSet::apply_closure_to_all_completed_buffers(CardTableEntryClosure* cl) { - BufferNode* nd = _completed_buffers_head; - while (nd != NULL) { - bool b = - DirtyCardQueue::apply_closure_to_buffer(cl, - BufferNode::make_buffer_from_node(nd), - 0, _sz, false); - guarantee(b, "Should not stop early."); - nd = nd->next(); + return true; } } void DirtyCardQueueSet::par_apply_closure_to_all_completed_buffers(CardTableEntryClosure* cl) { BufferNode* nd = _cur_par_buffer_node; while (nd != NULL) { - BufferNode* next = (BufferNode*)nd->next(); - BufferNode* actual = (BufferNode*)Atomic::cmpxchg_ptr((void*)next, (volatile void*)&_cur_par_buffer_node, (void*)nd); + BufferNode* next = nd->next(); + void* actual = Atomic::cmpxchg_ptr(next, &_cur_par_buffer_node, nd); if (actual == nd) { - bool b = - DirtyCardQueue::apply_closure_to_buffer(cl, - BufferNode::make_buffer_from_node(actual), - 0, _sz, false); + bool b = apply_closure_to_buffer(cl, nd, false); guarantee(b, "Should not stop early."); nd = next; } else { - nd = actual; + nd = static_cast(actual); } } } @@ -304,7 +283,7 @@ while (buffers_to_delete != NULL) { BufferNode* nd = buffers_to_delete; buffers_to_delete = nd->next(); - deallocate_buffer(BufferNode::make_buffer_from_node(nd)); + deallocate_buffer(nd); } } @@ -320,6 +299,13 @@ shared_dirty_card_queue()->reset(); } +void DirtyCardQueueSet::concatenate_log(DirtyCardQueue& dcq) { + if (!dcq.is_empty()) { + enqueue_complete_buffer( + BufferNode::make_node_from_buffer(dcq.get_buf(), dcq.get_index())); + dcq.reinitialize(); + } +} void DirtyCardQueueSet::concatenate_logs() { // Iterate over all the threads, if we find a partial log add it to @@ -329,23 +315,9 @@ _max_completed_queue = max_jint; assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint."); for (JavaThread* t = Threads::first(); t; t = t->next()) { - DirtyCardQueue& dcq = t->dirty_card_queue(); - if (dcq.size() != 0) { - void** buf = dcq.get_buf(); - // We must NULL out the unused entries, then enqueue. - size_t limit = dcq.byte_index_to_index(dcq.get_index()); - for (size_t i = 0; i < limit; ++i) { - buf[i] = NULL; - } - enqueue_complete_buffer(dcq.get_buf(), dcq.get_index()); - dcq.reinitialize(); - } + concatenate_log(t->dirty_card_queue()); } - if (_shared_dirty_card_queue.size() != 0) { - enqueue_complete_buffer(_shared_dirty_card_queue.get_buf(), - _shared_dirty_card_queue.get_index()); - _shared_dirty_card_queue.reinitialize(); - } + concatenate_log(_shared_dirty_card_queue); // Restore the completed buffer queue limit. _max_completed_queue = save_max_completed_queue; } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/dirtyCardQueue.hpp --- a/hotspot/src/share/vm/gc/g1/dirtyCardQueue.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/g1/dirtyCardQueue.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -37,7 +37,7 @@ public: // Process the card whose card table entry is "card_ptr". If returns // "false", terminate the iteration early. - virtual bool do_card_ptr(jbyte* card_ptr, uint worker_i = 0) = 0; + virtual bool do_card_ptr(jbyte* card_ptr, uint worker_i) = 0; }; // A ptrQueue whose elements are "oops", pointers to object heads. @@ -52,23 +52,6 @@ // Process queue entries and release resources. void flush() { flush_impl(); } - // Apply the closure to all elements, and reset the index to make the - // buffer empty. 
If a closure application returns "false", return - // "false" immediately, halting the iteration. If "consume" is true, - // deletes processed entries from logs. - bool apply_closure(CardTableEntryClosure* cl, - bool consume = true, - uint worker_i = 0); - - // Apply the closure to all elements of "buf", down to "index" - // (inclusive.) If returns "false", then a closure application returned - // "false", and we return immediately. If "consume" is true, entries are - // set to NULL as they are processed, so they will not be processed again - // later. - static bool apply_closure_to_buffer(CardTableEntryClosure* cl, - void** buf, size_t index, size_t sz, - bool consume = true, - uint worker_i = 0); void **get_buf() { return _buf;} size_t get_index() { return _index;} void reinitialize() { _buf = 0; _sz = 0; _index = 0;} @@ -94,8 +77,19 @@ DirtyCardQueue _shared_dirty_card_queue; - // Override. - bool mut_process_buffer(void** buf); + // Apply the closure to the elements of "node" from it's index to + // buffer_size. If all closure applications return true, then + // returns true. Stops processing after the first closure + // application that returns false, and returns false from this + // function. If "consume" is true, the node's index is updated to + // exclude the processed elements, e.g. up to the element for which + // the closure returned false. + bool apply_closure_to_buffer(CardTableEntryClosure* cl, + BufferNode* node, + bool consume, + uint worker_i = 0); + + bool mut_process_buffer(BufferNode* node); // Protected by the _cbl_mon. FreeIdSet* _free_ids; @@ -107,6 +101,9 @@ // Current buffer node used for parallel iteration. BufferNode* volatile _cur_par_buffer_node; + + void concatenate_log(DirtyCardQueue& dcq); + public: DirtyCardQueueSet(bool notify_when_complete = true); @@ -125,13 +122,18 @@ static void handle_zero_index_for_thread(JavaThread* t); - // If there exists some completed buffer, pop it, then apply the - // specified closure to all its elements, nulling out those elements - // processed. If all elements are processed, returns "true". If no - // completed buffers exist, returns false. If a completed buffer exists, - // but is only partially completed before a "yield" happens, the - // partially completed buffer (with its processed elements set to NULL) - // is returned to the completed buffer set, and this call returns false. + // If there are more than stop_at completed buffers, pop one, apply + // the specified closure to its active elements, and return true. + // Otherwise return false. + // + // A completely processed buffer is freed. However, if a closure + // invocation returns false, processing is stopped and the partially + // processed buffer (with its index updated to exclude the processed + // elements, e.g. up to the element for which the closure returned + // false) is returned to the completed buffer set. + // + // If during_pause is true, stop_at must be zero, and the closure + // must never return false. bool apply_closure_to_completed_buffer(CardTableEntryClosure* cl, uint worker_i, size_t stop_at, @@ -139,13 +141,10 @@ BufferNode* get_completed_buffer(size_t stop_at); - // Applies the current closure to all completed buffers, - // non-consumptively. - void apply_closure_to_all_completed_buffers(CardTableEntryClosure* cl); - void reset_for_par_iteration() { _cur_par_buffer_node = _completed_buffers_head; } // Applies the current closure to all completed buffers, non-consumptively. - // Parallel version. 
+ // Can be used in parallel, all callers using the iteration state initialized + // by reset_for_par_iteration. void par_apply_closure_to_all_completed_buffers(CardTableEntryClosure* cl); DirtyCardQueue* shared_dirty_card_queue() { diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/g1AllocRegion.cpp --- a/hotspot/src/share/vm/gc/g1/g1AllocRegion.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/g1/g1AllocRegion.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -26,6 +26,7 @@ #include "gc/g1/g1AllocRegion.inline.hpp" #include "gc/g1/g1EvacStats.inline.hpp" #include "gc/g1/g1CollectedHeap.inline.hpp" +#include "memory/resourceArea.hpp" #include "runtime/orderAccess.inline.hpp" G1CollectedHeap* G1AllocRegion::_g1h = NULL; @@ -194,44 +195,53 @@ return (alloc_region == _dummy_region) ? NULL : alloc_region; } -#if G1_ALLOC_REGION_TRACING +#ifndef PRODUCT void G1AllocRegion::trace(const char* str, size_t min_word_size, size_t desired_word_size, size_t actual_word_size, HeapWord* result) { // All the calls to trace that set either just the size or the size - // and the result are considered part of level 2 tracing and are - // skipped during level 1 tracing. - if ((actual_word_size == 0 && result == NULL) || (G1_ALLOC_REGION_TRACING > 1)) { - const size_t buffer_length = 128; - char hr_buffer[buffer_length]; - char rest_buffer[buffer_length]; + // and the result are considered part of detailed tracing and are + // skipped during other tracing. + + Log(gc, alloc, region) log; + + if (!log.is_debug()) { + return; + } - HeapRegion* alloc_region = _alloc_region; - if (alloc_region == NULL) { - jio_snprintf(hr_buffer, buffer_length, "NULL"); - } else if (alloc_region == _dummy_region) { - jio_snprintf(hr_buffer, buffer_length, "DUMMY"); + bool detailed_info = log.is_trace(); + + if ((actual_word_size == 0 && result == NULL) || detailed_info) { + ResourceMark rm; + outputStream* out; + if (detailed_info) { + out = log.trace_stream(); } else { - jio_snprintf(hr_buffer, buffer_length, - HR_FORMAT, HR_FORMAT_PARAMS(alloc_region)); + out = log.debug_stream(); } - if (G1_ALLOC_REGION_TRACING > 1) { - if (result != NULL) { - jio_snprintf(rest_buffer, buffer_length, "min " SIZE_FORMAT " desired " SIZE_FORMAT " actual " SIZE_FORMAT " " PTR_FORMAT, - min_word_size, desired_word_size, actual_word_size, result); - } else if (min_word_size != 0) { - jio_snprintf(rest_buffer, buffer_length, "min " SIZE_FORMAT " desired " SIZE_FORMAT, min_word_size, desired_word_size); - } else { - jio_snprintf(rest_buffer, buffer_length, ""); - } + out->print("%s: %u ", _name, _count); + + if (_alloc_region == NULL) { + out->print("NULL"); + } else if (_alloc_region == _dummy_region) { + out->print("DUMMY"); } else { - jio_snprintf(rest_buffer, buffer_length, ""); + out->print(HR_FORMAT, HR_FORMAT_PARAMS(_alloc_region)); } - tty->print_cr("[%s] %u %s : %s %s", - _name, _count, hr_buffer, str, rest_buffer); + out->print(" : %s", str); + + if (detailed_info) { + if (result != NULL) { + out->print(" min " SIZE_FORMAT " desired " SIZE_FORMAT " actual " SIZE_FORMAT " " PTR_FORMAT, + min_word_size, desired_word_size, actual_word_size, p2i(result)); + } else if (min_word_size != 0) { + out->print(" min " SIZE_FORMAT " desired " SIZE_FORMAT, min_word_size, desired_word_size); + } + } + out->cr(); } } -#endif // G1_ALLOC_REGION_TRACING +#endif // PRODUCT G1AllocRegion::G1AllocRegion(const char* name, bool bot_updates) @@ -253,7 +263,7 @@ HeapRegion* G1GCAllocRegion::allocate_new_region(size_t word_size, bool force) { 
assert(!force, "not supported for GC alloc regions"); - return _g1h->new_gc_alloc_region(word_size, count(), _purpose); + return _g1h->new_gc_alloc_region(word_size, _purpose); } void G1GCAllocRegion::retire_region(HeapRegion* alloc_region, diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/g1AllocRegion.hpp --- a/hotspot/src/share/vm/gc/g1/g1AllocRegion.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/g1/g1AllocRegion.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -31,9 +31,6 @@ class G1CollectedHeap; -// 0 -> no tracing, 1 -> basic tracing, 2 -> basic + allocation tracing -#define G1_ALLOC_REGION_TRACING 0 - // A class that holds a region that is active in satisfying allocation // requests, potentially issued in parallel. When the active region is // full it will be retired and replaced with a new one. The @@ -213,19 +210,11 @@ // is returned after it's been retired. virtual HeapRegion* release(); -#if G1_ALLOC_REGION_TRACING void trace(const char* str, size_t min_word_size = 0, size_t desired_word_size = 0, size_t actual_word_size = 0, - HeapWord* result = NULL); -#else // G1_ALLOC_REGION_TRACING - void trace(const char* str, - size_t min_word_size = 0, - size_t desired_word_size = 0, - size_t actual_word_size = 0, - HeapWord* result = NULL) { } -#endif // G1_ALLOC_REGION_TRACING + HeapWord* result = NULL) PRODUCT_RETURN; }; class MutatorAllocRegion : public G1AllocRegion { diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/g1Analytics.cpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/src/share/vm/gc/g1/g1Analytics.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,329 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" +#include "gc/g1/g1Analytics.hpp" +#include "gc/g1/g1Predictions.hpp" +#include "runtime/os.hpp" +#include "utilities/debug.hpp" +#include "utilities/numberSeq.hpp" + +// Different defaults for different number of GC threads +// They were chosen by running GCOld and SPECjbb on debris with different +// numbers of GC threads and choosing them based on the results + +// all the same +static double rs_length_diff_defaults[] = { + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 +}; + +static double cost_per_card_ms_defaults[] = { + 0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015 +}; + +// all the same +static double young_cards_per_entry_ratio_defaults[] = { + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 +}; + +static double cost_per_entry_ms_defaults[] = { + 0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005 +}; + +static double cost_per_byte_ms_defaults[] = { + 0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009 +}; + +// these should be pretty consistent +static double constant_other_time_ms_defaults[] = { + 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0 +}; + + +static double young_other_cost_per_region_ms_defaults[] = { + 0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1 +}; + +static double non_young_other_cost_per_region_ms_defaults[] = { + 1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30 +}; + +G1Analytics::G1Analytics(const G1Predictions* predictor) : + _predictor(predictor), + _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)), + _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)), + _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)), + _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)), + _prev_collection_pause_end_ms(0.0), + _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)), + _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)), + _cost_scan_hcc_seq(new TruncatedSeq(TruncatedSeqLength)), + _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)), + _mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)), + _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)), + _mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)), + _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)), + _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)), + _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)), + _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)), + _non_young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)), + _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)), + _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)), + _recent_prev_end_times_for_all_gcs_sec(new TruncatedSeq(NumPrevPausesForHeuristics)) { + + // Seed sequences with initial values. 
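The default tables above are indexed by GC thread count; the seeding code that follows picks column MIN2(ParallelGCThreads - 1, 7), i.e. thread count minus one, capped at the last column. A trivial standalone illustration of that lookup (the thread counts are made up):

    #include <algorithm>
    #include <cstdio>
    #include <initializer_list>

    int main() {
      // Same shape as cost_per_card_ms_defaults above: one column per GC thread count.
      const double cost_per_card_ms_defaults[] = {
        0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
      };
      for (unsigned threads : {1u, 2u, 4u, 8u, 16u}) {
        unsigned index = std::min(threads - 1u, 7u);  // MIN2(ParallelGCThreads - 1, 7u)
        std::printf("%2u GC threads -> seed %.4f ms/card\n",
                    threads, cost_per_card_ms_defaults[index]);
      }
      return 0;
    }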
+ _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime()); + _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0; + + int index = MIN2(ParallelGCThreads - 1, 7u); + + _rs_length_diff_seq->add(rs_length_diff_defaults[index]); + _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]); + _cost_scan_hcc_seq->add(0.0); + _young_cards_per_entry_ratio_seq->add(young_cards_per_entry_ratio_defaults[index]); + _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]); + _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]); + _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]); + _young_other_cost_per_region_ms_seq->add(young_other_cost_per_region_ms_defaults[index]); + _non_young_other_cost_per_region_ms_seq->add(non_young_other_cost_per_region_ms_defaults[index]); + + // start conservatively (around 50ms is about right) + _concurrent_mark_remark_times_ms->add(0.05); + _concurrent_mark_cleanup_times_ms->add(0.20); +} + +double G1Analytics::get_new_prediction(TruncatedSeq const* seq) const { + return _predictor->get_new_prediction(seq); +} + +size_t G1Analytics::get_new_size_prediction(TruncatedSeq const* seq) const { + return (size_t)get_new_prediction(seq); +} + +int G1Analytics::num_alloc_rate_ms() const { + return _alloc_rate_ms_seq->num(); +} + +void G1Analytics::report_concurrent_mark_remark_times_ms(double ms) { + _concurrent_mark_remark_times_ms->add(ms); +} + +void G1Analytics::report_alloc_rate_ms(double alloc_rate) { + _alloc_rate_ms_seq->add(alloc_rate); +} + +void G1Analytics::compute_pause_time_ratio(double interval_ms, double pause_time_ms) { + _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum() / interval_ms; + if (_recent_avg_pause_time_ratio < 0.0 || + (_recent_avg_pause_time_ratio - 1.0 > 0.0)) { + // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in + // CR 6902692 by redoing the manner in which the ratio is incrementally computed. + if (_recent_avg_pause_time_ratio < 0.0) { + _recent_avg_pause_time_ratio = 0.0; + } else { + assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant"); + _recent_avg_pause_time_ratio = 1.0; + } + } + + // Compute the ratio of just this last pause time to the entire time range stored + // in the vectors. Comparing this pause to the entire range, rather than only the + // most recent interval, has the effect of smoothing over a possible transient 'burst' + // of more frequent pauses that don't really reflect a change in heap occupancy. + // This reduces the likelihood of a needless heap expansion being triggered. 
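compute_pause_time_ratio() above keeps two figures: the sum of the recent pauses over the elapsed interval, clipped to [0, 1], and a per-pause ratio that scales only the last pause by the number of recorded pause end times, so a single long pause is judged against the whole window rather than triggering a heap expansion on its own. A quick standalone arithmetic check with made-up numbers:

    #include <algorithm>
    #include <cstdio>

    int main() {
      double recent_pause_sum_ms = 180.0;   // sum of the recorded recent pauses (assumed)
      double interval_ms         = 4000.0;  // wall-clock span of that window (assumed)
      double last_pause_ms       = 30.0;    // the pause that just ended (assumed)
      int    num_recorded        = 10;      // NumPrevPausesForHeuristics

      // GC time over elapsed time for the recent window, clipped into [0, 1].
      double recent_ratio =
          std::min(std::max(recent_pause_sum_ms / interval_ms, 0.0), 1.0);

      // Last pause only, scaled as if every recorded pause had been this long.
      double last_ratio = (last_pause_ms * num_recorded) / interval_ms;

      std::printf("recent=%.3f last=%.3f\n", recent_ratio, last_ratio);  // ~0.045 / 0.075
      return 0;
    }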
+ _last_pause_time_ratio = + (pause_time_ms * _recent_prev_end_times_for_all_gcs_sec->num()) / interval_ms; +} + +void G1Analytics::report_cost_per_card_ms(double cost_per_card_ms) { + _cost_per_card_ms_seq->add(cost_per_card_ms); +} + +void G1Analytics::report_cost_scan_hcc(double cost_scan_hcc) { + _cost_scan_hcc_seq->add(cost_scan_hcc); +} + +void G1Analytics::report_cost_per_entry_ms(double cost_per_entry_ms, bool last_gc_was_young) { + if (last_gc_was_young) { + _cost_per_entry_ms_seq->add(cost_per_entry_ms); + } else { + _mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms); + } +} + +void G1Analytics::report_cards_per_entry_ratio(double cards_per_entry_ratio, bool last_gc_was_young) { + if (last_gc_was_young) { + _young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio); + } else { + _mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio); + } +} + +void G1Analytics::report_rs_length_diff(double rs_length_diff) { + _rs_length_diff_seq->add(rs_length_diff); +} + +void G1Analytics::report_cost_per_byte_ms(double cost_per_byte_ms, bool in_marking_window) { + if (in_marking_window) { + _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms); + } else { + _cost_per_byte_ms_seq->add(cost_per_byte_ms); + } +} + +void G1Analytics::report_young_other_cost_per_region_ms(double other_cost_per_region_ms) { + _young_other_cost_per_region_ms_seq->add(other_cost_per_region_ms); +} + +void G1Analytics::report_non_young_other_cost_per_region_ms(double other_cost_per_region_ms) { + _non_young_other_cost_per_region_ms_seq->add(other_cost_per_region_ms); +} + +void G1Analytics::report_constant_other_time_ms(double constant_other_time_ms) { + _constant_other_time_ms_seq->add(constant_other_time_ms); +} + +void G1Analytics::report_pending_cards(double pending_cards) { + _pending_cards_seq->add(pending_cards); +} + +void G1Analytics::report_rs_lengths(double rs_lengths) { + _rs_lengths_seq->add(rs_lengths); +} + +size_t G1Analytics::predict_rs_length_diff() const { + return get_new_size_prediction(_rs_length_diff_seq); +} + +double G1Analytics::predict_alloc_rate_ms() const { + return get_new_prediction(_alloc_rate_ms_seq); +} + +double G1Analytics::predict_cost_per_card_ms() const { + return get_new_prediction(_cost_per_card_ms_seq); +} + +double G1Analytics::predict_scan_hcc_ms() const { + return get_new_prediction(_cost_scan_hcc_seq); +} + +double G1Analytics::predict_rs_update_time_ms(size_t pending_cards) const { + return pending_cards * predict_cost_per_card_ms() + predict_scan_hcc_ms(); +} + +double G1Analytics::predict_young_cards_per_entry_ratio() const { + return get_new_prediction(_young_cards_per_entry_ratio_seq); +} + +double G1Analytics::predict_mixed_cards_per_entry_ratio() const { + if (_mixed_cards_per_entry_ratio_seq->num() < 2) { + return predict_young_cards_per_entry_ratio(); + } else { + return get_new_prediction(_mixed_cards_per_entry_ratio_seq); + } +} + +size_t G1Analytics::predict_card_num(size_t rs_length, bool gcs_are_young) const { + if (gcs_are_young) { + return (size_t) (rs_length * predict_young_cards_per_entry_ratio()); + } else { + return (size_t) (rs_length * predict_mixed_cards_per_entry_ratio()); + } +} + +double G1Analytics::predict_rs_scan_time_ms(size_t card_num, bool gcs_are_young) const { + if (gcs_are_young) { + return card_num * get_new_prediction(_cost_per_entry_ms_seq); + } else { + return predict_mixed_rs_scan_time_ms(card_num); + } +} + +double G1Analytics::predict_mixed_rs_scan_time_ms(size_t card_num) const { + if (_mixed_cost_per_entry_ms_seq->num() 
< 3) { + return card_num * get_new_prediction(_cost_per_entry_ms_seq); + } else { + return card_num * get_new_prediction(_mixed_cost_per_entry_ms_seq); + } +} + +double G1Analytics::predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) const { + if (_cost_per_byte_ms_during_cm_seq->num() < 3) { + return (1.1 * bytes_to_copy) * get_new_prediction(_cost_per_byte_ms_seq); + } else { + return bytes_to_copy * get_new_prediction(_cost_per_byte_ms_during_cm_seq); + } +} + +double G1Analytics::predict_object_copy_time_ms(size_t bytes_to_copy, bool during_concurrent_mark) const { + if (during_concurrent_mark) { + return predict_object_copy_time_ms_during_cm(bytes_to_copy); + } else { + return bytes_to_copy * get_new_prediction(_cost_per_byte_ms_seq); + } +} + +double G1Analytics::predict_constant_other_time_ms() const { + return get_new_prediction(_constant_other_time_ms_seq); +} + +double G1Analytics::predict_young_other_time_ms(size_t young_num) const { + return young_num * get_new_prediction(_young_other_cost_per_region_ms_seq); +} + +double G1Analytics::predict_non_young_other_time_ms(size_t non_young_num) const { + return non_young_num * get_new_prediction(_non_young_other_cost_per_region_ms_seq); +} + +double G1Analytics::predict_remark_time_ms() const { + return get_new_prediction(_concurrent_mark_remark_times_ms); +} + +double G1Analytics::predict_cleanup_time_ms() const { + return get_new_prediction(_concurrent_mark_cleanup_times_ms); +} + +size_t G1Analytics::predict_rs_lengths() const { + return get_new_size_prediction(_rs_lengths_seq); +} + +size_t G1Analytics::predict_pending_cards() const { + return get_new_size_prediction(_pending_cards_seq); +} + +double G1Analytics::last_known_gc_end_time_sec() const { + return _recent_prev_end_times_for_all_gcs_sec->oldest(); +} + +void G1Analytics::update_recent_gc_times(double end_time_sec, + double pause_time_ms) { + _recent_gc_times_ms->add(pause_time_ms); + _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec); + _prev_collection_pause_end_ms = end_time_sec * 1000.0; +} + +void G1Analytics::report_concurrent_mark_cleanup_times_ms(double ms) { + _concurrent_mark_cleanup_times_ms->add(ms); +} + diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/g1Analytics.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/src/share/vm/gc/g1/g1Analytics.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,159 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_VM_GC_G1_G1MEASUREMENTS_HPP +#define SHARE_VM_GC_G1_G1MEASUREMENTS_HPP + +#include "memory/allocation.hpp" +#include "utilities/globalDefinitions.hpp" + +class TruncatedSeq; +class G1Predictions; + +class G1Analytics: public CHeapObj { + const static int TruncatedSeqLength = 10; + const static int NumPrevPausesForHeuristics = 10; + const G1Predictions* _predictor; + + // These exclude marking times. + TruncatedSeq* _recent_gc_times_ms; + + TruncatedSeq* _concurrent_mark_remark_times_ms; + TruncatedSeq* _concurrent_mark_cleanup_times_ms; + + TruncatedSeq* _alloc_rate_ms_seq; + double _prev_collection_pause_end_ms; + + TruncatedSeq* _rs_length_diff_seq; + TruncatedSeq* _cost_per_card_ms_seq; + TruncatedSeq* _cost_scan_hcc_seq; + TruncatedSeq* _young_cards_per_entry_ratio_seq; + TruncatedSeq* _mixed_cards_per_entry_ratio_seq; + TruncatedSeq* _cost_per_entry_ms_seq; + TruncatedSeq* _mixed_cost_per_entry_ms_seq; + TruncatedSeq* _cost_per_byte_ms_seq; + TruncatedSeq* _constant_other_time_ms_seq; + TruncatedSeq* _young_other_cost_per_region_ms_seq; + TruncatedSeq* _non_young_other_cost_per_region_ms_seq; + + TruncatedSeq* _pending_cards_seq; + TruncatedSeq* _rs_lengths_seq; + + TruncatedSeq* _cost_per_byte_ms_during_cm_seq; + + // Statistics kept per GC stoppage, pause or full. + TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec; + + // The ratio of gc time to elapsed time, computed over recent pauses, + // and the ratio for just the last pause. + double _recent_avg_pause_time_ratio; + double _last_pause_time_ratio; + + double get_new_prediction(TruncatedSeq const* seq) const; + size_t get_new_size_prediction(TruncatedSeq const* seq) const; + +public: + G1Analytics(const G1Predictions* predictor); + + double prev_collection_pause_end_ms() const { + return _prev_collection_pause_end_ms; + } + + double recent_avg_pause_time_ratio() const { + return _recent_avg_pause_time_ratio; + } + + double last_pause_time_ratio() const { + return _last_pause_time_ratio; + } + + uint number_of_recorded_pause_times() const { + return NumPrevPausesForHeuristics; + } + + void append_prev_collection_pause_end_ms(double ms) { + _prev_collection_pause_end_ms += ms; + } + + void report_concurrent_mark_remark_times_ms(double ms); + void report_concurrent_mark_cleanup_times_ms(double ms); + void report_alloc_rate_ms(double alloc_rate); + void report_cost_per_card_ms(double cost_per_card_ms); + void report_cost_scan_hcc(double cost_scan_hcc); + void report_cost_per_entry_ms(double cost_per_entry_ms, bool last_gc_was_young); + void report_cards_per_entry_ratio(double cards_per_entry_ratio, bool last_gc_was_young); + void report_rs_length_diff(double rs_length_diff); + void report_cost_per_byte_ms(double cost_per_byte_ms, bool in_marking_window); + void report_young_other_cost_per_region_ms(double other_cost_per_region_ms); + void report_non_young_other_cost_per_region_ms(double other_cost_per_region_ms); + void report_constant_other_time_ms(double constant_other_time_ms); + void report_pending_cards(double pending_cards); + void report_rs_lengths(double rs_lengths); + + size_t predict_rs_length_diff() const; + + double predict_alloc_rate_ms() const; + int num_alloc_rate_ms() const; + + double predict_cost_per_card_ms() const; + + double predict_scan_hcc_ms() const; + + double predict_rs_update_time_ms(size_t pending_cards) const; + + double predict_young_cards_per_entry_ratio() const; + + double predict_mixed_cards_per_entry_ratio() const; + + size_t predict_card_num(size_t rs_length, bool 
gcs_are_young) const; + + double predict_rs_scan_time_ms(size_t card_num, bool gcs_are_young) const; + + double predict_mixed_rs_scan_time_ms(size_t card_num) const; + + double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) const; + + double predict_object_copy_time_ms(size_t bytes_to_copy, bool during_concurrent_mark) const; + + double predict_constant_other_time_ms() const; + + double predict_young_other_time_ms(size_t young_num) const; + + double predict_non_young_other_time_ms(size_t non_young_num) const; + + double predict_remark_time_ms() const; + + double predict_cleanup_time_ms() const; + + size_t predict_rs_lengths() const; + size_t predict_pending_cards() const; + + // Add a new GC of the given duration and end time to the record. + void update_recent_gc_times(double end_time_sec, double elapsed_ms); + void compute_pause_time_ratio(double interval_ms, double pause_time_ms); + + double last_known_gc_end_time_sec() const; +}; + +#endif // SHARE_VM_GC_G1_G1MEASUREMENTS_HPP diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp --- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -34,10 +34,12 @@ #include "gc/g1/concurrentMarkThread.inline.hpp" #include "gc/g1/g1Allocator.inline.hpp" #include "gc/g1/g1CollectedHeap.inline.hpp" +#include "gc/g1/g1CollectionSet.hpp" #include "gc/g1/g1CollectorPolicy.hpp" #include "gc/g1/g1CollectorState.hpp" #include "gc/g1/g1EvacStats.inline.hpp" #include "gc/g1/g1GCPhaseTimes.hpp" +#include "gc/g1/g1HeapSizingPolicy.hpp" #include "gc/g1/g1HeapTransition.hpp" #include "gc/g1/g1HeapVerifier.hpp" #include "gc/g1/g1MarkSweep.hpp" @@ -67,6 +69,7 @@ #include "logging/log.hpp" #include "memory/allocation.hpp" #include "memory/iterator.hpp" +#include "memory/resourceArea.hpp" #include "oops/oop.inline.hpp" #include "runtime/atomic.inline.hpp" #include "runtime/init.hpp" @@ -566,7 +569,7 @@ // Give a warning if we seem to be looping forever. if ((QueuedAllocationWarningCount > 0) && (try_count % QueuedAllocationWarningCount == 0)) { - warning("G1CollectedHeap::mem_allocate retries %d times", try_count); + log_warning(gc)("G1CollectedHeap::mem_allocate retries %d times", try_count); } } @@ -675,8 +678,8 @@ // Give a warning if we seem to be looping forever. if ((QueuedAllocationWarningCount > 0) && (try_count % QueuedAllocationWarningCount == 0)) { - warning("G1CollectedHeap::attempt_allocation_slow() " - "retries %d times", try_count); + log_warning(gc)("G1CollectedHeap::attempt_allocation_slow() " + "retries %d times", try_count); } } @@ -1091,8 +1094,8 @@ if ((QueuedAllocationWarningCount > 0) && (try_count % QueuedAllocationWarningCount == 0)) { - warning("G1CollectedHeap::attempt_allocation_humongous() " - "retries %d times", try_count); + log_warning(gc)("G1CollectedHeap::attempt_allocation_humongous() " + "retries %d times", try_count); } } @@ -1228,6 +1231,7 @@ ResourceMark rm; print_heap_before_gc(); + print_heap_regions(); trace_heap_before_gc(gc_tracer); size_t metadata_prev_used = MetaspaceAux::used_bytes(); @@ -1302,9 +1306,9 @@ // set between the last GC or pause and now. We need to clear the // incremental collection set and then start rebuilding it afresh // after this full GC. 
- abandon_collection_set(g1_policy()->inc_cset_head()); - g1_policy()->clear_incremental_cset(); - g1_policy()->stop_incremental_cset_building(); + abandon_collection_set(collection_set()->inc_head()); + collection_set()->clear_incremental(); + collection_set()->stop_incremental_building(); tear_down_region_sets(false /* free_list_only */); collector_state()->set_gcs_are_young(true); @@ -1421,13 +1425,13 @@ // the full GC has compacted objects and updated TAMS but not updated // the prev bitmap. if (G1VerifyBitmaps) { - ((G1CMBitMap*) concurrent_mark()->prevMarkBitMap())->clearAll(); + _cm->clear_prev_bitmap(workers()); } _verifier->check_bitmaps("Full GC End"); // Start a new incremental collection set for the next pause - assert(g1_policy()->collection_set() == NULL, "must be"); - g1_policy()->start_incremental_cset_building(); + assert(collection_set()->head() == NULL, "must be"); + collection_set()->start_incremental_building(); clear_cset_fast_test(); @@ -1446,6 +1450,7 @@ heap_transition.print(); print_heap_after_gc(); + print_heap_regions(); trace_heap_after_gc(gc_tracer); post_full_gc_dump(gc_timer); @@ -1741,6 +1746,7 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) : CollectedHeap(), _g1_policy(policy_), + _collection_set(this), _dirty_card_queue_set(false), _is_alive_closure_cm(this), _is_alive_closure_stw(this), @@ -1765,15 +1771,12 @@ _expand_heap_after_alloc_failure(true), _old_marking_cycles_started(0), _old_marking_cycles_completed(0), - _heap_summary_sent(false), _in_cset_fast_test(), _dirty_cards_region_list(NULL), _worker_cset_start_region(NULL), _worker_cset_start_region_time_stamp(NULL), _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()), - _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()), - _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()), - _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()) { + _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()) { _workers = new WorkGang("GC Thread", ParallelGCThreads, /* are_GC_task_threads */true, @@ -1782,6 +1785,9 @@ _verifier = new G1HeapVerifier(this); _allocator = G1Allocator::create_allocator(this); + + _heap_sizing_policy = G1HeapSizingPolicy::create(this, _g1_policy->analytics()); + _humongous_object_threshold_in_words = humongous_threshold_for(HeapRegion::GrainWords); // Override the default _filler_array_max_size so that no humongous filler @@ -2314,52 +2320,6 @@ FullGCCount_lock->notify_all(); } -void G1CollectedHeap::register_concurrent_cycle_start(const Ticks& start_time) { - GCIdMarkAndRestore conc_gc_id_mark; - collector_state()->set_concurrent_cycle_started(true); - _gc_timer_cm->register_gc_start(start_time); - - _gc_tracer_cm->report_gc_start(gc_cause(), _gc_timer_cm->gc_start()); - trace_heap_before_gc(_gc_tracer_cm); - _cmThread->set_gc_id(GCId::current()); -} - -void G1CollectedHeap::register_concurrent_cycle_end() { - if (collector_state()->concurrent_cycle_started()) { - GCIdMarkAndRestore conc_gc_id_mark(_cmThread->gc_id()); - if (_cm->has_aborted()) { - _gc_tracer_cm->report_concurrent_mode_failure(); - - // ConcurrentGCTimer will be ended as well. - _cm->register_concurrent_gc_end_and_stop_timer(); - } else { - _gc_timer_cm->register_gc_end(); - } - - _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions()); - - // Clear state variables to prepare for the next concurrent cycle. 
- collector_state()->set_concurrent_cycle_started(false); - _heap_summary_sent = false; - } -} - -void G1CollectedHeap::trace_heap_after_concurrent_cycle() { - if (collector_state()->concurrent_cycle_started()) { - // This function can be called when: - // the cleanup pause is run - // the concurrent cycle is aborted before the cleanup pause. - // the concurrent cycle is aborted after the cleanup pause, - // but before the concurrent cycle end has been registered. - // Make sure that we only send the heap information once. - if (!_heap_summary_sent) { - GCIdMarkAndRestore conc_gc_id_mark(_cmThread->gc_id()); - trace_heap_after_gc(_gc_tracer_cm); - _heap_summary_sent = true; - } - } -} - void G1CollectedHeap::collect(GCCause::Cause cause) { assert_heap_not_locked(); @@ -2545,8 +2505,8 @@ // p threads // Then thread t will start at region floor ((t * n) / p) - result = g1_policy()->collection_set(); - uint cs_size = g1_policy()->cset_region_length(); + result = collection_set()->head(); + uint cs_size = collection_set()->region_length(); uint active_workers = workers()->active_workers(); uint end_ind = (cs_size * worker_i) / active_workers; @@ -2577,7 +2537,7 @@ } void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) { - HeapRegion* r = g1_policy()->collection_set(); + HeapRegion* r = collection_set()->head(); while (r != NULL) { HeapRegion* next = r->next_in_collection_set(); if (cl->doHeapRegion(r)) { @@ -2606,7 +2566,7 @@ } cur = next; } - cur = g1_policy()->collection_set(); + cur = collection_set()->head(); while (cur != r) { HeapRegion* next = cur->next_in_collection_set(); if (cl->doHeapRegion(cur) && false) { @@ -2716,6 +2676,14 @@ return false; // keep some compilers happy } +void G1CollectedHeap::print_heap_regions() const { + Log(gc, heap, region) log; + if (log.is_trace()) { + ResourceMark rm; + print_regions_on(log.trace_stream()); + } +} + void G1CollectedHeap::print_on(outputStream* st) const { st->print(" %-20s", "garbage-first heap"); st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K", @@ -2729,18 +2697,14 @@ uint young_regions = _young_list->length(); st->print("%u young (" SIZE_FORMAT "K), ", young_regions, (size_t) young_regions * HeapRegion::GrainBytes / K); - uint survivor_regions = g1_policy()->recorded_survivor_regions(); + uint survivor_regions = _young_list->survivor_length(); st->print("%u survivors (" SIZE_FORMAT "K)", survivor_regions, (size_t) survivor_regions * HeapRegion::GrainBytes / K); st->cr(); MetaspaceAux::print_on(st); } -void G1CollectedHeap::print_extended_on(outputStream* st) const { - print_on(st); - - // Print the per-region information. - st->cr(); +void G1CollectedHeap::print_regions_on(outputStream* st) const { st->print_cr("Heap Regions: E=young(eden), S=young(survivor), O=old, " "HS=humongous(starts), HC=humongous(continues), " "CS=collection set, F=free, A=archive, TS=gc time stamp, " @@ -2750,6 +2714,13 @@ heap_region_iterate(&blk); } +void G1CollectedHeap::print_extended_on(outputStream* st) const { + print_on(st); + + // Print the per-region information. + print_regions_on(st); +} + void G1CollectedHeap::print_on_error(outputStream* st) const { this->CollectedHeap::print_on_error(st); @@ -2839,12 +2810,14 @@ size_t eden_used_bytes = young_list->eden_used_bytes(); size_t survivor_used_bytes = young_list->survivor_used_bytes(); + size_t heap_used = Heap_lock->owned_by_self() ? 
used() : used_unlocked(); size_t eden_capacity_bytes = (g1_policy()->young_list_target_length() * HeapRegion::GrainBytes) - survivor_used_bytes; VirtualSpaceSummary heap_summary = create_heap_space_summary(); - return G1HeapSummary(heap_summary, used(), eden_used_bytes, eden_capacity_bytes, survivor_used_bytes, num_regions()); + return G1HeapSummary(heap_summary, heap_used, eden_used_bytes, + eden_capacity_bytes, survivor_used_bytes, num_regions()); } G1EvacSummary G1CollectedHeap::create_g1_evac_summary(G1EvacStats* stats) { @@ -2862,7 +2835,6 @@ gc_tracer->report_metaspace_summary(when, metaspace_summary); } - G1CollectedHeap* G1CollectedHeap::heap() { CollectedHeap* heap = Universe::heap(); assert(heap != NULL, "Uninitialized access to G1CollectedHeap::heap()"); @@ -3138,10 +3110,10 @@ } void G1CollectedHeap::print_taskqueue_stats() const { - if (!log_develop_is_enabled(Trace, gc, task, stats)) { + if (!log_is_enabled(Trace, gc, task, stats)) { return; } - LogHandle(gc, task, stats) log; + Log(gc, task, stats) log; ResourceMark rm; outputStream* st = log.trace_stream(); @@ -3201,15 +3173,19 @@ wait_for_root_region_scanning(); print_heap_before_gc(); + print_heap_regions(); trace_heap_before_gc(_gc_tracer_stw); _verifier->verify_region_sets_optional(); _verifier->verify_dirty_young_regions(); - // This call will decide whether this pause is an initial-mark - // pause. If it is, during_initial_mark_pause() will return true - // for the duration of this pause. - g1_policy()->decide_on_conc_mark_initiation(); + // We should not be doing initial mark unless the conc mark thread is running + if (!_cmThread->should_terminate()) { + // This call will decide whether this pause is an initial-mark + // pause. If it is, during_initial_mark_pause() will return true + // for the duration of this pause. + g1_policy()->decide_on_conc_mark_initiation(); + } // We do not allow initial-mark to be piggy-backed on a mixed GC. assert(!collector_state()->during_initial_mark_pause() || @@ -3231,7 +3207,7 @@ // We are about to start a marking cycle, so we increment the // full collection counter. increment_old_marking_cycles_started(); - register_concurrent_cycle_start(_gc_timer_stw->gc_start()); + _cm->gc_tracer_cm()->set_gc_cause(gc_cause()); } _gc_tracer_stw->report_yc_type(collector_state()->yc_type()); @@ -3253,7 +3229,7 @@ Threads::number_of_non_daemon_threads()); workers()->set_active_workers(active_workers); - g1_policy()->note_gc_start(active_workers); + g1_policy()->note_gc_start(); TraceCollectorStats tcs(g1mm()->incremental_collection_counters()); TraceMemoryManagerStats tms(false /* fullGC */, gc_cause()); @@ -3336,10 +3312,9 @@ concurrent_mark()->checkpointRootsInitialPre(); } - double time_remaining_ms = g1_policy()->finalize_young_cset_part(target_pause_time_ms); - g1_policy()->finalize_old_cset_part(time_remaining_ms); - - evacuation_info.set_collectionset_regions(g1_policy()->cset_region_length()); + g1_policy()->finalize_collection_set(target_pause_time_ms); + + evacuation_info.set_collectionset_regions(collection_set()->region_length()); // Make sure the remembered sets are up to date. This needs to be // done before register_humongous_regions_with_cset(), because the @@ -3358,7 +3333,7 @@ _cm->verify_no_cset_oops(); if (_hr_printer.is_active()) { - HeapRegion* hr = g1_policy()->collection_set(); + HeapRegion* hr = collection_set()->head(); while (hr != NULL) { _hr_printer.cset(hr); hr = hr->next_in_collection_set(); @@ -3373,7 +3348,7 @@ // Initialize the GC alloc regions. 
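One derived quantity in create_g1_heap_summary() above is worth spelling out: eden capacity is not measured, it is the young-list target length (a region count) times the region size, minus the bytes already taken by survivors. A back-of-the-envelope standalone check (all numbers assumed):

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t region_bytes         = 1024 * 1024;     // assumed 1M G1 regions
      const size_t young_target_regions = 50;              // assumed target young length
      const size_t survivor_used_bytes  = 6 * 1024 * 1024; // assumed survivor usage

      size_t eden_capacity_bytes =
          young_target_regions * region_bytes - survivor_used_bytes;

      std::printf("eden capacity = %zu bytes (%zu K)\n",
                  eden_capacity_bytes, eden_capacity_bytes / 1024);  // 46137344 (45056 K)
      return 0;
    }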
_allocator->init_gc_alloc_regions(evacuation_info); - G1ParScanThreadStateSet per_thread_states(this, workers()->active_workers(), g1_policy()->young_cset_region_length()); + G1ParScanThreadStateSet per_thread_states(this, workers()->active_workers(), collection_set()->young_region_length()); pre_evacuate_collection_set(); // Actually do the work... @@ -3382,18 +3357,18 @@ post_evacuate_collection_set(evacuation_info, &per_thread_states); const size_t* surviving_young_words = per_thread_states.surviving_young_words(); - free_collection_set(g1_policy()->collection_set(), evacuation_info, surviving_young_words); + free_collection_set(collection_set()->head(), evacuation_info, surviving_young_words); eagerly_reclaim_humongous_regions(); - g1_policy()->clear_collection_set(); + collection_set()->clear_head(); record_obj_copy_mem_stats(); _survivor_evac_stats.adjust_desired_plab_sz(); _old_evac_stats.adjust_desired_plab_sz(); // Start a new incremental collection set for the next pause. - g1_policy()->start_incremental_cset_building(); + collection_set()->start_incremental_building(); clear_cset_fast_test(); @@ -3404,10 +3379,6 @@ assert(check_young_list_empty(false /* check_heap */), "young list should be empty"); - g1_policy()->record_survivor_regions(_young_list->survivor_length(), - _young_list->first_survivor_region(), - _young_list->last_survivor_region()); - _young_list->reset_auxilary_lists(); if (evacuation_failed()) { @@ -3442,7 +3413,7 @@ _allocator->init_mutator_alloc_region(); { - size_t expand_bytes = g1_policy()->expansion_amount(); + size_t expand_bytes = _heap_sizing_policy->expansion_amount(); if (expand_bytes > 0) { size_t bytes_before = capacity(); // No need for an ergo logging here, @@ -3468,7 +3439,7 @@ size_t total_cards_scanned = per_thread_states.total_cards_scanned(); g1_policy()->record_collection_pause_end(pause_time_ms, total_cards_scanned, heap_used_bytes_before_gc); - evacuation_info.set_collectionset_used_before(g1_policy()->collection_set_bytes_used_before()); + evacuation_info.set_collectionset_used_before(collection_set()->bytes_used_before()); evacuation_info.set_bytes_copied(g1_policy()->bytes_copied_during_gc()); MemoryService::track_memory_usage(); @@ -3538,6 +3509,7 @@ TASKQUEUE_STATS_ONLY(reset_taskqueue_stats()); print_heap_after_gc(); + print_heap_regions(); trace_heap_after_gc(_gc_tracer_stw); // We must call G1MonitoringSupport::update_sizes() in the same scoping level @@ -3776,11 +3748,12 @@ "claim value %d after unlink less than initial symbol table size %d", SymbolTable::parallel_claimed_index(), _initial_symbol_table_size); - log_debug(gc, stringdedup)("Cleaned string and symbol table, " - "strings: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed, " - "symbols: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed", - strings_processed(), strings_removed(), - symbols_processed(), symbols_removed()); + log_info(gc, stringtable)( + "Cleaned string and symbol table, " + "strings: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed, " + "symbols: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed", + strings_processed(), strings_removed(), + symbols_processed(), symbols_removed()); } void work(uint worker_id) { @@ -4083,14 +4056,10 @@ void G1CollectedHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) { - { + { // Timing scope G1StringSymbolTableUnlinkTask g1_unlink_task(is_alive, process_strings, process_symbols); workers()->run_task(&g1_unlink_task); } - - if (G1StringDedup::is_enabled()) { 
- G1StringDedup::unlink(is_alive); - } } class G1RedirtyLoggedCardsTask : public AbstractGangTask { @@ -4279,7 +4248,7 @@ _workers(workers), _active_workers(n_workers) { - assert(n_workers > 0, "shouldn't call this otherwise"); + g1h->ref_processor_stw()->set_active_mt_degree(n_workers); } // Executes the given task using concurrent marking worker threads. @@ -4400,7 +4369,9 @@ _queues(task_queues), _terminator(workers, _queues), _n_workers(workers) - { } + { + g1h->ref_processor_cm()->set_active_mt_degree(workers); + } void work(uint worker_id) { G1GCParPhaseTimesTracker x(_g1h->g1_policy()->phase_times(), G1GCPhaseTimes::PreserveCMReferents, worker_id); @@ -4543,8 +4514,9 @@ uint no_of_gc_workers = workers()->active_workers(); // Parallel reference processing - assert(rp->num_q() == no_of_gc_workers, "sanity"); - assert(no_of_gc_workers <= rp->max_num_q(), "sanity"); + assert(no_of_gc_workers <= rp->max_num_q(), + "Mismatch between the number of GC workers %u and the maximum number of Reference process queues %u", + no_of_gc_workers, rp->max_num_q()); G1STWRefProcTaskExecutor par_task_executor(this, per_thread_states, workers(), _task_queues, no_of_gc_workers); stats = rp->process_discovered_references(&is_alive, @@ -4580,8 +4552,9 @@ uint n_workers = workers()->active_workers(); - assert(rp->num_q() == n_workers, "sanity"); - assert(n_workers <= rp->max_num_q(), "sanity"); + assert(n_workers <= rp->max_num_q(), + "Mismatch between the number of GC workers %u and the maximum number of Reference process queues %u", + n_workers, rp->max_num_q()); G1STWRefProcTaskExecutor par_task_executor(this, per_thread_states, workers(), _task_queues, n_workers); rp->enqueue_discovered_references(&par_task_executor); @@ -4909,7 +4882,7 @@ if (cur->is_young()) { int index = cur->young_index_in_cset(); assert(index != -1, "invariant"); - assert((uint) index < policy->young_cset_region_length(), "invariant"); + assert((uint) index < collection_set()->young_region_length(), "invariant"); size_t words_survived = surviving_young_words[index]; cur->record_surv_words_in_group(words_survived); @@ -5382,7 +5355,7 @@ assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); assert(alloc_region->is_eden(), "all mutator alloc regions should be eden"); - g1_policy()->add_region_to_incremental_cset_lhs(alloc_region); + collection_set()->add_eden_region(alloc_region); increase_used(allocated_bytes); _hr_printer.retire(alloc_region); // We update the eden sizes here, when the region is retired, @@ -5393,33 +5366,43 @@ // Methods for the GC alloc regions -HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, - uint count, - InCSetState dest) { +bool G1CollectedHeap::has_more_regions(InCSetState dest) { + if (dest.is_old()) { + return true; + } else { + return young_list()->survivor_length() < g1_policy()->max_survivor_regions(); + } +} + +HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, InCSetState dest) { assert(FreeList_lock->owned_by_self(), "pre-condition"); - if (count < g1_policy()->max_regions(dest)) { - const bool is_survivor = (dest.is_young()); - HeapRegion* new_alloc_region = new_region(word_size, - !is_survivor, - true /* do_expand */); - if (new_alloc_region != NULL) { - // We really only need to do this for old regions given that we - // should never scan survivors. But it doesn't hurt to do it - // for survivors too. 
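[Editor's note, not part of the patch] The new has_more_regions() above gates GC alloc region allocation: old destinations may always get another region here, while survivor destinations are capped by the policy's maximum survivor count. A simplified, self-contained illustration with assumed types:

#include <cstdio>

struct Dest { bool is_old; };

static bool has_more_regions(Dest dest, unsigned survivor_length, unsigned max_survivor_regions) {
  if (dest.is_old) {
    return true;                                   // old regions are not capped at this point
  }
  return survivor_length < max_survivor_regions;   // survivors are bounded by the policy
}

int main() {
  printf("%d\n", has_more_regions(Dest{false}, 3, 4)); // 1: room for another survivor region
  printf("%d\n", has_more_regions(Dest{false}, 4, 4)); // 0: survivor cap reached
  printf("%d\n", has_more_regions(Dest{true},  9, 4)); // 1: old destination is always allowed
  return 0;
}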
- new_alloc_region->record_timestamp(); - if (is_survivor) { - new_alloc_region->set_survivor(); - _verifier->check_bitmaps("Survivor Region Allocation", new_alloc_region); - } else { - new_alloc_region->set_old(); - _verifier->check_bitmaps("Old Region Allocation", new_alloc_region); - } - _hr_printer.alloc(new_alloc_region); - bool during_im = collector_state()->during_initial_mark_pause(); - new_alloc_region->note_start_of_copying(during_im); - return new_alloc_region; + if (!has_more_regions(dest)) { + return NULL; + } + + const bool is_survivor = dest.is_young(); + + HeapRegion* new_alloc_region = new_region(word_size, + !is_survivor, + true /* do_expand */); + if (new_alloc_region != NULL) { + // We really only need to do this for old regions given that we + // should never scan survivors. But it doesn't hurt to do it + // for survivors too. + new_alloc_region->record_timestamp(); + if (is_survivor) { + new_alloc_region->set_survivor(); + young_list()->add_survivor_region(new_alloc_region); + _verifier->check_bitmaps("Survivor Region Allocation", new_alloc_region); + } else { + new_alloc_region->set_old(); + _verifier->check_bitmaps("Old Region Allocation", new_alloc_region); } + _hr_printer.alloc(new_alloc_region); + bool during_im = collector_state()->during_initial_mark_pause(); + new_alloc_region->note_start_of_copying(during_im); + return new_alloc_region; } return NULL; } @@ -5430,9 +5413,7 @@ bool during_im = collector_state()->during_initial_mark_pause(); alloc_region->note_end_of_copying(during_im); g1_policy()->record_bytes_copied_during_gc(allocated_bytes); - if (dest.is_young()) { - young_list()->add_survivor_region(alloc_region); - } else { + if (dest.is_old()) { _old_set.add(alloc_region); } _hr_printer.retire(alloc_region); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/g1CollectedHeap.hpp --- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -28,6 +28,7 @@ #include "gc/g1/evacuationInfo.hpp" #include "gc/g1/g1AllocationContext.hpp" #include "gc/g1/g1BiasedArray.hpp" +#include "gc/g1/g1CollectionSet.hpp" #include "gc/g1/g1CollectorState.hpp" #include "gc/g1/g1ConcurrentMark.hpp" #include "gc/g1/g1HRPrinter.hpp" @@ -65,17 +66,16 @@ class SpaceClosure; class CompactibleSpaceClosure; class Space; +class G1CollectionSet; class G1CollectorPolicy; class G1RemSet; class HeapRegionRemSetIterator; class G1ConcurrentMark; class ConcurrentMarkThread; class ConcurrentG1Refine; -class ConcurrentGCTimer; class GenerationCounters; class STWGCTimer; class G1NewTracer; -class G1OldTracer; class EvacuationFailedInfo; class nmethod; class Ticks; @@ -83,6 +83,7 @@ class G1Allocator; class G1ArchiveAllocator; class G1HeapVerifier; +class G1HeapSizingPolicy; typedef OverflowTaskQueue RefToScanQueue; typedef GenericTaskQueueSet RefToScanQueueSet; @@ -268,8 +269,6 @@ // concurrent cycles) we have completed. volatile uint _old_marking_cycles_completed; - bool _heap_summary_sent; - // This is a non-product method that is helpful for testing. It is // called at the end of a GC and artificially expands the heap by // allocating a number of dead regions. This way we can induce very @@ -362,6 +361,9 @@ // The current policy object for the collector. G1CollectorPolicy* _g1_policy; + G1HeapSizingPolicy* _heap_sizing_policy; + + G1CollectionSet _collection_set; // This is the second level of trying to allocate a new region. 
If // new_region() didn't find a region on the free_list, this call will @@ -469,8 +471,8 @@ size_t allocated_bytes); // For GC alloc regions. - HeapRegion* new_gc_alloc_region(size_t word_size, uint count, - InCSetState dest); + bool has_more_regions(InCSetState dest); + HeapRegion* new_gc_alloc_region(size_t word_size, InCSetState dest); void retire_gc_alloc_region(HeapRegion* alloc_region, size_t allocated_bytes, InCSetState dest); @@ -618,10 +620,6 @@ return _old_marking_cycles_completed; } - void register_concurrent_cycle_start(const Ticks& start_time); - void register_concurrent_cycle_end(); - void trace_heap_after_concurrent_cycle(); - G1HRPrinter* hr_printer() { return &_hr_printer; } // Allocates a new heap region instance. @@ -896,9 +894,7 @@ ReferenceProcessor* _ref_processor_stw; STWGCTimer* _gc_timer_stw; - ConcurrentGCTimer* _gc_timer_cm; - G1OldTracer* _gc_tracer_cm; G1NewTracer* _gc_tracer_stw; // During reference object discovery, the _is_alive_non_header @@ -985,6 +981,9 @@ // The current policy object for the collector. G1CollectorPolicy* g1_policy() const { return _g1_policy; } + const G1CollectionSet* collection_set() const { return &_collection_set; } + G1CollectionSet* collection_set() { return &_collection_set; } + virtual CollectorPolicy* collector_policy() const; // Adaptive size policy. No such thing for g1. @@ -1029,9 +1028,6 @@ // The Concurrent Marking reference processor... ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; } - ConcurrentGCTimer* gc_timer_cm() const { return _gc_timer_cm; } - G1OldTracer* gc_tracer_cm() const { return _gc_tracer_cm; } - virtual size_t capacity() const; virtual size_t used() const; // This should be called when we're not holding the heap lock. The @@ -1285,6 +1281,12 @@ return true; } + // The reference pending list lock is acquired from from the + // ConcurrentMarkThread. + virtual bool needs_reference_pending_list_locker_thread() const { + return true; + } + inline bool is_in_young(const oop obj); virtual bool is_scavengable(const void* addr); @@ -1463,7 +1465,11 @@ G1EvacSummary create_g1_evac_summary(G1EvacStats* stats); // Printing +private: + void print_heap_regions() const; + void print_regions_on(outputStream* st) const; +public: virtual void print_on(outputStream* st) const; virtual void print_extended_on(outputStream* st) const; virtual void print_on_error(outputStream* st) const; diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/g1CollectionSet.cpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/src/share/vm/gc/g1/g1CollectionSet.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,426 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "gc/g1/g1CollectedHeap.hpp" +#include "gc/g1/g1CollectionSet.hpp" +#include "gc/g1/g1CollectorPolicy.hpp" +#include "gc/g1/g1CollectorState.hpp" +#include "gc/g1/heapRegion.inline.hpp" +#include "gc/g1/heapRegionRemSet.hpp" +#include "gc/g1/heapRegionSet.hpp" +#include "utilities/debug.hpp" + +G1CollectorState* G1CollectionSet::collector_state() { + return _g1->collector_state(); +} + +G1GCPhaseTimes* G1CollectionSet::phase_times() { + return _policy->phase_times(); +} + +CollectionSetChooser* G1CollectionSet::cset_chooser() { + return _cset_chooser; +} + +double G1CollectionSet::predict_region_elapsed_time_ms(HeapRegion* hr) { + return _policy->predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young()); +} + + +G1CollectionSet::G1CollectionSet(G1CollectedHeap* g1h) : + _g1(g1h), + _policy(NULL), + _cset_chooser(new CollectionSetChooser()), + _eden_region_length(0), + _survivor_region_length(0), + _old_region_length(0), + + _head(NULL), + _bytes_used_before(0), + _recorded_rs_lengths(0), + // Incremental CSet attributes + _inc_build_state(Inactive), + _inc_head(NULL), + _inc_tail(NULL), + _inc_bytes_used_before(0), + _inc_recorded_rs_lengths(0), + _inc_recorded_rs_lengths_diffs(0), + _inc_predicted_elapsed_time_ms(0.0), + _inc_predicted_elapsed_time_ms_diffs(0.0) {} + +G1CollectionSet::~G1CollectionSet() { + delete _cset_chooser; +} + +void G1CollectionSet::init_region_lengths(uint eden_cset_region_length, + uint survivor_cset_region_length) { + _eden_region_length = eden_cset_region_length; + _survivor_region_length = survivor_cset_region_length; + _old_region_length = 0; +} + +void G1CollectionSet::set_recorded_rs_lengths(size_t rs_lengths) { + _recorded_rs_lengths = rs_lengths; +} + +// Add the heap region at the head of the non-incremental collection set +void G1CollectionSet::add_old_region(HeapRegion* hr) { + assert(_inc_build_state == Active, "Precondition"); + assert(hr->is_old(), "the region should be old"); + + assert(!hr->in_collection_set(), "should not already be in the CSet"); + _g1->register_old_region_with_cset(hr); + hr->set_next_in_collection_set(_head); + _head = hr; + _bytes_used_before += hr->used(); + size_t rs_length = hr->rem_set()->occupied(); + _recorded_rs_lengths += rs_length; + _old_region_length += 1; +} + +// Initialize the per-collection-set information +void G1CollectionSet::start_incremental_building() { + assert(_inc_build_state == Inactive, "Precondition"); + + _inc_head = NULL; + _inc_tail = NULL; + _inc_bytes_used_before = 0; + + _inc_recorded_rs_lengths = 0; + _inc_recorded_rs_lengths_diffs = 0; + _inc_predicted_elapsed_time_ms = 0.0; + _inc_predicted_elapsed_time_ms_diffs = 0.0; + _inc_build_state = Active; +} + +void G1CollectionSet::finalize_incremental_building() { + assert(_inc_build_state == Active, "Precondition"); + assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint"); + + // The two "main" fields, _inc_recorded_rs_lengths and + // _inc_predicted_elapsed_time_ms, are updated by the thread + // that adds a new region to the CSet. Further updates by the + // concurrent refinement thread that samples the young RSet lengths + // are accumulated in the *_diffs fields. Here we add the diffs to + // the "main" fields. 
+ + if (_inc_recorded_rs_lengths_diffs >= 0) { + _inc_recorded_rs_lengths += _inc_recorded_rs_lengths_diffs; + } else { + // This is defensive. The diff should in theory be always positive + // as RSets can only grow between GCs. However, given that we + // sample their size concurrently with other threads updating them + // it's possible that we might get the wrong size back, which + // could make the calculations somewhat inaccurate. + size_t diffs = (size_t) (-_inc_recorded_rs_lengths_diffs); + if (_inc_recorded_rs_lengths >= diffs) { + _inc_recorded_rs_lengths -= diffs; + } else { + _inc_recorded_rs_lengths = 0; + } + } + _inc_predicted_elapsed_time_ms += _inc_predicted_elapsed_time_ms_diffs; + + _inc_recorded_rs_lengths_diffs = 0; + _inc_predicted_elapsed_time_ms_diffs = 0.0; +} + +void G1CollectionSet::update_young_region_prediction(HeapRegion* hr, + size_t new_rs_length) { + // Update the CSet information that is dependent on the new RS length + assert(hr->is_young(), "Precondition"); + assert(!SafepointSynchronize::is_at_safepoint(), "should not be at a safepoint"); + + // We could have updated _inc_recorded_rs_lengths and + // _inc_predicted_elapsed_time_ms directly but we'd need to do + // that atomically, as this code is executed by a concurrent + // refinement thread, potentially concurrently with a mutator thread + // allocating a new region and also updating the same fields. To + // avoid the atomic operations we accumulate these updates on two + // separate fields (*_diffs) and we'll just add them to the "main" + // fields at the start of a GC. + + ssize_t old_rs_length = (ssize_t) hr->recorded_rs_length(); + ssize_t rs_lengths_diff = (ssize_t) new_rs_length - old_rs_length; + _inc_recorded_rs_lengths_diffs += rs_lengths_diff; + + double old_elapsed_time_ms = hr->predicted_elapsed_time_ms(); + double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr); + double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms; + _inc_predicted_elapsed_time_ms_diffs += elapsed_ms_diff; + + hr->set_recorded_rs_length(new_rs_length); + hr->set_predicted_elapsed_time_ms(new_region_elapsed_time_ms); +} + +void G1CollectionSet::add_young_region_common(HeapRegion* hr) { + assert(hr->is_young(), "invariant"); + assert(hr->young_index_in_cset() > -1, "should have already been set"); + assert(_inc_build_state == Active, "Precondition"); + + // This routine is used when: + // * adding survivor regions to the incremental cset at the end of an + // evacuation pause or + // * adding the current allocation region to the incremental cset + // when it is retired. + // Therefore this routine may be called at a safepoint by the + // VM thread, or in-between safepoints by mutator threads (when + // retiring the current allocation region) + // We need to clear and set the cached recorded/cached collection set + // information in the heap region here (before the region gets added + // to the collection set). An individual heap region's cached values + // are calculated, aggregated with the policy collection set info, + // and cached in the heap region here (initially) and (subsequently) + // by the Young List sampling code. 
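[Editor's note, not part of the patch] The comments above describe an accumulate-then-fold scheme: the concurrent refinement thread only touches the *_diffs field, and the main field is updated once at the start of a GC, with a negative diff clamped defensively. A simplified sketch of that pattern (assumed names; the real fields live in G1CollectionSet):

#include <cstddef>
#include <cstdio>

struct IncCSetStats {
  size_t    recorded_rs_lengths;        // folded value, owned by the allocating thread
  ptrdiff_t recorded_rs_lengths_diffs;  // accumulated by the concurrent sampling thread

  void sample(size_t old_len, size_t new_len) {
    recorded_rs_lengths_diffs += (ptrdiff_t)new_len - (ptrdiff_t)old_len;
  }

  void fold_at_gc_start() {
    if (recorded_rs_lengths_diffs >= 0) {
      recorded_rs_lengths += (size_t)recorded_rs_lengths_diffs;
    } else {
      // Defensive clamp: concurrent sampling can briefly report a shrink.
      size_t dec = (size_t)(-recorded_rs_lengths_diffs);
      recorded_rs_lengths = (recorded_rs_lengths >= dec) ? recorded_rs_lengths - dec : 0;
    }
    recorded_rs_lengths_diffs = 0;
  }
};

int main() {
  IncCSetStats s{100, 0};
  s.sample(10, 25);        // a young region's remembered set grew by 15 entries
  s.sample(40, 38);        // a sampled shrink of 2 (possible with concurrent updates)
  s.fold_at_gc_start();    // 100 + (15 - 2) = 113
  printf("%zu\n", s.recorded_rs_lengths);
  return 0;
}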
+ + size_t rs_length = hr->rem_set()->occupied(); + double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr); + + // Cache the values we have added to the aggregated information + // in the heap region in case we have to remove this region from + // the incremental collection set, or it is updated by the + // rset sampling code + hr->set_recorded_rs_length(rs_length); + hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms); + + size_t used_bytes = hr->used(); + _inc_recorded_rs_lengths += rs_length; + _inc_predicted_elapsed_time_ms += region_elapsed_time_ms; + _inc_bytes_used_before += used_bytes; + + assert(!hr->in_collection_set(), "invariant"); + _g1->register_young_region_with_cset(hr); + assert(hr->next_in_collection_set() == NULL, "invariant"); +} + +// Add the region at the RHS of the incremental cset +void G1CollectionSet::add_survivor_regions(HeapRegion* hr) { + // We should only ever be appending survivors at the end of a pause + assert(hr->is_survivor(), "Logic"); + + // Do the 'common' stuff + add_young_region_common(hr); + + // Now add the region at the right hand side + if (_inc_tail == NULL) { + assert(_inc_head == NULL, "invariant"); + _inc_head = hr; + } else { + _inc_tail->set_next_in_collection_set(hr); + } + _inc_tail = hr; +} + +// Add the region to the LHS of the incremental cset +void G1CollectionSet::add_eden_region(HeapRegion* hr) { + // Survivors should be added to the RHS at the end of a pause + assert(hr->is_eden(), "Logic"); + + // Do the 'common' stuff + add_young_region_common(hr); + + // Add the region at the left hand side + hr->set_next_in_collection_set(_inc_head); + if (_inc_head == NULL) { + assert(_inc_tail == NULL, "Invariant"); + _inc_tail = hr; + } + _inc_head = hr; +} + +#ifndef PRODUCT +void G1CollectionSet::print(HeapRegion* list_head, outputStream* st) { + assert(list_head == inc_head() || list_head == head(), "must be"); + + st->print_cr("\nCollection_set:"); + HeapRegion* csr = list_head; + while (csr != NULL) { + HeapRegion* next = csr->next_in_collection_set(); + assert(csr->in_collection_set(), "bad CS"); + st->print_cr(" " HR_FORMAT ", P: " PTR_FORMAT "N: " PTR_FORMAT ", age: %4d", + HR_FORMAT_PARAMS(csr), + p2i(csr->prev_top_at_mark_start()), p2i(csr->next_top_at_mark_start()), + csr->age_in_surv_rate_group_cond()); + csr = next; + } +} +#endif // !PRODUCT + +double G1CollectionSet::finalize_young_part(double target_pause_time_ms) { + double young_start_time_sec = os::elapsedTime(); + + YoungList* young_list = _g1->young_list(); + finalize_incremental_building(); + + guarantee(target_pause_time_ms > 0.0, + "target_pause_time_ms = %1.6lf should be positive", target_pause_time_ms); + guarantee(_head == NULL, "Precondition"); + + size_t pending_cards = _policy->pending_cards(); + double base_time_ms = _policy->predict_base_elapsed_time_ms(pending_cards); + double time_remaining_ms = MAX2(target_pause_time_ms - base_time_ms, 0.0); + + log_trace(gc, ergo, cset)("Start choosing CSet. pending cards: " SIZE_FORMAT " predicted base time: %1.2fms remaining time: %1.2fms target pause time: %1.2fms", + pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms); + + collector_state()->set_last_gc_was_young(collector_state()->gcs_are_young()); + + // The young list is laid with the survivor regions from the previous + // pause are appended to the RHS of the young list, i.e. + // [Newly Young Regions ++ Survivors from last pause]. 
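[Editor's note, not part of the patch] The comment above describes the shape of the incremental collection set list: add_eden_region() pushes retired eden regions on the left-hand side, add_survivor_regions() appends survivors on the right-hand side, so the list reads [newly young regions ++ survivors from the last pause]. A toy model of that list discipline, using assumed names rather than HotSpot types:

#include <cstdio>

struct Region {
  const char* name;
  Region*     next;
};

struct IncCSet {
  Region* head = nullptr;
  Region* tail = nullptr;

  void add_eden(Region* r) {          // LHS: prepend as mutator regions retire
    r->next = head;
    head = r;
    if (tail == nullptr) tail = r;
  }
  void add_survivor(Region* r) {      // RHS: append at the end of a pause
    r->next = nullptr;
    if (tail == nullptr) head = r; else tail->next = r;
    tail = r;
  }
};

int main() {
  Region s1{"S1", nullptr}, s2{"S2", nullptr}, e1{"E1", nullptr}, e2{"E2", nullptr};
  IncCSet cs;
  cs.add_survivor(&s1);   // survivors appended during the previous pause
  cs.add_survivor(&s2);
  cs.add_eden(&e1);       // eden regions prepended as they are retired
  cs.add_eden(&e2);
  for (Region* r = cs.head; r != nullptr; r = r->next) printf("%s ", r->name);
  printf("\n");           // prints: E2 E1 S1 S2
  return 0;
}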
+ + uint survivor_region_length = young_list->survivor_length(); + uint eden_region_length = young_list->eden_length(); + init_region_lengths(eden_region_length, survivor_region_length); + + HeapRegion* hr = young_list->first_survivor_region(); + while (hr != NULL) { + assert(hr->is_survivor(), "badly formed young list"); + // There is a convention that all the young regions in the CSet + // are tagged as "eden", so we do this for the survivors here. We + // use the special set_eden_pre_gc() as it doesn't check that the + // region is free (which is not the case here). + hr->set_eden_pre_gc(); + hr = hr->get_next_young_region(); + } + + // Clear the fields that point to the survivor list - they are all young now. + young_list->clear_survivors(); + + _head = _inc_head; + _bytes_used_before = _inc_bytes_used_before; + time_remaining_ms = MAX2(time_remaining_ms - _inc_predicted_elapsed_time_ms, 0.0); + + log_trace(gc, ergo, cset)("Add young regions to CSet. eden: %u regions, survivors: %u regions, predicted young region time: %1.2fms, target pause time: %1.2fms", + eden_region_length, survivor_region_length, _inc_predicted_elapsed_time_ms, target_pause_time_ms); + + // The number of recorded young regions is the incremental + // collection set's current size + set_recorded_rs_lengths(_inc_recorded_rs_lengths); + + double young_end_time_sec = os::elapsedTime(); + phase_times()->record_young_cset_choice_time_ms((young_end_time_sec - young_start_time_sec) * 1000.0); + + return time_remaining_ms; +} + +void G1CollectionSet::finalize_old_part(double time_remaining_ms) { + double non_young_start_time_sec = os::elapsedTime(); + double predicted_old_time_ms = 0.0; + + if (!collector_state()->gcs_are_young()) { + cset_chooser()->verify(); + const uint min_old_cset_length = _policy->calc_min_old_cset_length(); + const uint max_old_cset_length = _policy->calc_max_old_cset_length(); + + uint expensive_region_num = 0; + bool check_time_remaining = _policy->adaptive_young_list_length(); + + HeapRegion* hr = cset_chooser()->peek(); + while (hr != NULL) { + if (old_region_length() >= max_old_cset_length) { + // Added maximum number of old regions to the CSet. + log_debug(gc, ergo, cset)("Finish adding old regions to CSet (old CSet region num reached max). old %u regions, max %u regions", + old_region_length(), max_old_cset_length); + break; + } + + // Stop adding regions if the remaining reclaimable space is + // not above G1HeapWastePercent. + size_t reclaimable_bytes = cset_chooser()->remaining_reclaimable_bytes(); + double reclaimable_perc = _policy->reclaimable_bytes_perc(reclaimable_bytes); + double threshold = (double) G1HeapWastePercent; + if (reclaimable_perc <= threshold) { + // We've added enough old regions that the amount of uncollected + // reclaimable space is at or below the waste threshold. Stop + // adding old regions to the CSet. + log_debug(gc, ergo, cset)("Finish adding old regions to CSet (reclaimable percentage not over threshold). " + "old %u regions, max %u regions, reclaimable: " SIZE_FORMAT "B (%1.2f%%) threshold: " UINTX_FORMAT "%%", + old_region_length(), max_old_cset_length, reclaimable_bytes, reclaimable_perc, G1HeapWastePercent); + break; + } + + double predicted_time_ms = predict_region_elapsed_time_ms(hr); + if (check_time_remaining) { + if (predicted_time_ms > time_remaining_ms) { + // Too expensive for the current CSet. + + if (old_region_length() >= min_old_cset_length) { + // We have added the minimum number of old regions to the CSet, + // we are done with this CSet. 
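[Editor's note, not part of the patch] finalize_old_part(), which continues in the hunk below, grows the old part of the collection set under a pause-time budget and stops on one of several conditions: the maximum old region count, the reclaimable percentage dropping to G1HeapWastePercent, a region predicted to be too expensive once the minimum count has been reached (adaptive case), or simply reaching the minimum (non-adaptive case). A condensed, illustrative decision function with a simplified signature:

#include <cstdio>

static bool stop_adding_old_regions(unsigned old_len, unsigned min_len, unsigned max_len,
                                    double reclaimable_perc, double waste_threshold_perc,
                                    double predicted_ms, double remaining_ms,
                                    bool adaptive_young_length) {
  if (old_len >= max_len) return true;                          // hit the hard cap
  if (reclaimable_perc <= waste_threshold_perc) return true;    // little left worth reclaiming
  if (adaptive_young_length) {
    // Reject an expensive region only once the minimum number has been added.
    return predicted_ms > remaining_ms && old_len >= min_len;
  }
  return old_len >= min_len;                                    // non-adaptive: stop at the minimum
}

int main() {
  // A cheap region still fits inside the remaining 30ms budget:
  printf("%d\n", stop_adding_old_regions(5, 4, 50, 12.0, 5.0, 7.5, 30.0, true));  // 0
  // An expensive region past the minimum stops the selection:
  printf("%d\n", stop_adding_old_regions(5, 4, 50, 12.0, 5.0, 45.0, 30.0, true)); // 1
  return 0;
}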
+ log_debug(gc, ergo, cset)("Finish adding old regions to CSet (predicted time is too high). " + "predicted time: %1.2fms, remaining time: %1.2fms old %u regions, min %u regions", + predicted_time_ms, time_remaining_ms, old_region_length(), min_old_cset_length); + break; + } + + // We'll add it anyway given that we haven't reached the + // minimum number of old regions. + expensive_region_num += 1; + } + } else { + if (old_region_length() >= min_old_cset_length) { + // In the non-auto-tuning case, we'll finish adding regions + // to the CSet if we reach the minimum. + + log_debug(gc, ergo, cset)("Finish adding old regions to CSet (old CSet region num reached min). old %u regions, min %u regions", + old_region_length(), min_old_cset_length); + break; + } + } + + // We will add this region to the CSet. + time_remaining_ms = MAX2(time_remaining_ms - predicted_time_ms, 0.0); + predicted_old_time_ms += predicted_time_ms; + cset_chooser()->pop(); // already have region via peek() + _g1->old_set_remove(hr); + add_old_region(hr); + + hr = cset_chooser()->peek(); + } + if (hr == NULL) { + log_debug(gc, ergo, cset)("Finish adding old regions to CSet (candidate old regions not available)"); + } + + if (expensive_region_num > 0) { + // We print the information once here at the end, predicated on + // whether we added any apparently expensive regions or not, to + // avoid generating output per region. + log_debug(gc, ergo, cset)("Added expensive regions to CSet (old CSet region num not reached min)." + "old: %u regions, expensive: %u regions, min: %u regions, remaining time: %1.2fms", + old_region_length(), expensive_region_num, min_old_cset_length, time_remaining_ms); + } + + cset_chooser()->verify(); + } + + stop_incremental_building(); + + log_debug(gc, ergo, cset)("Finish choosing CSet. old: %u regions, predicted old region time: %1.2fms, time remaining: %1.2f", + old_region_length(), predicted_old_time_ms, time_remaining_ms); + + double non_young_end_time_sec = os::elapsedTime(); + phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0); +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/g1CollectionSet.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/src/share/vm/gc/g1/g1CollectionSet.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,207 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_VM_GC_G1_G1COLLECTIONSET_HPP +#define SHARE_VM_GC_G1_G1COLLECTIONSET_HPP + +#include "gc/g1/collectionSetChooser.hpp" +#include "memory/allocation.hpp" +#include "utilities/debug.hpp" +#include "utilities/globalDefinitions.hpp" + +class G1CollectedHeap; +class G1CollectorPolicy; +class G1CollectorState; +class G1GCPhaseTimes; +class HeapRegion; + +class G1CollectionSet VALUE_OBJ_CLASS_SPEC { + G1CollectedHeap* _g1; + G1CollectorPolicy* _policy; + + CollectionSetChooser* _cset_chooser; + + uint _eden_region_length; + uint _survivor_region_length; + uint _old_region_length; + + // The head of the list (via "next_in_collection_set()") representing the + // current collection set. Set from the incrementally built collection + // set at the start of the pause. + HeapRegion* _head; + + // The number of bytes in the collection set before the pause. Set from + // the incrementally built collection set at the start of an evacuation + // pause, and incremented in finalize_old_part() when adding old regions + // (if any) to the collection set. + size_t _bytes_used_before; + + size_t _recorded_rs_lengths; + + // The associated information that is maintained while the incremental + // collection set is being built with young regions. Used to populate + // the recorded info for the evacuation pause. + + enum CSetBuildType { + Active, // We are actively building the collection set + Inactive // We are not actively building the collection set + }; + + CSetBuildType _inc_build_state; + + // The head of the incrementally built collection set. + HeapRegion* _inc_head; + + // The tail of the incrementally built collection set. + HeapRegion* _inc_tail; + + // The number of bytes in the incrementally built collection set. + // Used to set _collection_set_bytes_used_before at the start of + // an evacuation pause. + size_t _inc_bytes_used_before; + + // The RSet lengths recorded for regions in the CSet. It is updated + // by the thread that adds a new region to the CSet. We assume that + // only one thread can be allocating a new CSet region (currently, + // it does so after taking the Heap_lock) hence no need to + // synchronize updates to this field. + size_t _inc_recorded_rs_lengths; + + // A concurrent refinement thread periodically samples the young + // region RSets and needs to update _inc_recorded_rs_lengths as + // the RSets grow. Instead of having to synchronize updates to that + // field we accumulate them in this field and add it to + // _inc_recorded_rs_lengths_diffs at the start of a GC. + ssize_t _inc_recorded_rs_lengths_diffs; + + // The predicted elapsed time it will take to collect the regions in + // the CSet. This is updated by the thread that adds a new region to + // the CSet. See the comment for _inc_recorded_rs_lengths about + // MT-safety assumptions. + double _inc_predicted_elapsed_time_ms; + + // See the comment for _inc_recorded_rs_lengths_diffs. 
+ double _inc_predicted_elapsed_time_ms_diffs; + + G1CollectorState* collector_state(); + G1GCPhaseTimes* phase_times(); + + double predict_region_elapsed_time_ms(HeapRegion* hr); + +public: + G1CollectionSet(G1CollectedHeap* g1h); + ~G1CollectionSet(); + + void set_policy(G1CollectorPolicy* g1p) { + assert(_policy == NULL, "should only initialize once"); + _policy = g1p; + } + + CollectionSetChooser* cset_chooser(); + + void init_region_lengths(uint eden_cset_region_length, + uint survivor_cset_region_length); + + void set_recorded_rs_lengths(size_t rs_lengths); + + uint region_length() const { return young_region_length() + + old_region_length(); } + uint young_region_length() const { return eden_region_length() + + survivor_region_length(); } + + uint eden_region_length() const { return _eden_region_length; } + uint survivor_region_length() const { return _survivor_region_length; } + uint old_region_length() const { return _old_region_length; } + + // Incremental CSet Support + + // The head of the incrementally built collection set. + HeapRegion* inc_head() { return _inc_head; } + + // The tail of the incrementally built collection set. + HeapRegion* inc_tail() { return _inc_tail; } + + // Initialize incremental collection set info. + void start_incremental_building(); + + // Perform any final calculations on the incremental CSet fields + // before we can use them. + void finalize_incremental_building(); + + void clear_incremental() { + _inc_head = NULL; + _inc_tail = NULL; + } + + // Stop adding regions to the incremental collection set + void stop_incremental_building() { _inc_build_state = Inactive; } + + // The head of the list (via "next_in_collection_set()") representing the + // current collection set. + HeapRegion* head() { return _head; } + + void clear_head() { _head = NULL; } + + size_t recorded_rs_lengths() { return _recorded_rs_lengths; } + + size_t bytes_used_before() const { + return _bytes_used_before; + } + + void reset_bytes_used_before() { + _bytes_used_before = 0; + } + + // Choose a new collection set. Marks the chosen regions as being + // "in_collection_set", and links them together. The head and number of + // the collection set are available via access methods. + double finalize_young_part(double target_pause_time_ms); + void finalize_old_part(double time_remaining_ms); + + // Add old region "hr" to the CSet. + void add_old_region(HeapRegion* hr); + + // Update information about hr in the aggregated information for + // the incrementally built collection set. + void update_young_region_prediction(HeapRegion* hr, size_t new_rs_length); + + // Add hr to the LHS of the incremental collection set. + void add_eden_region(HeapRegion* hr); + + // Add hr to the RHS of the incremental collection set. + void add_survivor_regions(HeapRegion* hr); + +#ifndef PRODUCT + void print(HeapRegion* list_head, outputStream* st); +#endif // !PRODUCT + +private: + // Update the incremental cset information when adding a region + // (should not be called directly). 
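[Editor's note, not part of the patch] Putting the pieces of the new G1CollectionSet API together, a rough sketch of its lifecycle across one young pause, as driven by the g1CollectedHeap.cpp hunks earlier in this change. The class below is a stand-in with invented fields; only the call ordering is the point:

#include <cstdio>

struct CollectionSetSketch {
  bool     building = false;
  unsigned young = 0, old_regions = 0;

  void start_incremental_building()  { building = true; young = old_regions = 0; }
  void add_eden_region()             { if (building) young++; }   // mutator eden region retired
  void add_survivor_region()         { if (building) young++; }   // GC alloc survivor region retired
  void finalize(double /*budget*/)   { building = false; old_regions = 2; } // stand-in for young+old finalize
  void clear_head()                  { young = old_regions = 0; } // after free_collection_set()
};

int main() {
  CollectionSetSketch cs;
  cs.start_incremental_building();   // after the previous pause
  cs.add_eden_region();              // mutator allocation retires eden regions over time
  cs.add_eden_region();
  cs.finalize(200.0);                // at the next pause: finalize_young_part + finalize_old_part
  printf("young=%u old=%u\n", cs.young, cs.old_regions);
  cs.clear_head();                   // collection set freed, list reset
  cs.start_incremental_building();   // begin building for the following pause
  return 0;
}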
+ void add_young_region_common(HeapRegion* hr); + +}; + +#endif // SHARE_VM_GC_G1_G1COLLECTIONSET_HPP + diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/g1CollectorPolicy.cpp --- a/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -25,11 +25,14 @@ #include "precompiled.hpp" #include "gc/g1/concurrentG1Refine.hpp" #include "gc/g1/concurrentMarkThread.inline.hpp" +#include "gc/g1/g1Analytics.hpp" #include "gc/g1/g1CollectedHeap.inline.hpp" +#include "gc/g1/g1CollectionSet.hpp" #include "gc/g1/g1CollectorPolicy.hpp" #include "gc/g1/g1ConcurrentMark.hpp" #include "gc/g1/g1IHOPControl.hpp" #include "gc/g1/g1GCPhaseTimes.hpp" +#include "gc/g1/g1YoungGenSizer.hpp" #include "gc/g1/heapRegion.inline.hpp" #include "gc/g1/heapRegionRemSet.hpp" #include "gc/shared/gcPolicyCounters.hpp" @@ -39,107 +42,14 @@ #include "utilities/debug.hpp" #include "utilities/pair.hpp" -// Different defaults for different number of GC threads -// They were chosen by running GCOld and SPECjbb on debris with different -// numbers of GC threads and choosing them based on the results - -// all the same -static double rs_length_diff_defaults[] = { - 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 -}; - -static double cost_per_card_ms_defaults[] = { - 0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015 -}; - -// all the same -static double young_cards_per_entry_ratio_defaults[] = { - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 -}; - -static double cost_per_entry_ms_defaults[] = { - 0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005 -}; - -static double cost_per_byte_ms_defaults[] = { - 0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009 -}; - -// these should be pretty consistent -static double constant_other_time_ms_defaults[] = { - 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0 -}; - - -static double young_other_cost_per_region_ms_defaults[] = { - 0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1 -}; - -static double non_young_other_cost_per_region_ms_defaults[] = { - 1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30 -}; - G1CollectorPolicy::G1CollectorPolicy() : _predictor(G1ConfidencePercent / 100.0), - - _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)), - - _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)), - _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)), - - _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)), - _prev_collection_pause_end_ms(0.0), - _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)), - _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)), - _cost_scan_hcc_seq(new TruncatedSeq(TruncatedSeqLength)), - _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)), - _mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)), - _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)), - _mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)), - _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)), - _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)), - _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)), - _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)), - _non_young_other_cost_per_region_ms_seq( - new TruncatedSeq(TruncatedSeqLength)), - - _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)), - _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)), - + _analytics(new 
G1Analytics(&_predictor)), _pause_time_target_ms((double) MaxGCPauseMillis), - - _recent_prev_end_times_for_all_gcs_sec( - new TruncatedSeq(NumPrevPausesForHeuristics)), - - _recent_avg_pause_time_ratio(0.0), _rs_lengths_prediction(0), _max_survivor_regions(0), - - _eden_cset_region_length(0), - _survivor_cset_region_length(0), - _old_cset_region_length(0), - - _collection_set(NULL), - _collection_set_bytes_used_before(0), - - // Incremental CSet attributes - _inc_cset_build_state(Inactive), - _inc_cset_head(NULL), - _inc_cset_tail(NULL), - _inc_cset_bytes_used_before(0), - _inc_cset_recorded_rs_lengths(0), - _inc_cset_recorded_rs_lengths_diffs(0), - _inc_cset_predicted_elapsed_time_ms(0.0), - _inc_cset_predicted_elapsed_time_ms_diffs(0.0), - - // add here any more surv rate groups - _recorded_survivor_regions(0), - _recorded_survivor_head(NULL), - _recorded_survivor_tail(NULL), _survivors_age_table(true), - _gc_overhead_perc(0.0), - _bytes_allocated_in_old_since_last_gc(0), _ihop_control(NULL), _initial_mark_to_mixed() { @@ -165,27 +75,8 @@ HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize); HeapRegionRemSet::setup_remset_size(); - _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime()); - _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0; - clear_ratio_check_data(); - _phase_times = new G1GCPhaseTimes(ParallelGCThreads); - int index = MIN2(ParallelGCThreads - 1, 7u); - - _rs_length_diff_seq->add(rs_length_diff_defaults[index]); - _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]); - _cost_scan_hcc_seq->add(0.0); - _young_cards_per_entry_ratio_seq->add( - young_cards_per_entry_ratio_defaults[index]); - _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]); - _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]); - _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]); - _young_other_cost_per_region_ms_seq->add( - young_other_cost_per_region_ms_defaults[index]); - _non_young_other_cost_per_region_ms_seq->add( - non_young_other_cost_per_region_ms_defaults[index]); - // Below, we might need to calculate the pause time target based on // the pause interval. When we do so we are going to give G1 maximum // flexibility and allow it to do pauses when it needs to. So, we'll @@ -198,18 +89,7 @@ // First make sure that, if either parameter is set, its value is // reasonable. - if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) { - if (MaxGCPauseMillis < 1) { - vm_exit_during_initialization("MaxGCPauseMillis should be " - "greater than 0"); - } - } - if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) { - if (GCPauseIntervalMillis < 1) { - vm_exit_during_initialization("GCPauseIntervalMillis should be " - "greater than 0"); - } - } + guarantee(MaxGCPauseMillis >= 1, "Range checking for MaxGCPauseMillis should guarantee that value is >= 1"); // Then, if the pause time target parameter was not set, set it to // the default value. @@ -231,45 +111,22 @@ if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) { FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1); } - - // Finally, make sure that the two parameters are consistent. 
- if (MaxGCPauseMillis >= GCPauseIntervalMillis) { - char buffer[256]; - jio_snprintf(buffer, 256, - "MaxGCPauseMillis (%u) should be less than " - "GCPauseIntervalMillis (%u)", - MaxGCPauseMillis, GCPauseIntervalMillis); - vm_exit_during_initialization(buffer); - } + guarantee(GCPauseIntervalMillis >= 1, "Constraint for GCPauseIntervalMillis should guarantee that value is >= 1"); + guarantee(GCPauseIntervalMillis > MaxGCPauseMillis, "Constraint for GCPauseIntervalMillis should guarantee that GCPauseIntervalMillis > MaxGCPauseMillis"); double max_gc_time = (double) MaxGCPauseMillis / 1000.0; double time_slice = (double) GCPauseIntervalMillis / 1000.0; _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time); - // start conservatively (around 50ms is about right) - _concurrent_mark_remark_times_ms->add(0.05); - _concurrent_mark_cleanup_times_ms->add(0.20); _tenuring_threshold = MaxTenuringThreshold; - assert(GCTimeRatio > 0, - "we should have set it to a default value set_g1_gc_flags() " - "if a user set it to 0"); - _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio)); - uintx reserve_perc = G1ReservePercent; - // Put an artificial ceiling on this so that it's not set to a silly value. - if (reserve_perc > 50) { - reserve_perc = 50; - warning("G1ReservePercent is set to a value that is too large, " - "it's been updated to " UINTX_FORMAT, reserve_perc); - } - _reserve_factor = (double) reserve_perc / 100.0; + guarantee(G1ReservePercent <= 50, "Range checking should not allow values over 50."); + _reserve_factor = (double) G1ReservePercent / 100.0; // This will be set when the heap is expanded // for the first time during initialization. _reserve_regions = 0; - _cset_chooser = new CollectionSetChooser(); - _ihop_control = create_ihop_control(); } @@ -277,14 +134,6 @@ delete _ihop_control; } -double G1CollectorPolicy::get_new_prediction(TruncatedSeq const* seq) const { - return _predictor.get_new_prediction(seq); -} - -size_t G1CollectorPolicy::get_new_size_prediction(TruncatedSeq const* seq) const { - return (size_t)get_new_prediction(seq); -} - void G1CollectorPolicy::initialize_alignments() { _space_alignment = HeapRegion::GrainBytes; size_t card_table_alignment = CardTableRS::ct_max_alignment_constraint(); @@ -294,177 +143,6 @@ G1CollectorState* G1CollectorPolicy::collector_state() const { return _g1->collector_state(); } -// There are three command line options related to the young gen size: -// NewSize, MaxNewSize and NewRatio (There is also -Xmn, but that is -// just a short form for NewSize==MaxNewSize). G1 will use its internal -// heuristics to calculate the actual young gen size, so these options -// basically only limit the range within which G1 can pick a young gen -// size. Also, these are general options taking byte sizes. G1 will -// internally work with a number of regions instead. So, some rounding -// will occur. -// -// If nothing related to the the young gen size is set on the command -// line we should allow the young gen to be between G1NewSizePercent -// and G1MaxNewSizePercent of the heap size. This means that every time -// the heap size changes, the limits for the young gen size will be -// recalculated. -// -// If only -XX:NewSize is set we should use the specified value as the -// minimum size for young gen. Still using G1MaxNewSizePercent of the -// heap as maximum. -// -// If only -XX:MaxNewSize is set we should use the specified value as the -// maximum size for young gen. Still using G1NewSizePercent of the heap -// as minimum. 
-// -// If -XX:NewSize and -XX:MaxNewSize are both specified we use these values. -// No updates when the heap size changes. There is a special case when -// NewSize==MaxNewSize. This is interpreted as "fixed" and will use a -// different heuristic for calculating the collection set when we do mixed -// collection. -// -// If only -XX:NewRatio is set we should use the specified ratio of the heap -// as both min and max. This will be interpreted as "fixed" just like the -// NewSize==MaxNewSize case above. But we will update the min and max -// every time the heap size changes. -// -// NewSize and MaxNewSize override NewRatio. So, NewRatio is ignored if it is -// combined with either NewSize or MaxNewSize. (A warning message is printed.) -class G1YoungGenSizer : public CHeapObj { -private: - enum SizerKind { - SizerDefaults, - SizerNewSizeOnly, - SizerMaxNewSizeOnly, - SizerMaxAndNewSize, - SizerNewRatio - }; - SizerKind _sizer_kind; - uint _min_desired_young_length; - uint _max_desired_young_length; - bool _adaptive_size; - uint calculate_default_min_length(uint new_number_of_heap_regions); - uint calculate_default_max_length(uint new_number_of_heap_regions); - - // Update the given values for minimum and maximum young gen length in regions - // given the number of heap regions depending on the kind of sizing algorithm. - void recalculate_min_max_young_length(uint number_of_heap_regions, uint* min_young_length, uint* max_young_length); - -public: - G1YoungGenSizer(); - // Calculate the maximum length of the young gen given the number of regions - // depending on the sizing algorithm. - uint max_young_length(uint number_of_heap_regions); - - void heap_size_changed(uint new_number_of_heap_regions); - uint min_desired_young_length() { - return _min_desired_young_length; - } - uint max_desired_young_length() { - return _max_desired_young_length; - } - - bool adaptive_young_list_length() const { - return _adaptive_size; - } -}; - - -G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(true), - _min_desired_young_length(0), _max_desired_young_length(0) { - if (FLAG_IS_CMDLINE(NewRatio)) { - if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) { - warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio"); - } else { - _sizer_kind = SizerNewRatio; - _adaptive_size = false; - return; - } - } - - if (NewSize > MaxNewSize) { - if (FLAG_IS_CMDLINE(MaxNewSize)) { - warning("NewSize (" SIZE_FORMAT "k) is greater than the MaxNewSize (" SIZE_FORMAT "k). 
" - "A new max generation size of " SIZE_FORMAT "k will be used.", - NewSize/K, MaxNewSize/K, NewSize/K); - } - MaxNewSize = NewSize; - } - - if (FLAG_IS_CMDLINE(NewSize)) { - _min_desired_young_length = MAX2((uint) (NewSize / HeapRegion::GrainBytes), - 1U); - if (FLAG_IS_CMDLINE(MaxNewSize)) { - _max_desired_young_length = - MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes), - 1U); - _sizer_kind = SizerMaxAndNewSize; - _adaptive_size = _min_desired_young_length == _max_desired_young_length; - } else { - _sizer_kind = SizerNewSizeOnly; - } - } else if (FLAG_IS_CMDLINE(MaxNewSize)) { - _max_desired_young_length = - MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes), - 1U); - _sizer_kind = SizerMaxNewSizeOnly; - } -} - -uint G1YoungGenSizer::calculate_default_min_length(uint new_number_of_heap_regions) { - uint default_value = (new_number_of_heap_regions * G1NewSizePercent) / 100; - return MAX2(1U, default_value); -} - -uint G1YoungGenSizer::calculate_default_max_length(uint new_number_of_heap_regions) { - uint default_value = (new_number_of_heap_regions * G1MaxNewSizePercent) / 100; - return MAX2(1U, default_value); -} - -void G1YoungGenSizer::recalculate_min_max_young_length(uint number_of_heap_regions, uint* min_young_length, uint* max_young_length) { - assert(number_of_heap_regions > 0, "Heap must be initialized"); - - switch (_sizer_kind) { - case SizerDefaults: - *min_young_length = calculate_default_min_length(number_of_heap_regions); - *max_young_length = calculate_default_max_length(number_of_heap_regions); - break; - case SizerNewSizeOnly: - *max_young_length = calculate_default_max_length(number_of_heap_regions); - *max_young_length = MAX2(*min_young_length, *max_young_length); - break; - case SizerMaxNewSizeOnly: - *min_young_length = calculate_default_min_length(number_of_heap_regions); - *min_young_length = MIN2(*min_young_length, *max_young_length); - break; - case SizerMaxAndNewSize: - // Do nothing. Values set on the command line, don't update them at runtime. - break; - case SizerNewRatio: - *min_young_length = number_of_heap_regions / (NewRatio + 1); - *max_young_length = *min_young_length; - break; - default: - ShouldNotReachHere(); - } - - assert(*min_young_length <= *max_young_length, "Invalid min/max young gen size values"); -} - -uint G1YoungGenSizer::max_young_length(uint number_of_heap_regions) { - // We need to pass the desired values because recalculation may not update these - // values in some cases. 
- uint temp = _min_desired_young_length; - uint result = _max_desired_young_length; - recalculate_min_max_young_length(number_of_heap_regions, &temp, &result); - return result; -} - -void G1YoungGenSizer::heap_size_changed(uint new_number_of_heap_regions) { - recalculate_min_max_young_length(new_number_of_heap_regions, &_min_desired_young_length, - &_max_desired_young_length); -} - void G1CollectorPolicy::post_heap_initialize() { uintx max_regions = G1CollectedHeap::heap()->max_regions(); size_t max_young_size = (size_t)_young_gen_sizer->max_young_length(max_regions) * HeapRegion::GrainBytes; @@ -478,9 +156,8 @@ FLAG_SET_ERGO(size_t, G1HeapRegionSize, HeapRegion::GrainBytes); } - if (SurvivorRatio < 1) { - vm_exit_during_initialization("Invalid survivor ratio specified"); - } + guarantee(SurvivorRatio >= 1, "Range checking for SurvivorRatio should guarantee that value is >= 1"); + CollectorPolicy::initialize_flags(); _young_gen_sizer = new G1YoungGenSizer(); // Must be after call to initialize_flags } @@ -489,6 +166,8 @@ void G1CollectorPolicy::init() { // Set aside an initial future to_space. _g1 = G1CollectedHeap::heap(); + _collection_set = _g1->collection_set(); + _collection_set->set_policy(this); assert(Heap_lock->owned_by_self(), "Locking discipline."); @@ -504,11 +183,11 @@ update_young_list_max_and_target_length(); // We may immediately start allocating regions and placing them on the // collection set list. Initialize the per-collection set info - start_incremental_cset_building(); + _collection_set->start_incremental_building(); } -void G1CollectorPolicy::note_gc_start(uint num_active_workers) { - phase_times()->note_gc_start(num_active_workers); +void G1CollectorPolicy::note_gc_start() { + phase_times()->note_gc_start(); } // Create the jstat counters for the policy. @@ -528,8 +207,9 @@ double accum_surv_rate = accum_yg_surv_rate_pred((int) young_length - 1); size_t bytes_to_copy = (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes); - double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy); - double young_other_time_ms = predict_young_other_time_ms(young_length); + double copy_time_ms = _analytics->predict_object_copy_time_ms(bytes_to_copy, + collector_state()->during_concurrent_mark()); + double young_other_time_ms = _analytics->predict_young_other_time_ms(young_length); double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms; if (pause_time_ms > target_pause_time_ms) { // end condition 2: prediction is over the target pause time @@ -573,10 +253,10 @@ uint base_min_length) const { uint desired_min_length = 0; if (adaptive_young_list_length()) { - if (_alloc_rate_ms_seq->num() > 3) { + if (_analytics->num_alloc_rate_ms() > 3) { double now_sec = os::elapsedTime(); double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0; - double alloc_rate_ms = predict_alloc_rate_ms(); + double alloc_rate_ms = _analytics->predict_alloc_rate_ms(); desired_min_length = (uint) ceil(alloc_rate_ms * when_ms); } else { // otherwise we don't have enough info to make the prediction @@ -595,7 +275,7 @@ } uint G1CollectorPolicy::update_young_list_max_and_target_length() { - return update_young_list_max_and_target_length(get_new_size_prediction(_rs_lengths_seq)); + return update_young_list_max_and_target_length(_analytics->predict_rs_lengths()); } uint G1CollectorPolicy::update_young_list_max_and_target_length(size_t rs_lengths) { @@ -616,7 +296,7 @@ // Calculate the absolute and desired min bounds first. 
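[Editor's note, not part of the patch] The young-list sizing code above tests candidate young lengths against the pause-time target: a base cost (pending cards, scanned cards) plus the predicted copy and per-region "other" costs must stay within the target. The sketch below flattens the copy-cost model to a constant per-region figure purely for illustration; all numbers are invented:

#include <cstdio>

static bool fits_in_target(unsigned young_regions, double base_ms,
                           double copy_ms_per_region, double other_ms_per_region,
                           double target_ms) {
  double pause_ms = base_ms + young_regions * (copy_ms_per_region + other_ms_per_region);
  return pause_ms <= target_ms;
}

int main() {
  // With ~0.9ms predicted per young region on top of a 40ms base cost,
  // up to 177 young regions still fit into a 200ms pause target.
  for (unsigned len = 170; len <= 180; len++) {
    if (!fits_in_target(len, 40.0, 0.7, 0.2, 200.0)) {
      printf("first length over target: %u\n", len);   // prints 178
      break;
    }
  }
  return 0;
}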
// This is how many young regions we already have (currently: the survivors). - uint base_min_length = recorded_survivor_regions(); + const uint base_min_length = _g1->young_list()->survivor_length(); uint desired_min_length = calculate_young_list_desired_min_length(base_min_length); // This is the absolute minimum young length. Ensure that we // will at least have one eden region available for allocation. @@ -667,7 +347,7 @@ young_list_target_length = desired_min_length; } - assert(young_list_target_length > recorded_survivor_regions(), + assert(young_list_target_length > base_min_length, "we should be able to allocate at least one eden region"); assert(young_list_target_length >= absolute_min_length, "post-condition"); @@ -700,9 +380,9 @@ double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0; double survivor_regions_evac_time = predict_survivor_regions_evac_time(); - size_t pending_cards = get_new_size_prediction(_pending_cards_seq); - size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff(); - size_t scanned_cards = predict_young_card_num(adj_rs_lengths); + size_t pending_cards = _analytics->predict_pending_cards(); + size_t adj_rs_lengths = rs_lengths + _analytics->predict_rs_length_diff(); + size_t scanned_cards = _analytics->predict_card_num(adj_rs_lengths, /* gcs_are_young */ true); double base_time_ms = predict_base_elapsed_time_ms(pending_cards, scanned_cards) + survivor_regions_evac_time; @@ -781,8 +461,8 @@ double G1CollectorPolicy::predict_survivor_regions_evac_time() const { double survivor_regions_evac_time = 0.0; - for (HeapRegion * r = _recorded_survivor_head; - r != NULL && r != _recorded_survivor_tail->get_next_young_region(); + for (HeapRegion * r = _g1->young_list()->first_survivor_region(); + r != NULL && r != _g1->young_list()->last_survivor_region()->get_next_young_region(); r = r->get_next_young_region()) { survivor_regions_evac_time += predict_region_elapsed_time_ms(r, collector_state()->gcs_are_young()); } @@ -802,7 +482,7 @@ } void G1CollectorPolicy::update_rs_lengths_prediction() { - update_rs_lengths_prediction(get_new_size_prediction(_rs_lengths_seq)); + update_rs_lengths_prediction(_analytics->predict_rs_lengths()); } void G1CollectorPolicy::update_rs_lengths_prediction(size_t prediction) { @@ -870,7 +550,7 @@ double full_gc_time_sec = end_sec - _full_collection_start_sec; double full_gc_time_ms = full_gc_time_sec * 1000.0; - update_recent_gc_times(end_sec, full_gc_time_ms); + _analytics->update_recent_gc_times(end_sec, full_gc_time_ms); collector_state()->set_full_collection(false); @@ -886,8 +566,6 @@ _short_lived_surv_rate_group->start_adding_regions(); // also call this on any additional surv rate groups - record_survivor_regions(0, NULL, NULL); - _free_regions_at_end_of_collection = _g1->num_free_regions(); // Reset survivors SurvRateGroup. 
_survivor_surv_rate_group->reset(); @@ -913,7 +591,7 @@ phase_times()->record_cur_collection_start_sec(start_time_sec); _pending_cards = _g1->pending_card_num(); - _collection_set_bytes_used_before = 0; + _collection_set->reset_bytes_used_before(); _bytes_copied_during_gc = 0; collector_state()->set_last_gc_was_young(false); @@ -940,8 +618,8 @@ void G1CollectorPolicy::record_concurrent_mark_remark_end() { double end_time_sec = os::elapsedTime(); double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0; - _concurrent_mark_remark_times_ms->add(elapsed_time_ms); - _prev_collection_pause_end_ms += elapsed_time_ms; + _analytics->report_concurrent_mark_remark_times_ms(elapsed_time_ms); + _analytics->append_prev_collection_pause_end_ms(elapsed_time_ms); record_pause(Remark, _mark_remark_start_sec, end_time_sec); } @@ -988,6 +666,10 @@ return other_time_ms(pause_time_ms) - young_other_time_ms() - non_young_other_time_ms(); } +CollectionSetChooser* G1CollectorPolicy::cset_chooser() const { + return _collection_set->cset_chooser(); +} + bool G1CollectorPolicy::about_to_start_mixed_phase() const { return _g1->concurrent_mark()->cmThread()->during_cycle() || collector_state()->last_young_gc(); } @@ -1036,7 +718,7 @@ maybe_start_marking(); } - double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms); + double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _analytics->prev_collection_pause_end_ms()); if (app_time_ms < MIN_TIMER_GRANULARITY) { // This usually happens due to the timer not having the required // granularity. Some Linuxes are the usual culprits. @@ -1053,33 +735,14 @@ // given that humongous object allocations do not really affect // either the pause's duration nor when the next pause will take // place we can safely ignore them here. - uint regions_allocated = eden_cset_region_length(); + uint regions_allocated = _collection_set->eden_region_length(); double alloc_rate_ms = (double) regions_allocated / app_time_ms; - _alloc_rate_ms_seq->add(alloc_rate_ms); + _analytics->report_alloc_rate_ms(alloc_rate_ms); double interval_ms = - (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0; - update_recent_gc_times(end_time_sec, pause_time_ms); - _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms; - if (recent_avg_pause_time_ratio() < 0.0 || - (recent_avg_pause_time_ratio() - 1.0 > 0.0)) { - // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in - // CR 6902692 by redoing the manner in which the ratio is incrementally computed. - if (_recent_avg_pause_time_ratio < 0.0) { - _recent_avg_pause_time_ratio = 0.0; - } else { - assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant"); - _recent_avg_pause_time_ratio = 1.0; - } - } - - // Compute the ratio of just this last pause time to the entire time range stored - // in the vectors. Comparing this pause to the entire range, rather than only the - // most recent interval, has the effect of smoothing over a possible transient 'burst' - // of more frequent pauses that don't really reflect a change in heap occupancy. - // This reduces the likelihood of a needless heap expansion being triggered. 
- _last_pause_time_ratio = - (pause_time_ms * _recent_prev_end_times_for_all_gcs_sec->num()) / interval_ms; + (end_time_sec - _analytics->last_known_gc_end_time_sec()) * 1000.0; + _analytics->update_recent_gc_times(end_time_sec, pause_time_ms); + _analytics->compute_pause_time_ratio(interval_ms, pause_time_ms); } bool new_in_marking_window = collector_state()->in_marking_window(); @@ -1125,28 +788,20 @@ double cost_per_card_ms = 0.0; if (_pending_cards > 0) { cost_per_card_ms = (average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms) / (double) _pending_cards; - _cost_per_card_ms_seq->add(cost_per_card_ms); + _analytics->report_cost_per_card_ms(cost_per_card_ms); } - _cost_scan_hcc_seq->add(scan_hcc_time_ms); + _analytics->report_cost_scan_hcc(scan_hcc_time_ms); double cost_per_entry_ms = 0.0; if (cards_scanned > 10) { cost_per_entry_ms = average_time_ms(G1GCPhaseTimes::ScanRS) / (double) cards_scanned; - if (collector_state()->last_gc_was_young()) { - _cost_per_entry_ms_seq->add(cost_per_entry_ms); - } else { - _mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms); - } + _analytics->report_cost_per_entry_ms(cost_per_entry_ms, collector_state()->last_gc_was_young()); } if (_max_rs_lengths > 0) { double cards_per_entry_ratio = (double) cards_scanned / (double) _max_rs_lengths; - if (collector_state()->last_gc_was_young()) { - _young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio); - } else { - _mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio); - } + _analytics->report_cards_per_entry_ratio(cards_per_entry_ratio, collector_state()->last_gc_was_young()); } // This is defensive. For a while _max_rs_lengths could get @@ -1163,38 +818,35 @@ // say, it's in mid-coarsening). So I'll leave in the defensive // conditional below just in case. 
size_t rs_length_diff = 0; - if (_max_rs_lengths > _recorded_rs_lengths) { - rs_length_diff = _max_rs_lengths - _recorded_rs_lengths; + size_t recorded_rs_lengths = _collection_set->recorded_rs_lengths(); + if (_max_rs_lengths > recorded_rs_lengths) { + rs_length_diff = _max_rs_lengths - recorded_rs_lengths; } - _rs_length_diff_seq->add((double) rs_length_diff); + _analytics->report_rs_length_diff((double) rs_length_diff); size_t freed_bytes = heap_used_bytes_before_gc - cur_used_bytes; - size_t copied_bytes = _collection_set_bytes_used_before - freed_bytes; + size_t copied_bytes = _collection_set->bytes_used_before() - freed_bytes; double cost_per_byte_ms = 0.0; if (copied_bytes > 0) { cost_per_byte_ms = average_time_ms(G1GCPhaseTimes::ObjCopy) / (double) copied_bytes; - if (collector_state()->in_marking_window()) { - _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms); - } else { - _cost_per_byte_ms_seq->add(cost_per_byte_ms); - } + _analytics->report_cost_per_byte_ms(cost_per_byte_ms, collector_state()->in_marking_window()); + } + + if (_collection_set->young_region_length() > 0) { + _analytics->report_young_other_cost_per_region_ms(young_other_time_ms() / + _collection_set->young_region_length()); } - if (young_cset_region_length() > 0) { - _young_other_cost_per_region_ms_seq->add(young_other_time_ms() / - young_cset_region_length()); + if (_collection_set->old_region_length() > 0) { + _analytics->report_non_young_other_cost_per_region_ms(non_young_other_time_ms() / + _collection_set->old_region_length()); } - if (old_cset_region_length() > 0) { - _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms() / - old_cset_region_length()); - } + _analytics->report_constant_other_time_ms(constant_other_time_ms(pause_time_ms)); - _constant_other_time_ms_seq->add(constant_other_time_ms(pause_time_ms)); - - _pending_cards_seq->add((double) _pending_cards); - _rs_lengths_seq->add((double) _max_rs_lengths); + _analytics->report_pending_cards((double) _pending_cards); + _analytics->report_rs_lengths((double) _max_rs_lengths); } collector_state()->set_in_marking_window(new_in_marking_window); @@ -1226,9 +878,9 @@ } else { update_rs_time_goal_ms -= scan_hcc_time_ms; } - adjust_concurrent_refinement(average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms, - phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS), - update_rs_time_goal_ms); + _g1->concurrent_g1_refine()->adjust(average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms, + phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS), + update_rs_time_goal_ms); cset_chooser()->verify(); } @@ -1290,143 +942,10 @@ phase_times()->print(); } -void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time, - double update_rs_processed_buffers, - double goal_ms) { - DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); - ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine(); - - if (G1UseAdaptiveConcRefinement) { - const int k_gy = 3, k_gr = 6; - const double inc_k = 1.1, dec_k = 0.9; - - size_t g = cg1r->green_zone(); - if (update_rs_time > goal_ms) { - g = (size_t)(g * dec_k); // Can become 0, that's OK. That would mean a mutator-only processing. 
- } else { - if (update_rs_time < goal_ms && update_rs_processed_buffers > g) { - g = (size_t)MAX2(g * inc_k, g + 1.0); - } - } - // Change the refinement threads params - cg1r->set_green_zone(g); - cg1r->set_yellow_zone(g * k_gy); - cg1r->set_red_zone(g * k_gr); - cg1r->reinitialize_threads(); - - size_t processing_threshold_delta = MAX2(cg1r->green_zone() * _predictor.sigma(), 1); - size_t processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta, - cg1r->yellow_zone()); - // Change the barrier params - dcqs.set_process_completed_threshold((int)processing_threshold); - dcqs.set_max_completed_queue((int)cg1r->red_zone()); - } - - size_t curr_queue_size = dcqs.completed_buffers_num(); - if (curr_queue_size >= cg1r->yellow_zone()) { - dcqs.set_completed_queue_padding(curr_queue_size); - } else { - dcqs.set_completed_queue_padding(0); - } - dcqs.notify_if_necessary(); -} - -size_t G1CollectorPolicy::predict_rs_length_diff() const { - return get_new_size_prediction(_rs_length_diff_seq); -} - -double G1CollectorPolicy::predict_alloc_rate_ms() const { - return get_new_prediction(_alloc_rate_ms_seq); -} - -double G1CollectorPolicy::predict_cost_per_card_ms() const { - return get_new_prediction(_cost_per_card_ms_seq); -} - -double G1CollectorPolicy::predict_scan_hcc_ms() const { - return get_new_prediction(_cost_scan_hcc_seq); -} - -double G1CollectorPolicy::predict_rs_update_time_ms(size_t pending_cards) const { - return pending_cards * predict_cost_per_card_ms() + predict_scan_hcc_ms(); -} - -double G1CollectorPolicy::predict_young_cards_per_entry_ratio() const { - return get_new_prediction(_young_cards_per_entry_ratio_seq); -} - -double G1CollectorPolicy::predict_mixed_cards_per_entry_ratio() const { - if (_mixed_cards_per_entry_ratio_seq->num() < 2) { - return predict_young_cards_per_entry_ratio(); - } else { - return get_new_prediction(_mixed_cards_per_entry_ratio_seq); - } -} - -size_t G1CollectorPolicy::predict_young_card_num(size_t rs_length) const { - return (size_t) (rs_length * predict_young_cards_per_entry_ratio()); -} - -size_t G1CollectorPolicy::predict_non_young_card_num(size_t rs_length) const { - return (size_t)(rs_length * predict_mixed_cards_per_entry_ratio()); -} - -double G1CollectorPolicy::predict_rs_scan_time_ms(size_t card_num) const { - if (collector_state()->gcs_are_young()) { - return card_num * get_new_prediction(_cost_per_entry_ms_seq); - } else { - return predict_mixed_rs_scan_time_ms(card_num); - } -} - -double G1CollectorPolicy::predict_mixed_rs_scan_time_ms(size_t card_num) const { - if (_mixed_cost_per_entry_ms_seq->num() < 3) { - return card_num * get_new_prediction(_cost_per_entry_ms_seq); - } else { - return card_num * get_new_prediction(_mixed_cost_per_entry_ms_seq); - } -} - -double G1CollectorPolicy::predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) const { - if (_cost_per_byte_ms_during_cm_seq->num() < 3) { - return (1.1 * bytes_to_copy) * get_new_prediction(_cost_per_byte_ms_seq); - } else { - return bytes_to_copy * get_new_prediction(_cost_per_byte_ms_during_cm_seq); - } -} - -double G1CollectorPolicy::predict_object_copy_time_ms(size_t bytes_to_copy) const { - if (collector_state()->during_concurrent_mark()) { - return predict_object_copy_time_ms_during_cm(bytes_to_copy); - } else { - return bytes_to_copy * get_new_prediction(_cost_per_byte_ms_seq); - } -} - -double G1CollectorPolicy::predict_constant_other_time_ms() const { - return get_new_prediction(_constant_other_time_ms_seq); -} - -double 
G1CollectorPolicy::predict_young_other_time_ms(size_t young_num) const { - return young_num * get_new_prediction(_young_other_cost_per_region_ms_seq); -} - -double G1CollectorPolicy::predict_non_young_other_time_ms(size_t non_young_num) const { - return non_young_num * get_new_prediction(_non_young_other_cost_per_region_ms_seq); -} - -double G1CollectorPolicy::predict_remark_time_ms() const { - return get_new_prediction(_concurrent_mark_remark_times_ms); -} - -double G1CollectorPolicy::predict_cleanup_time_ms() const { - return get_new_prediction(_concurrent_mark_cleanup_times_ms); -} - double G1CollectorPolicy::predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) const { TruncatedSeq* seq = surv_rate_group->get_seq(age); guarantee(seq->num() > 0, "There should be some young gen survivor samples available. Tried to access with age %d", age); - double pred = get_new_prediction(seq); + double pred = _predictor.get_new_prediction(seq); if (pred > 1.0) { pred = 1.0; } @@ -1444,19 +963,14 @@ double G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards, size_t scanned_cards) const { return - predict_rs_update_time_ms(pending_cards) + - predict_rs_scan_time_ms(scanned_cards) + - predict_constant_other_time_ms(); + _analytics->predict_rs_update_time_ms(pending_cards) + + _analytics->predict_rs_scan_time_ms(scanned_cards, collector_state()->gcs_are_young()) + + _analytics->predict_constant_other_time_ms(); } double G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) const { - size_t rs_length = predict_rs_length_diff(); - size_t card_num; - if (collector_state()->gcs_are_young()) { - card_num = predict_young_card_num(rs_length); - } else { - card_num = predict_non_young_card_num(rs_length); - } + size_t rs_length = _analytics->predict_rs_lengths() + _analytics->predict_rs_length_diff(); + size_t card_num = _analytics->predict_card_num(rs_length, collector_state()->gcs_are_young()); return predict_base_elapsed_time_ms(pending_cards, card_num); } @@ -1476,160 +990,25 @@ double G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr, bool for_young_gc) const { size_t rs_length = hr->rem_set()->occupied(); - size_t card_num; - // Predicting the number of cards is based on which type of GC // we're predicting for. - if (for_young_gc) { - card_num = predict_young_card_num(rs_length); - } else { - card_num = predict_non_young_card_num(rs_length); - } + size_t card_num = _analytics->predict_card_num(rs_length, for_young_gc); size_t bytes_to_copy = predict_bytes_to_copy(hr); double region_elapsed_time_ms = - predict_rs_scan_time_ms(card_num) + - predict_object_copy_time_ms(bytes_to_copy); + _analytics->predict_rs_scan_time_ms(card_num, collector_state()->gcs_are_young()) + + _analytics->predict_object_copy_time_ms(bytes_to_copy, collector_state()->during_concurrent_mark()); // The prediction of the "other" time for this region is based // upon the region type and NOT the GC type. 
if (hr->is_young()) { - region_elapsed_time_ms += predict_young_other_time_ms(1); + region_elapsed_time_ms += _analytics->predict_young_other_time_ms(1); } else { - region_elapsed_time_ms += predict_non_young_other_time_ms(1); + region_elapsed_time_ms += _analytics->predict_non_young_other_time_ms(1); } return region_elapsed_time_ms; } -void G1CollectorPolicy::init_cset_region_lengths(uint eden_cset_region_length, - uint survivor_cset_region_length) { - _eden_cset_region_length = eden_cset_region_length; - _survivor_cset_region_length = survivor_cset_region_length; - _old_cset_region_length = 0; -} - -void G1CollectorPolicy::set_recorded_rs_lengths(size_t rs_lengths) { - _recorded_rs_lengths = rs_lengths; -} - -void G1CollectorPolicy::update_recent_gc_times(double end_time_sec, - double elapsed_ms) { - _recent_gc_times_ms->add(elapsed_ms); - _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec); - _prev_collection_pause_end_ms = end_time_sec * 1000.0; -} - -void G1CollectorPolicy::clear_ratio_check_data() { - _ratio_over_threshold_count = 0; - _ratio_over_threshold_sum = 0.0; - _pauses_since_start = 0; -} - -size_t G1CollectorPolicy::expansion_amount() { - double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0; - double last_gc_overhead = _last_pause_time_ratio * 100.0; - double threshold = _gc_overhead_perc; - size_t expand_bytes = 0; - - // If the heap is at less than half its maximum size, scale the threshold down, - // to a limit of 1. Thus the smaller the heap is, the more likely it is to expand, - // though the scaling code will likely keep the increase small. - if (_g1->capacity() <= _g1->max_capacity() / 2) { - threshold *= (double)_g1->capacity() / (double)(_g1->max_capacity() / 2); - threshold = MAX2(threshold, 1.0); - } - - // If the last GC time ratio is over the threshold, increment the count of - // times it has been exceeded, and add this ratio to the sum of exceeded - // ratios. - if (last_gc_overhead > threshold) { - _ratio_over_threshold_count++; - _ratio_over_threshold_sum += last_gc_overhead; - } - - // Check if we've had enough GC time ratio checks that were over the - // threshold to trigger an expansion. We'll also expand if we've - // reached the end of the history buffer and the average of all entries - // is still over the threshold. This indicates a smaller number of GCs were - // long enough to make the average exceed the threshold. - bool filled_history_buffer = _pauses_since_start == NumPrevPausesForHeuristics; - if ((_ratio_over_threshold_count == MinOverThresholdForGrowth) || - (filled_history_buffer && (recent_gc_overhead > threshold))) { - size_t min_expand_bytes = HeapRegion::GrainBytes; - size_t reserved_bytes = _g1->max_capacity(); - size_t committed_bytes = _g1->capacity(); - size_t uncommitted_bytes = reserved_bytes - committed_bytes; - size_t expand_bytes_via_pct = - uncommitted_bytes * G1ExpandByPercentOfAvailable / 100; - double scale_factor = 1.0; - - // If the current size is less than 1/4 of the Initial heap size, expand - // by half of the delta between the current and Initial sizes. IE, grow - // back quickly. - // - // Otherwise, take the current size, or G1ExpandByPercentOfAvailable % of - // the available expansion space, whichever is smaller, as the base - // expansion size. Then possibly scale this size according to how much the - // threshold has (on average) been exceeded by. If the delta is small - // (less than the StartScaleDownAt value), scale the size down linearly, but - // not by less than MinScaleDownFactor. 
If the delta is large (greater than - the StartScaleUpAt value), scale up, but adding no more than MaxScaleUpFactor - times the base size. The scaling will be linear in the range from - StartScaleUpAt to (StartScaleUpAt + ScaleUpRange). In other words, - ScaleUpRange sets the rate of scaling up. - if (committed_bytes < InitialHeapSize / 4) { - expand_bytes = (InitialHeapSize - committed_bytes) / 2; - } else { - double const MinScaleDownFactor = 0.2; - double const MaxScaleUpFactor = 2; - double const StartScaleDownAt = _gc_overhead_perc; - double const StartScaleUpAt = _gc_overhead_perc * 1.5; - double const ScaleUpRange = _gc_overhead_perc * 2.0; - - double ratio_delta; - if (filled_history_buffer) { - ratio_delta = recent_gc_overhead - threshold; - } else { - ratio_delta = (_ratio_over_threshold_sum/_ratio_over_threshold_count) - threshold; - } - - expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes); - if (ratio_delta < StartScaleDownAt) { - scale_factor = ratio_delta / StartScaleDownAt; - scale_factor = MAX2(scale_factor, MinScaleDownFactor); - } else if (ratio_delta > StartScaleUpAt) { - scale_factor = 1 + ((ratio_delta - StartScaleUpAt) / ScaleUpRange); - scale_factor = MIN2(scale_factor, MaxScaleUpFactor); - } - } - - log_debug(gc, ergo, heap)("Attempt heap expansion (recent GC overhead higher than threshold after GC) " - "recent GC overhead: %1.2f %% threshold: %1.2f %% uncommitted: " SIZE_FORMAT "B base expansion amount and scale: " SIZE_FORMAT "B (%1.2f%%)", - recent_gc_overhead, threshold, uncommitted_bytes, expand_bytes, scale_factor * 100); - - expand_bytes = static_cast<size_t>(expand_bytes * scale_factor); - - // Ensure the expansion size is at least the minimum growth amount - // and at most the remaining uncommitted byte size. - expand_bytes = MAX2(expand_bytes, min_expand_bytes); - expand_bytes = MIN2(expand_bytes, uncommitted_bytes); - - clear_ratio_check_data(); - } else { - // An expansion was not triggered. If we've started counting, increment - // the number of checks we've made in the current window. If we've - // reached the end of the window without resizing, clear the counters to - // start again the next time we see a ratio above the threshold. - if (_ratio_over_threshold_count > 0) { - _pauses_since_start++; - if (_pauses_since_start > NumPrevPausesForHeuristics) { - clear_ratio_check_data(); - } - } - } - - return expand_bytes; -} void G1CollectorPolicy::print_yg_surv_rate_info() const { #ifndef PRODUCT @@ -1747,269 +1126,17 @@ } } -class ParKnownGarbageHRClosure: public HeapRegionClosure { - G1CollectedHeap* _g1h; - CSetChooserParUpdater _cset_updater; - -public: - ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted, - uint chunk_size) : - _g1h(G1CollectedHeap::heap()), - _cset_updater(hrSorted, true /* parallel */, chunk_size) { } - - bool doHeapRegion(HeapRegion* r) { - // Do we have any marking information for this region? - if (r->is_marked()) { - // We will skip any region that's currently used as an old GC - // alloc region (we should not consider those for collection - // before we fill them up).
- if (_cset_updater.should_add(r) && !_g1h->is_old_gc_alloc_region(r)) { - _cset_updater.add_region(r); - } - } - return false; - } -}; - -class ParKnownGarbageTask: public AbstractGangTask { - CollectionSetChooser* _hrSorted; - uint _chunk_size; - G1CollectedHeap* _g1; - HeapRegionClaimer _hrclaimer; - -public: - ParKnownGarbageTask(CollectionSetChooser* hrSorted, uint chunk_size, uint n_workers) : - AbstractGangTask("ParKnownGarbageTask"), - _hrSorted(hrSorted), _chunk_size(chunk_size), - _g1(G1CollectedHeap::heap()), _hrclaimer(n_workers) {} - - void work(uint worker_id) { - ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size); - _g1->heap_region_par_iterate(&parKnownGarbageCl, worker_id, &_hrclaimer); - } -}; - -uint G1CollectorPolicy::calculate_parallel_work_chunk_size(uint n_workers, uint n_regions) const { - assert(n_workers > 0, "Active gc workers should be greater than 0"); - const uint overpartition_factor = 4; - const uint min_chunk_size = MAX2(n_regions / n_workers, 1U); - return MAX2(n_regions / (n_workers * overpartition_factor), min_chunk_size); -} - void G1CollectorPolicy::record_concurrent_mark_cleanup_end() { - cset_chooser()->clear(); - - WorkGang* workers = _g1->workers(); - uint n_workers = workers->active_workers(); - - uint n_regions = _g1->num_regions(); - uint chunk_size = calculate_parallel_work_chunk_size(n_workers, n_regions); - cset_chooser()->prepare_for_par_region_addition(n_workers, n_regions, chunk_size); - ParKnownGarbageTask par_known_garbage_task(cset_chooser(), chunk_size, n_workers); - workers->run_task(&par_known_garbage_task); - - cset_chooser()->sort_regions(); + cset_chooser()->rebuild(_g1->workers(), _g1->num_regions()); double end_sec = os::elapsedTime(); double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0; - _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms); - _prev_collection_pause_end_ms += elapsed_time_ms; + _analytics->report_concurrent_mark_cleanup_times_ms(elapsed_time_ms); + _analytics->append_prev_collection_pause_end_ms(elapsed_time_ms); record_pause(Cleanup, _mark_cleanup_start_sec, end_sec); } -// Add the heap region at the head of the non-incremental collection set -void G1CollectorPolicy::add_old_region_to_cset(HeapRegion* hr) { - assert(_inc_cset_build_state == Active, "Precondition"); - assert(hr->is_old(), "the region should be old"); - - assert(!hr->in_collection_set(), "should not already be in the CSet"); - _g1->register_old_region_with_cset(hr); - hr->set_next_in_collection_set(_collection_set); - _collection_set = hr; - _collection_set_bytes_used_before += hr->used(); - size_t rs_length = hr->rem_set()->occupied(); - _recorded_rs_lengths += rs_length; - _old_cset_region_length += 1; -} - -// Initialize the per-collection-set information -void G1CollectorPolicy::start_incremental_cset_building() { - assert(_inc_cset_build_state == Inactive, "Precondition"); - - _inc_cset_head = NULL; - _inc_cset_tail = NULL; - _inc_cset_bytes_used_before = 0; - - _inc_cset_recorded_rs_lengths = 0; - _inc_cset_recorded_rs_lengths_diffs = 0; - _inc_cset_predicted_elapsed_time_ms = 0.0; - _inc_cset_predicted_elapsed_time_ms_diffs = 0.0; - _inc_cset_build_state = Active; -} - -void G1CollectorPolicy::finalize_incremental_cset_building() { - assert(_inc_cset_build_state == Active, "Precondition"); - assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint"); - - // The two "main" fields, _inc_cset_recorded_rs_lengths and - // _inc_cset_predicted_elapsed_time_ms, are updated by the thread - // 
that adds a new region to the CSet. Further updates by the - // concurrent refinement thread that samples the young RSet lengths - // are accumulated in the *_diffs fields. Here we add the diffs to - // the "main" fields. - - if (_inc_cset_recorded_rs_lengths_diffs >= 0) { - _inc_cset_recorded_rs_lengths += _inc_cset_recorded_rs_lengths_diffs; - } else { - // This is defensive. The diff should in theory be always positive - // as RSets can only grow between GCs. However, given that we - // sample their size concurrently with other threads updating them - // it's possible that we might get the wrong size back, which - // could make the calculations somewhat inaccurate. - size_t diffs = (size_t) (-_inc_cset_recorded_rs_lengths_diffs); - if (_inc_cset_recorded_rs_lengths >= diffs) { - _inc_cset_recorded_rs_lengths -= diffs; - } else { - _inc_cset_recorded_rs_lengths = 0; - } - } - _inc_cset_predicted_elapsed_time_ms += - _inc_cset_predicted_elapsed_time_ms_diffs; - - _inc_cset_recorded_rs_lengths_diffs = 0; - _inc_cset_predicted_elapsed_time_ms_diffs = 0.0; -} - -void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) { - // This routine is used when: - // * adding survivor regions to the incremental cset at the end of an - // evacuation pause, - // * adding the current allocation region to the incremental cset - // when it is retired, and - // * updating existing policy information for a region in the - // incremental cset via young list RSet sampling. - // Therefore this routine may be called at a safepoint by the - // VM thread, or in-between safepoints by mutator threads (when - // retiring the current allocation region) or a concurrent - // refine thread (RSet sampling). - - double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young()); - size_t used_bytes = hr->used(); - _inc_cset_recorded_rs_lengths += rs_length; - _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms; - _inc_cset_bytes_used_before += used_bytes; - - // Cache the values we have added to the aggregated information - // in the heap region in case we have to remove this region from - // the incremental collection set, or it is updated by the - // rset sampling code - hr->set_recorded_rs_length(rs_length); - hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms); -} - -void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr, - size_t new_rs_length) { - // Update the CSet information that is dependent on the new RS length - assert(hr->is_young(), "Precondition"); - assert(!SafepointSynchronize::is_at_safepoint(), - "should not be at a safepoint"); - - // We could have updated _inc_cset_recorded_rs_lengths and - // _inc_cset_predicted_elapsed_time_ms directly but we'd need to do - // that atomically, as this code is executed by a concurrent - // refinement thread, potentially concurrently with a mutator thread - // allocating a new region and also updating the same fields. To - // avoid the atomic operations we accumulate these updates on two - // separate fields (*_diffs) and we'll just add them to the "main" - // fields at the start of a GC. 
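The removed comment above describes the side-buffer pattern that this change relocates into G1CollectionSet: the concurrent RSet sampler accumulates its updates in separate *_diffs fields, and they are folded into the "main" fields exactly once, at the start of a GC, so no atomic operations are needed. A minimal stand-alone sketch of that idea follows; the struct, field, and method names are illustrative only and are not the HotSpot API.

#include <cstddef>

// Illustrative sketch of the *_diffs accumulation described above: the
// concurrent sampler only touches 'diffs'; folding happens single-threaded
// at GC start, clamping at zero in case stale samples drove the delta negative.
struct IncCSetRsLengths {
  size_t    recorded = 0;   // updated only by the thread adding regions to the CSet
  ptrdiff_t diffs    = 0;   // accumulated by the concurrent RSet sampling thread

  void fold_diffs_at_gc_start() {
    if (diffs >= 0) {
      recorded += static_cast<size_t>(diffs);
    } else {
      size_t dec = static_cast<size_t>(-diffs);
      recorded = (recorded >= dec) ? recorded - dec : 0;
    }
    diffs = 0;
  }
};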
- - ssize_t old_rs_length = (ssize_t) hr->recorded_rs_length(); - ssize_t rs_lengths_diff = (ssize_t) new_rs_length - old_rs_length; - _inc_cset_recorded_rs_lengths_diffs += rs_lengths_diff; - - double old_elapsed_time_ms = hr->predicted_elapsed_time_ms(); - double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young()); - double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms; - _inc_cset_predicted_elapsed_time_ms_diffs += elapsed_ms_diff; - - hr->set_recorded_rs_length(new_rs_length); - hr->set_predicted_elapsed_time_ms(new_region_elapsed_time_ms); -} - -void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) { - assert(hr->is_young(), "invariant"); - assert(hr->young_index_in_cset() > -1, "should have already been set"); - assert(_inc_cset_build_state == Active, "Precondition"); - - // We need to clear and set the cached recorded/cached collection set - // information in the heap region here (before the region gets added - // to the collection set). An individual heap region's cached values - // are calculated, aggregated with the policy collection set info, - // and cached in the heap region here (initially) and (subsequently) - // by the Young List sampling code. - - size_t rs_length = hr->rem_set()->occupied(); - add_to_incremental_cset_info(hr, rs_length); - - assert(!hr->in_collection_set(), "invariant"); - _g1->register_young_region_with_cset(hr); - assert(hr->next_in_collection_set() == NULL, "invariant"); -} - -// Add the region at the RHS of the incremental cset -void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) { - // We should only ever be appending survivors at the end of a pause - assert(hr->is_survivor(), "Logic"); - - // Do the 'common' stuff - add_region_to_incremental_cset_common(hr); - - // Now add the region at the right hand side - if (_inc_cset_tail == NULL) { - assert(_inc_cset_head == NULL, "invariant"); - _inc_cset_head = hr; - } else { - _inc_cset_tail->set_next_in_collection_set(hr); - } - _inc_cset_tail = hr; -} - -// Add the region to the LHS of the incremental cset -void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) { - // Survivors should be added to the RHS at the end of a pause - assert(hr->is_eden(), "Logic"); - - // Do the 'common' stuff - add_region_to_incremental_cset_common(hr); - - // Add the region at the left hand side - hr->set_next_in_collection_set(_inc_cset_head); - if (_inc_cset_head == NULL) { - assert(_inc_cset_tail == NULL, "Invariant"); - _inc_cset_tail = hr; - } - _inc_cset_head = hr; -} - -#ifndef PRODUCT -void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream* st) { - assert(list_head == inc_cset_head() || list_head == collection_set(), "must be"); - - st->print_cr("\nCollection_set:"); - HeapRegion* csr = list_head; - while (csr != NULL) { - HeapRegion* next = csr->next_in_collection_set(); - assert(csr->in_collection_set(), "bad CS"); - st->print_cr(" " HR_FORMAT ", P: " PTR_FORMAT "N: " PTR_FORMAT ", age: %4d", - HR_FORMAT_PARAMS(csr), - p2i(csr->prev_top_at_mark_start()), p2i(csr->next_top_at_mark_start()), - csr->age_in_surv_rate_group_cond()); - csr = next; - } -} -#endif // !PRODUCT - double G1CollectorPolicy::reclaimable_bytes_perc(size_t reclaimable_bytes) const { // Returns the given amount of reclaimable bytes (that represents // the amount of reclaimable space still to be collected) as a @@ -2139,161 +1266,7 @@ return (uint) result; } - -double 
G1CollectorPolicy::finalize_young_cset_part(double target_pause_time_ms) { - double young_start_time_sec = os::elapsedTime(); - - YoungList* young_list = _g1->young_list(); - finalize_incremental_cset_building(); - - guarantee(target_pause_time_ms > 0.0, - "target_pause_time_ms = %1.6lf should be positive", target_pause_time_ms); - guarantee(_collection_set == NULL, "Precondition"); - - double base_time_ms = predict_base_elapsed_time_ms(_pending_cards); - double time_remaining_ms = MAX2(target_pause_time_ms - base_time_ms, 0.0); - - log_trace(gc, ergo, cset)("Start choosing CSet. pending cards: " SIZE_FORMAT " predicted base time: %1.2fms remaining time: %1.2fms target pause time: %1.2fms", - _pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms); - - collector_state()->set_last_gc_was_young(collector_state()->gcs_are_young()); - - // The young list is laid with the survivor regions from the previous - // pause are appended to the RHS of the young list, i.e. - // [Newly Young Regions ++ Survivors from last pause]. - - uint survivor_region_length = young_list->survivor_length(); - uint eden_region_length = young_list->eden_length(); - init_cset_region_lengths(eden_region_length, survivor_region_length); - - HeapRegion* hr = young_list->first_survivor_region(); - while (hr != NULL) { - assert(hr->is_survivor(), "badly formed young list"); - // There is a convention that all the young regions in the CSet - // are tagged as "eden", so we do this for the survivors here. We - // use the special set_eden_pre_gc() as it doesn't check that the - // region is free (which is not the case here). - hr->set_eden_pre_gc(); - hr = hr->get_next_young_region(); - } - - // Clear the fields that point to the survivor list - they are all young now. - young_list->clear_survivors(); - - _collection_set = _inc_cset_head; - _collection_set_bytes_used_before = _inc_cset_bytes_used_before; - time_remaining_ms = MAX2(time_remaining_ms - _inc_cset_predicted_elapsed_time_ms, 0.0); - - log_trace(gc, ergo, cset)("Add young regions to CSet. eden: %u regions, survivors: %u regions, predicted young region time: %1.2fms, target pause time: %1.2fms", - eden_region_length, survivor_region_length, _inc_cset_predicted_elapsed_time_ms, target_pause_time_ms); - - // The number of recorded young regions is the incremental - // collection set's current size - set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths); - - double young_end_time_sec = os::elapsedTime(); - phase_times()->record_young_cset_choice_time_ms((young_end_time_sec - young_start_time_sec) * 1000.0); - - return time_remaining_ms; +void G1CollectorPolicy::finalize_collection_set(double target_pause_time_ms) { + double time_remaining_ms = _collection_set->finalize_young_part(target_pause_time_ms); + _collection_set->finalize_old_part(time_remaining_ms); } - -void G1CollectorPolicy::finalize_old_cset_part(double time_remaining_ms) { - double non_young_start_time_sec = os::elapsedTime(); - double predicted_old_time_ms = 0.0; - - - if (!collector_state()->gcs_are_young()) { - cset_chooser()->verify(); - const uint min_old_cset_length = calc_min_old_cset_length(); - const uint max_old_cset_length = calc_max_old_cset_length(); - - uint expensive_region_num = 0; - bool check_time_remaining = adaptive_young_list_length(); - - HeapRegion* hr = cset_chooser()->peek(); - while (hr != NULL) { - if (old_cset_region_length() >= max_old_cset_length) { - // Added maximum number of old regions to the CSet. 
- log_debug(gc, ergo, cset)("Finish adding old regions to CSet (old CSet region num reached max). old %u regions, max %u regions", - old_cset_region_length(), max_old_cset_length); - break; - } - - - // Stop adding regions if the remaining reclaimable space is - // not above G1HeapWastePercent. - size_t reclaimable_bytes = cset_chooser()->remaining_reclaimable_bytes(); - double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes); - double threshold = (double) G1HeapWastePercent; - if (reclaimable_perc <= threshold) { - // We've added enough old regions that the amount of uncollected - // reclaimable space is at or below the waste threshold. Stop - // adding old regions to the CSet. - log_debug(gc, ergo, cset)("Finish adding old regions to CSet (reclaimable percentage not over threshold). " - "old %u regions, max %u regions, reclaimable: " SIZE_FORMAT "B (%1.2f%%) threshold: " UINTX_FORMAT "%%", - old_cset_region_length(), max_old_cset_length, reclaimable_bytes, reclaimable_perc, G1HeapWastePercent); - break; - } - - double predicted_time_ms = predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young()); - if (check_time_remaining) { - if (predicted_time_ms > time_remaining_ms) { - // Too expensive for the current CSet. - - if (old_cset_region_length() >= min_old_cset_length) { - // We have added the minimum number of old regions to the CSet, - // we are done with this CSet. - log_debug(gc, ergo, cset)("Finish adding old regions to CSet (predicted time is too high). " - "predicted time: %1.2fms, remaining time: %1.2fms old %u regions, min %u regions", - predicted_time_ms, time_remaining_ms, old_cset_region_length(), min_old_cset_length); - break; - } - - // We'll add it anyway given that we haven't reached the - // minimum number of old regions. - expensive_region_num += 1; - } - } else { - if (old_cset_region_length() >= min_old_cset_length) { - // In the non-auto-tuning case, we'll finish adding regions - // to the CSet if we reach the minimum. - - log_debug(gc, ergo, cset)("Finish adding old regions to CSet (old CSet region num reached min). old %u regions, min %u regions", - old_cset_region_length(), min_old_cset_length); - break; - } - } - - // We will add this region to the CSet. - time_remaining_ms = MAX2(time_remaining_ms - predicted_time_ms, 0.0); - predicted_old_time_ms += predicted_time_ms; - cset_chooser()->pop(); // already have region via peek() - _g1->old_set_remove(hr); - add_old_region_to_cset(hr); - - hr = cset_chooser()->peek(); - } - if (hr == NULL) { - log_debug(gc, ergo, cset)("Finish adding old regions to CSet (candidate old regions not available)"); - } - - if (expensive_region_num > 0) { - // We print the information once here at the end, predicated on - // whether we added any apparently expensive regions or not, to - // avoid generating output per region. - log_debug(gc, ergo, cset)("Added expensive regions to CSet (old CSet region num not reached min)." - "old: %u regions, expensive: %u regions, min: %u regions, remaining time: %1.2fms", - old_cset_region_length(), expensive_region_num, min_old_cset_length, time_remaining_ms); - } - - cset_chooser()->verify(); - } - - stop_incremental_cset_building(); - - log_debug(gc, ergo, cset)("Finish choosing CSet. 
old: %u regions, predicted old region time: %1.2fms, time remaining: %1.2f", - old_cset_region_length(), predicted_old_time_ms, time_remaining_ms); - - double non_young_end_time_sec = os::elapsedTime(); - phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0); -} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/g1CollectorPolicy.hpp --- a/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -25,7 +25,6 @@ #ifndef SHARE_VM_GC_G1_G1COLLECTORPOLICY_HPP #define SHARE_VM_GC_G1_G1COLLECTORPOLICY_HPP -#include "gc/g1/collectionSetChooser.hpp" #include "gc/g1/g1CollectorState.hpp" #include "gc/g1/g1GCPhaseTimes.hpp" #include "gc/g1/g1InCSetState.hpp" @@ -41,8 +40,10 @@ // * when to collect. class HeapRegion; +class G1CollectionSet; class CollectionSetChooser; class G1IHOPControl; +class G1Analytics; class G1YoungGenSizer; class G1CollectorPolicy: public CollectorPolicy { @@ -57,30 +58,14 @@ void report_ihop_statistics(); G1Predictions _predictor; - - double get_new_prediction(TruncatedSeq const* seq) const; - size_t get_new_size_prediction(TruncatedSeq const* seq) const; - + G1Analytics* _analytics; G1MMUTracker* _mmu_tracker; void initialize_alignments(); void initialize_flags(); - CollectionSetChooser* _cset_chooser; - double _full_collection_start_sec; - // These exclude marking times. - TruncatedSeq* _recent_gc_times_ms; - - TruncatedSeq* _concurrent_mark_remark_times_ms; - TruncatedSeq* _concurrent_mark_cleanup_times_ms; - - // Ratio check data for determining if heap growth is necessary. - uint _ratio_over_threshold_count; - double _ratio_over_threshold_sum; - uint _pauses_since_start; - uint _young_list_target_length; uint _young_list_fixed_length; @@ -90,58 +75,14 @@ SurvRateGroup* _short_lived_surv_rate_group; SurvRateGroup* _survivor_surv_rate_group; - // add here any more surv rate groups - - double _gc_overhead_perc; double _reserve_factor; uint _reserve_regions; - enum PredictionConstants { - TruncatedSeqLength = 10, - NumPrevPausesForHeuristics = 10, - // MinOverThresholdForGrowth must be less than NumPrevPausesForHeuristics, - // representing the minimum number of pause time ratios that exceed - // GCTimeRatio before a heap expansion will be triggered. 
- MinOverThresholdForGrowth = 4 - }; - - TruncatedSeq* _alloc_rate_ms_seq; - double _prev_collection_pause_end_ms; - - TruncatedSeq* _rs_length_diff_seq; - TruncatedSeq* _cost_per_card_ms_seq; - TruncatedSeq* _cost_scan_hcc_seq; - TruncatedSeq* _young_cards_per_entry_ratio_seq; - TruncatedSeq* _mixed_cards_per_entry_ratio_seq; - TruncatedSeq* _cost_per_entry_ms_seq; - TruncatedSeq* _mixed_cost_per_entry_ms_seq; - TruncatedSeq* _cost_per_byte_ms_seq; - TruncatedSeq* _constant_other_time_ms_seq; - TruncatedSeq* _young_other_cost_per_region_ms_seq; - TruncatedSeq* _non_young_other_cost_per_region_ms_seq; - - TruncatedSeq* _pending_cards_seq; - TruncatedSeq* _rs_lengths_seq; - - TruncatedSeq* _cost_per_byte_ms_during_cm_seq; - G1YoungGenSizer* _young_gen_sizer; - uint _eden_cset_region_length; - uint _survivor_cset_region_length; - uint _old_cset_region_length; - - void init_cset_region_lengths(uint eden_cset_region_length, - uint survivor_cset_region_length); - - uint eden_cset_region_length() const { return _eden_cset_region_length; } - uint survivor_cset_region_length() const { return _survivor_cset_region_length; } - uint old_cset_region_length() const { return _old_cset_region_length; } - uint _free_regions_at_end_of_collection; - size_t _recorded_rs_lengths; size_t _max_rs_lengths; size_t _rs_lengths_prediction; @@ -150,10 +91,6 @@ bool verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group); #endif // PRODUCT - void adjust_concurrent_refinement(double update_rs_time, - double update_rs_processed_buffers, - double goal_ms); - double _pause_time_target_ms; size_t _pending_cards; @@ -165,6 +102,7 @@ G1InitialMarkToMixedTimeTracker _initial_mark_to_mixed; public: const G1Predictions& predictor() const { return _predictor; } + const G1Analytics* analytics() const { return const_cast<const G1Analytics*>(_analytics); } // Add the given number of bytes to the total number of allocated bytes in the old gen.
void add_bytes_allocated_in_old_since_last_gc(size_t bytes) { _bytes_allocated_in_old_since_last_gc += bytes; } @@ -191,37 +129,6 @@ _max_rs_lengths = rs_lengths; } - size_t predict_rs_length_diff() const; - - double predict_alloc_rate_ms() const; - - double predict_cost_per_card_ms() const; - - double predict_scan_hcc_ms() const; - - double predict_rs_update_time_ms(size_t pending_cards) const; - - double predict_young_cards_per_entry_ratio() const; - - double predict_mixed_cards_per_entry_ratio() const; - - size_t predict_young_card_num(size_t rs_length) const; - - size_t predict_non_young_card_num(size_t rs_length) const; - - double predict_rs_scan_time_ms(size_t card_num) const; - - double predict_mixed_rs_scan_time_ms(size_t card_num) const; - - double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) const; - - double predict_object_copy_time_ms(size_t bytes_to_copy) const; - - double predict_constant_other_time_ms() const; - - double predict_young_other_time_ms(size_t young_num) const; - - double predict_non_young_other_time_ms(size_t non_young_num) const; double predict_base_elapsed_time_ms(size_t pending_cards) const; double predict_base_elapsed_time_ms(size_t pending_cards, @@ -229,13 +136,6 @@ size_t predict_bytes_to_copy(HeapRegion* hr) const; double predict_region_elapsed_time_ms(HeapRegion* hr, bool for_young_gc) const; - void set_recorded_rs_lengths(size_t rs_lengths); - - uint cset_region_length() const { return young_cset_region_length() + - old_cset_region_length(); } - uint young_cset_region_length() const { return eden_cset_region_length() + - survivor_cset_region_length(); } - double predict_survivor_regions_evac_time() const; bool should_update_surv_rate_group_predictors() { @@ -261,10 +161,6 @@ return _mmu_tracker->max_gc_time() * 1000.0; } - double predict_remark_time_ms() const; - - double predict_cleanup_time_ms() const; - // Returns an estimate of the survival rate of the region at yg-age // "yg_age". double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) const; @@ -274,6 +170,7 @@ double accum_yg_surv_rate_pred(int age) const; protected: + G1CollectionSet* _collection_set; virtual double average_time_ms(G1GCPhaseTimes::GCParPhases phase) const; virtual double other_time_ms(double pause_time_ms) const; @@ -281,90 +178,17 @@ double non_young_other_time_ms() const; double constant_other_time_ms(double pause_time_ms) const; - CollectionSetChooser* cset_chooser() const { - return _cset_chooser; - } - + CollectionSetChooser* cset_chooser() const; private: - // Statistics kept per GC stoppage, pause or full. - TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec; - - // Add a new GC of the given duration and end time to the record. - void update_recent_gc_times(double end_time_sec, double elapsed_ms); - - // The head of the list (via "next_in_collection_set()") representing the - // current collection set. Set from the incrementally built collection - // set at the start of the pause. - HeapRegion* _collection_set; - - // The number of bytes in the collection set before the pause. Set from - // the incrementally built collection set at the start of an evacuation - // pause, and incremented in finalize_old_cset_part() when adding old regions - // (if any) to the collection set. - size_t _collection_set_bytes_used_before; // The number of bytes copied during the GC. size_t _bytes_copied_during_gc; - // The associated information that is maintained while the incremental - // collection set is being built with young regions. 
Used to populate - // the recorded info for the evacuation pause. - - enum CSetBuildType { - Active, // We are actively building the collection set - Inactive // We are not actively building the collection set - }; - - CSetBuildType _inc_cset_build_state; - - // The head of the incrementally built collection set. - HeapRegion* _inc_cset_head; - - // The tail of the incrementally built collection set. - HeapRegion* _inc_cset_tail; - - // The number of bytes in the incrementally built collection set. - // Used to set _collection_set_bytes_used_before at the start of - // an evacuation pause. - size_t _inc_cset_bytes_used_before; - - // The RSet lengths recorded for regions in the CSet. It is updated - // by the thread that adds a new region to the CSet. We assume that - // only one thread can be allocating a new CSet region (currently, - // it does so after taking the Heap_lock) hence no need to - // synchronize updates to this field. - size_t _inc_cset_recorded_rs_lengths; - - // A concurrent refinement thread periodically samples the young - // region RSets and needs to update _inc_cset_recorded_rs_lengths as - // the RSets grow. Instead of having to synchronize updates to that - // field we accumulate them in this field and add it to - // _inc_cset_recorded_rs_lengths_diffs at the start of a GC. - ssize_t _inc_cset_recorded_rs_lengths_diffs; - - // The predicted elapsed time it will take to collect the regions in - // the CSet. This is updated by the thread that adds a new region to - // the CSet. See the comment for _inc_cset_recorded_rs_lengths about - // MT-safety assumptions. - double _inc_cset_predicted_elapsed_time_ms; - - // See the comment for _inc_cset_recorded_rs_lengths_diffs. - double _inc_cset_predicted_elapsed_time_ms_diffs; - // Stash a pointer to the g1 heap. G1CollectedHeap* _g1; G1GCPhaseTimes* _phase_times; - // The ratio of gc time to elapsed time, computed over recent pauses, - // and the ratio for just the last pause. - double _recent_avg_pause_time_ratio; - double _last_pause_time_ratio; - - double recent_avg_pause_time_ratio() const { - return _recent_avg_pause_time_ratio; - } - // This set of variables tracks the collector efficiency, in order to // determine whether we should initiate a new marking. double _mark_remark_start_sec; @@ -412,10 +236,6 @@ void update_rs_lengths_prediction(); void update_rs_lengths_prediction(size_t prediction); - // Calculate and return chunk size (in number of regions) for parallel - // concurrent mark cleanup. - uint calculate_parallel_work_chunk_size(uint n_workers, uint n_regions) const; - // Check whether a given young length (young_length) fits into the // given target pause time and whether the prediction for the amount // of objects to be copied for the given length will fit into the @@ -424,6 +244,9 @@ bool predict_will_fit(uint young_length, double base_time_ms, uint base_free_regions, double target_pause_time_ms) const; +public: + size_t pending_cards() const { return _pending_cards; } + // Calculate the minimum number of old regions we'll add to the CSet // during a mixed GC. uint calc_min_old_cset_length() const; @@ -436,6 +259,7 @@ // as a percentage of the current heap capacity. double reclaimable_bytes_perc(size_t reclaimable_bytes) const; +private: // Sets up marking if proper conditions are met. void maybe_start_marking(); @@ -478,7 +302,7 @@ void init(); - virtual void note_gc_start(uint num_active_workers); + virtual void note_gc_start(); // Create jstat counters for the policy. 
virtual void initialize_gc_policy_counters(); @@ -520,83 +344,20 @@ return _bytes_copied_during_gc; } - size_t collection_set_bytes_used_before() const { - return _collection_set_bytes_used_before; - } - // Determine whether there are candidate regions so that the // next GC should be mixed. The two action strings are used // in the ergo output when the method returns true or false. bool next_gc_should_be_mixed(const char* true_action_str, const char* false_action_str) const; - // Choose a new collection set. Marks the chosen regions as being - // "in_collection_set", and links them together. The head and number of - // the collection set are available via access methods. - double finalize_young_cset_part(double target_pause_time_ms); - virtual void finalize_old_cset_part(double time_remaining_ms); - - // The head of the list (via "next_in_collection_set()") representing the - // current collection set. - HeapRegion* collection_set() { return _collection_set; } - - void clear_collection_set() { _collection_set = NULL; } - - // Add old region "hr" to the CSet. - void add_old_region_to_cset(HeapRegion* hr); - - // Incremental CSet Support - - // The head of the incrementally built collection set. - HeapRegion* inc_cset_head() { return _inc_cset_head; } - - // The tail of the incrementally built collection set. - HeapRegion* inc_set_tail() { return _inc_cset_tail; } - - // Initialize incremental collection set info. - void start_incremental_cset_building(); - - // Perform any final calculations on the incremental CSet fields - // before we can use them. - void finalize_incremental_cset_building(); - - void clear_incremental_cset() { - _inc_cset_head = NULL; - _inc_cset_tail = NULL; - } - - // Stop adding regions to the incremental collection set - void stop_incremental_cset_building() { _inc_cset_build_state = Inactive; } - - // Add information about hr to the aggregated information for the - // incrementally built collection set. - void add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length); - - // Update information about hr in the aggregated information for - // the incrementally built collection set. - void update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length); - + virtual void finalize_collection_set(double target_pause_time_ms); private: - // Update the incremental cset information when adding a region - // (should not be called directly). - void add_region_to_incremental_cset_common(HeapRegion* hr); - // Set the state to start a concurrent marking cycle and clear // _initiate_conc_mark_if_possible because it has now been // acted on. void initiate_conc_mark(); public: - // Add hr to the LHS of the incremental collection set. - void add_region_to_incremental_cset_lhs(HeapRegion* hr); - - // Add hr to the RHS of the incremental collection set. - void add_region_to_incremental_cset_rhs(HeapRegion* hr); - -#ifndef PRODUCT - void print_collection_set(HeapRegion* list_head, outputStream* st); -#endif // !PRODUCT - // This sets the initiate_conc_mark_if_possible() flag to start a // new cycle, as long as we are not already in one. It's best if it // is called during a safepoint when the test whether a cycle is in @@ -611,13 +372,6 @@ // the initial-mark work and start a marking cycle. void decide_on_conc_mark_initiation(); - // If an expansion would be appropriate, because recent GC overhead had - // exceeded the desired limit, return an amount to expand by. - virtual size_t expansion_amount(); - - // Clear ratio tracking data used by expansion_amount(). 
- void clear_ratio_check_data(); - // Print stats on young survival ratio void print_yg_surv_rate_info() const; @@ -627,7 +381,6 @@ } else { _short_lived_surv_rate_group->finished_recalculating_age_indexes(); } - // do that for any other surv rate groups } size_t young_list_target_length() const { return _young_list_target_length; } @@ -658,16 +411,6 @@ // The limit on the number of regions allocated for survivors. uint _max_survivor_regions; - // For reporting purposes. - // The value of _heap_bytes_before_gc is also used to calculate - // the cost of copying. - - // The amount of survivor regions after a collection. - uint _recorded_survivor_regions; - // List of survivor regions. - HeapRegion* _recorded_survivor_head; - HeapRegion* _recorded_survivor_tail; - AgeTable _survivors_age_table; public: @@ -677,22 +420,6 @@ return _max_survivor_regions; } - static const uint REGIONS_UNLIMITED = (uint) -1; - - uint max_regions(InCSetState dest) const { - switch (dest.value()) { - case InCSetState::Young: - return _max_survivor_regions; - case InCSetState::Old: - return REGIONS_UNLIMITED; - default: - assert(false, "Unknown dest state: " CSETSTATE_FORMAT, dest.value()); - break; - } - // keep some compilers happy - return 0; - } - void note_start_adding_survivor_regions() { _survivor_surv_rate_group->start_adding_regions(); } @@ -701,18 +428,6 @@ _survivor_surv_rate_group->stop_adding_regions(); } - void record_survivor_regions(uint regions, - HeapRegion* head, - HeapRegion* tail) { - _recorded_survivor_regions = regions; - _recorded_survivor_head = head; - _recorded_survivor_tail = tail; - } - - uint recorded_survivor_regions() const { - return _recorded_survivor_regions; - } - void record_age_table(AgeTable* age_table) { _survivors_age_table.merge(age_table); } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/g1CollectorState.hpp --- a/hotspot/src/share/vm/gc/g1/g1CollectorState.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/g1/g1CollectorState.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,8 +25,9 @@ #ifndef SHARE_VM_GC_G1_G1COLLECTORSTATE_HPP #define SHARE_VM_GC_G1_G1COLLECTORSTATE_HPP +#include "gc/g1/g1YCTypes.hpp" +#include "memory/allocation.hpp" #include "utilities/globalDefinitions.hpp" -#include "gc/g1/g1YCTypes.hpp" // Various state variables that indicate // the phase of the G1 collection. 
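The expansion_amount()/clear_ratio_check_data() pair removed from G1CollectorPolicy above implemented the "expand the heap while recent GC overhead stays above the threshold" heuristic whose body is deleted earlier in this patch. For reference, a compressed, self-contained sketch of just its scaling step, using the same constants as the deleted code; the function name and parameters are illustrative, not part of the relocated HotSpot sources.

#include <algorithm>
#include <cstddef>

// Illustrative sketch of the removed scaling step: the base expansion amount is
// scaled by how far the GC-overhead ratio exceeded the threshold, clamped to
// [MinScaleDownFactor, MaxScaleUpFactor] as in the deleted code.
static size_t scaled_expansion_bytes(size_t base_expand_bytes,
                                     double ratio_delta,       // overhead minus threshold, in percent
                                     double gc_overhead_perc) { // threshold, in percent
  const double min_scale_down_factor = 0.2;
  const double max_scale_up_factor   = 2.0;
  const double start_scale_down_at   = gc_overhead_perc;
  const double start_scale_up_at     = gc_overhead_perc * 1.5;
  const double scale_up_range        = gc_overhead_perc * 2.0;

  double scale_factor = 1.0;
  if (ratio_delta < start_scale_down_at) {
    scale_factor = std::max(ratio_delta / start_scale_down_at, min_scale_down_factor);
  } else if (ratio_delta > start_scale_up_at) {
    scale_factor = std::min(1.0 + (ratio_delta - start_scale_up_at) / scale_up_range, max_scale_up_factor);
  }
  return static_cast<size_t>(base_expand_bytes * scale_factor);
}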
@@ -71,7 +72,6 @@ bool _in_marking_window; bool _in_marking_window_im; - bool _concurrent_cycle_started; bool _full_collection; public: @@ -87,7 +87,6 @@ _mark_in_progress(false), _in_marking_window(false), _in_marking_window_im(false), - _concurrent_cycle_started(false), _full_collection(false) {} // Setters @@ -100,7 +99,6 @@ void set_mark_in_progress(bool v) { _mark_in_progress = v; } void set_in_marking_window(bool v) { _in_marking_window = v; } void set_in_marking_window_im(bool v) { _in_marking_window_im = v; } - void set_concurrent_cycle_started(bool v) { _concurrent_cycle_started = v; } void set_full_collection(bool v) { _full_collection = v; } // Getters @@ -113,7 +111,6 @@ bool mark_in_progress() const { return _mark_in_progress; } bool in_marking_window() const { return _in_marking_window; } bool in_marking_window_im() const { return _in_marking_window_im; } - bool concurrent_cycle_started() const { return _concurrent_cycle_started; } bool full_collection() const { return _full_collection; } // Composite booleans (clients worry about flickering) diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/g1ConcurrentMark.cpp --- a/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -120,74 +120,10 @@ } // We need to clear the bitmap on commit, removing any existing information. MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords); - _bm->clearRange(mr); + _bm->clear_range(mr); } -// Closure used for clearing the given mark bitmap. -class ClearBitmapHRClosure : public HeapRegionClosure { - private: - G1ConcurrentMark* _cm; - G1CMBitMap* _bitmap; - bool _may_yield; // The closure may yield during iteration. If yielded, abort the iteration. - public: - ClearBitmapHRClosure(G1ConcurrentMark* cm, G1CMBitMap* bitmap, bool may_yield) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap), _may_yield(may_yield) { - assert(!may_yield || cm != NULL, "CM must be non-NULL if this closure is expected to yield."); - } - - virtual bool doHeapRegion(HeapRegion* r) { - size_t const chunk_size_in_words = M / HeapWordSize; - - HeapWord* cur = r->bottom(); - HeapWord* const end = r->end(); - - while (cur < end) { - MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end)); - _bitmap->clearRange(mr); - - cur += chunk_size_in_words; - - // Abort iteration if after yielding the marking has been aborted. - if (_may_yield && _cm->do_yield_check() && _cm->has_aborted()) { - return true; - } - // Repeat the asserts from before the start of the closure. We will do them - // as asserts here to minimize their overhead on the product. However, we - // will have them as guarantees at the beginning / end of the bitmap - // clearing to get some checking in the product. - assert(!_may_yield || _cm->cmThread()->during_cycle(), "invariant"); - assert(!_may_yield || !G1CollectedHeap::heap()->collector_state()->mark_in_progress(), "invariant"); - } - - return false; - } -}; - -class ParClearNextMarkBitmapTask : public AbstractGangTask { - ClearBitmapHRClosure* _cl; - HeapRegionClaimer _hrclaimer; - bool _suspendible; // If the task is suspendible, workers must join the STS. 
- -public: - ParClearNextMarkBitmapTask(ClearBitmapHRClosure *cl, uint n_workers, bool suspendible) : - _cl(cl), _suspendible(suspendible), AbstractGangTask("Parallel Clear Bitmap Task"), _hrclaimer(n_workers) {} - - void work(uint worker_id) { - SuspendibleThreadSetJoiner sts_join(_suspendible); - G1CollectedHeap::heap()->heap_region_par_iterate(_cl, worker_id, &_hrclaimer, true); - } -}; - -void G1CMBitMap::clearAll() { - G1CollectedHeap* g1h = G1CollectedHeap::heap(); - ClearBitmapHRClosure cl(NULL, this, false /* may_yield */); - uint n_workers = g1h->workers()->active_workers(); - ParClearNextMarkBitmapTask task(&cl, n_workers, false); - g1h->workers()->run_task(&task); - guarantee(cl.complete(), "Must have completed iteration."); - return; -} - -void G1CMBitMap::clearRange(MemRegion mr) { +void G1CMBitMap::clear_range(MemRegion mr) { mr.intersection(MemRegion(_bmStartWord, _bmWordSize)); assert(!mr.is_empty(), "unexpected empty region"); // convert address range into offset range @@ -203,12 +139,12 @@ // allocate a stack of the requisite depth ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop))); if (!rs.is_reserved()) { - warning("ConcurrentMark MarkStack allocation failure"); + log_warning(gc)("ConcurrentMark MarkStack allocation failure"); return false; } MemTracker::record_virtual_memory_type((address)rs.base(), mtGC); if (!_virtual_space.initialize(rs, rs.size())) { - warning("ConcurrentMark MarkStack backing store failure"); + log_warning(gc)("ConcurrentMark MarkStack backing store failure"); // Release the virtual memory reserved for the marking stack rs.release(); return false; @@ -441,7 +377,8 @@ _has_aborted(false), _restart_for_overflow(false), _concurrent_marking_in_progress(false), - _concurrent_phase_status(ConcPhaseNotStarted), + _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()), + _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()), // _verbose_level set below @@ -478,9 +415,8 @@ _root_regions.init(_g1h, this); if (ConcGCThreads > ParallelGCThreads) { - warning("Can't have more ConcGCThreads (%u) " - "than ParallelGCThreads (%u).", - ConcGCThreads, ParallelGCThreads); + log_warning(gc)("Can't have more ConcGCThreads (%u) than ParallelGCThreads (%u).", + ConcGCThreads, ParallelGCThreads); return; } if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) { @@ -534,9 +470,9 @@ // Verify that the calculated value for MarkStackSize is in range. // It would be nice to use the private utility routine from Arguments. 
if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) { - warning("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): " - "must be between 1 and " SIZE_FORMAT, - mark_stack_size, MarkStackSizeMax); + log_warning(gc)("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): " + "must be between 1 and " SIZE_FORMAT, + mark_stack_size, MarkStackSizeMax); return; } FLAG_SET_ERGO(size_t, MarkStackSize, mark_stack_size); @@ -545,16 +481,16 @@ if (FLAG_IS_CMDLINE(MarkStackSize)) { if (FLAG_IS_DEFAULT(MarkStackSizeMax)) { if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) { - warning("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): " - "must be between 1 and " SIZE_FORMAT, - MarkStackSize, MarkStackSizeMax); + log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): " + "must be between 1 and " SIZE_FORMAT, + MarkStackSize, MarkStackSizeMax); return; } } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) { if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) { - warning("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")" - " or for MarkStackSizeMax (" SIZE_FORMAT ")", - MarkStackSize, MarkStackSizeMax); + log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")" + " or for MarkStackSizeMax (" SIZE_FORMAT ")", + MarkStackSize, MarkStackSizeMax); return; } } @@ -562,7 +498,7 @@ } if (!_markStack.allocate(MarkStackSize)) { - warning("Failed to allocate CM marking stack"); + log_warning(gc)("Failed to allocate CM marking stack"); return; } @@ -698,9 +634,76 @@ ShouldNotReachHere(); } -void G1ConcurrentMark::clearNextBitmap() { - G1CollectedHeap* g1h = G1CollectedHeap::heap(); - +class G1ClearBitMapTask : public AbstractGangTask { + // Heap region closure used for clearing the given mark bitmap. + class G1ClearBitmapHRClosure : public HeapRegionClosure { + private: + G1CMBitMap* _bitmap; + G1ConcurrentMark* _cm; + public: + G1ClearBitmapHRClosure(G1CMBitMap* bitmap, G1ConcurrentMark* cm) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap) { + } + + virtual bool doHeapRegion(HeapRegion* r) { + size_t const chunk_size_in_words = M / HeapWordSize; + + HeapWord* cur = r->bottom(); + HeapWord* const end = r->end(); + + while (cur < end) { + MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end)); + _bitmap->clear_range(mr); + + cur += chunk_size_in_words; + + // Abort iteration if after yielding the marking has been aborted. + if (_cm != NULL && _cm->do_yield_check() && _cm->has_aborted()) { + return true; + } + // Repeat the asserts from before the start of the closure. We will do them + // as asserts here to minimize their overhead on the product. However, we + // will have them as guarantees at the beginning / end of the bitmap + // clearing to get some checking in the product. + assert(_cm == NULL || _cm->cmThread()->during_cycle(), "invariant"); + assert(_cm == NULL || !G1CollectedHeap::heap()->collector_state()->mark_in_progress(), "invariant"); + } + assert(cur == end, "Must have completed iteration over the bitmap for region %u.", r->hrm_index()); + + return false; + } + }; + + G1ClearBitmapHRClosure _cl; + HeapRegionClaimer _hr_claimer; + bool _suspendible; // If the task is suspendible, workers must join the STS. + +public: + G1ClearBitMapTask(G1CMBitMap* bitmap, G1ConcurrentMark* cm, uint n_workers, bool suspendible) : + AbstractGangTask("Parallel Clear Bitmap Task"), + _cl(bitmap, suspendible ? 
cm : NULL), + _hr_claimer(n_workers), + _suspendible(suspendible) + { } + + void work(uint worker_id) { + SuspendibleThreadSetJoiner sts_join(_suspendible); + G1CollectedHeap::heap()->heap_region_par_iterate(&_cl, worker_id, &_hr_claimer, true); + } + + bool is_complete() { + return _cl.complete(); + } +}; + +void G1ConcurrentMark::clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield) { + assert(may_yield || SafepointSynchronize::is_at_safepoint(), "Non-yielding bitmap clear only allowed at safepoint."); + + G1ClearBitMapTask task(bitmap, this, workers->active_workers(), may_yield); + workers->run_task(&task); + guarantee(!may_yield || task.is_complete(), "Must have completed iteration when not yielding."); +} + +void G1ConcurrentMark::cleanup_for_next_mark() { // Make sure that the concurrent mark thread looks to still be in // the current cycle. guarantee(cmThread()->during_cycle(), "invariant"); @@ -709,21 +712,24 @@ // marking bitmap and getting it ready for the next cycle. During // this time no other cycle can start. So, let's make sure that this // is the case. - guarantee(!g1h->collector_state()->mark_in_progress(), "invariant"); - - ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */); - ParClearNextMarkBitmapTask task(&cl, parallel_marking_threads(), true); - _parallel_workers->run_task(&task); + guarantee(!_g1h->collector_state()->mark_in_progress(), "invariant"); + + clear_bitmap(_nextMarkBitMap, _parallel_workers, true); // Clear the liveness counting data. If the marking has been aborted, the abort() // call already did that. - if (cl.complete()) { + if (!has_aborted()) { clear_all_count_data(); } // Repeat the asserts from above. guarantee(cmThread()->during_cycle(), "invariant"); - guarantee(!g1h->collector_state()->mark_in_progress(), "invariant"); + guarantee(!_g1h->collector_state()->mark_in_progress(), "invariant"); +} + +void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) { + assert(SafepointSynchronize::is_at_safepoint(), "Should only clear the entire prev bitmap at a safepoint."); + clear_bitmap((G1CMBitMap*)_prevMarkBitMap, workers, false); } class CheckBitmapClearHRClosure : public HeapRegionClosure { @@ -848,7 +854,7 @@ // marking. reset_marking_state(true /* clear_overflow */); - log_info(gc)("Concurrent Mark reset for overflow"); + log_info(gc, marking)("Concurrent Mark reset for overflow"); } } @@ -983,13 +989,12 @@ } }; -void G1ConcurrentMark::scanRootRegions() { +void G1ConcurrentMark::scan_root_regions() { // scan_in_progress() will have been set to true only if there was // at least one root region to scan. So, if it's false, we // should not attempt to do any further work. 
if (root_regions()->scan_in_progress()) { assert(!has_aborted(), "Aborting before root region scanning is finished not supported."); - GCTraceConcTime(Info, gc) tt("Concurrent Root Region Scan"); _parallel_marking_threads = calc_parallel_marking_threads(); assert(parallel_marking_threads() <= max_parallel_marking_threads(), @@ -1007,47 +1012,27 @@ } } -void G1ConcurrentMark::register_concurrent_phase_start(const char* title) { - uint old_val = 0; - do { - old_val = Atomic::cmpxchg(ConcPhaseStarted, &_concurrent_phase_status, ConcPhaseNotStarted); - } while (old_val != ConcPhaseNotStarted); - _g1h->gc_timer_cm()->register_gc_concurrent_start(title); +void G1ConcurrentMark::concurrent_cycle_start() { + _gc_timer_cm->register_gc_start(); + + _gc_tracer_cm->report_gc_start(GCCause::_no_gc /* first parameter is not used */, _gc_timer_cm->gc_start()); + + _g1h->trace_heap_before_gc(_gc_tracer_cm); } -void G1ConcurrentMark::register_concurrent_phase_end_common(bool end_timer) { - if (_concurrent_phase_status == ConcPhaseNotStarted) { - return; +void G1ConcurrentMark::concurrent_cycle_end() { + _g1h->trace_heap_after_gc(_gc_tracer_cm); + + if (has_aborted()) { + _gc_tracer_cm->report_concurrent_mode_failure(); } - uint old_val = Atomic::cmpxchg(ConcPhaseStopping, &_concurrent_phase_status, ConcPhaseStarted); - if (old_val == ConcPhaseStarted) { - _g1h->gc_timer_cm()->register_gc_concurrent_end(); - // If 'end_timer' is true, we came here to end timer which needs concurrent phase ended. - // We need to end it before changing the status to 'ConcPhaseNotStarted' to prevent - // starting a new concurrent phase by 'ConcurrentMarkThread'. - if (end_timer) { - _g1h->gc_timer_cm()->register_gc_end(); - } - old_val = Atomic::cmpxchg(ConcPhaseNotStarted, &_concurrent_phase_status, ConcPhaseStopping); - assert(old_val == ConcPhaseStopping, "Should not have changed since we entered this scope."); - } else { - do { - // Let other thread finish changing '_concurrent_phase_status' to 'ConcPhaseNotStarted'. - os::naked_short_sleep(1); - } while (_concurrent_phase_status != ConcPhaseNotStarted); - } + _gc_timer_cm->register_gc_end(); + + _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions()); } -void G1ConcurrentMark::register_concurrent_phase_end() { - register_concurrent_phase_end_common(false); -} - -void G1ConcurrentMark::register_concurrent_gc_end_and_stop_timer() { - register_concurrent_phase_end_common(true); -} - -void G1ConcurrentMark::markFromRoots() { +void G1ConcurrentMark::mark_from_roots() { // we might be tempted to assert that: // assert(asynch == !SafepointSynchronize::is_at_safepoint(), // "inconsistent argument?"); @@ -1110,7 +1095,6 @@ if (has_overflown()) { // Oops. We overflowed. Restart concurrent marking. _restart_for_overflow = true; - log_develop_trace(gc)("Remark led to restart for overflow."); // Verify the heap w.r.t. the previous marking bitmap. if (VerifyDuringGC) { @@ -1124,7 +1108,7 @@ reset_marking_state(); } else { { - GCTraceTime(Debug, gc) trace("Aggregate Data", g1h->gc_timer_cm()); + GCTraceTime(Debug, gc, phases) trace("Aggregate Data", _gc_timer_cm); // Aggregate the per-task counting data that we have accumulated // while marking. 
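Several hunks above and below replace ad-hoc timer registration with GCTraceTime scopes bound to the concurrent-mark timer. A minimal sketch of that scoped-timing pattern as it is used here; the phase name and the work inside the scope are illustrative, _gc_timer_cm is the member introduced by this patch:

  {
    // RAII scope: logs the named phase on the gc+phases channel and records it on _gc_timer_cm.
    GCTraceTime(Debug, gc, phases) trace("Example Phase", _gc_timer_cm);
    do_example_phase_work();  // placeholder for the work being timed
  } // the phase duration is reported when the scope closes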
@@ -1163,7 +1147,7 @@ g1p->record_concurrent_mark_remark_end(); G1CMIsAliveClosure is_alive(g1h); - g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive); + _gc_tracer_cm->report_object_count_after_gc(&is_alive); } // Base class of the closures that finalize and verify the @@ -1752,11 +1736,9 @@ // sure we update the old gen/space data. g1h->g1mm()->update_sizes(); g1h->allocation_context_stats().update_after_mark(); - - g1h->trace_heap_after_concurrent_cycle(); } -void G1ConcurrentMark::completeCleanup() { +void G1ConcurrentMark::complete_cleanup() { if (has_aborted()) return; G1CollectedHeap* g1h = G1CollectedHeap::heap(); @@ -2045,7 +2027,7 @@ // Inner scope to exclude the cleaning of the string and symbol // tables from the displayed time. { - GCTraceTime(Debug, gc) trace("Reference Processing", g1h->gc_timer_cm()); + GCTraceTime(Debug, gc, phases) trace("Reference Processing", _gc_timer_cm); ReferenceProcessor* rp = g1h->ref_processor_cm(); @@ -2102,8 +2084,8 @@ &g1_keep_alive, &g1_drain_mark_stack, executor, - g1h->gc_timer_cm()); - g1h->gc_tracer_cm()->report_gc_reference_stats(stats); + _gc_timer_cm); + _gc_tracer_cm->report_gc_reference_stats(stats); // The do_oop work routines of the keep_alive and drain_marking_stack // oop closures will set the has_overflown flag if we overflow the @@ -2134,28 +2116,24 @@ assert(_markStack.isEmpty(), "Marking should have completed"); // Unload Klasses, String, Symbols, Code Cache, etc. - { - GCTraceTime(Debug, gc) trace("Unloading", g1h->gc_timer_cm()); - - if (ClassUnloadingWithConcurrentMark) { - bool purged_classes; - - { - GCTraceTime(Trace, gc) trace("System Dictionary Unloading", g1h->gc_timer_cm()); - purged_classes = SystemDictionary::do_unloading(&g1_is_alive, false /* Defer klass cleaning */); - } - - { - GCTraceTime(Trace, gc) trace("Parallel Unloading", g1h->gc_timer_cm()); - weakRefsWorkParallelPart(&g1_is_alive, purged_classes); - } + if (ClassUnloadingWithConcurrentMark) { + bool purged_classes; + + { + GCTraceTime(Debug, gc, phases) trace("System Dictionary Unloading", _gc_timer_cm); + purged_classes = SystemDictionary::do_unloading(&g1_is_alive, false /* Defer klass cleaning */); } - if (G1StringDedup::is_enabled()) { - GCTraceTime(Trace, gc) trace("String Deduplication Unlink", g1h->gc_timer_cm()); - G1StringDedup::unlink(&g1_is_alive); + { + GCTraceTime(Debug, gc, phases) trace("Parallel Unloading", _gc_timer_cm); + weakRefsWorkParallelPart(&g1_is_alive, purged_classes); } } + + if (G1StringDedup::is_enabled()) { + GCTraceTime(Debug, gc, phases) trace("String Deduplication Unlink", _gc_timer_cm); + G1StringDedup::unlink(&g1_is_alive); + } } void G1ConcurrentMark::swapMarkBitMaps() { @@ -2273,7 +2251,7 @@ HandleMark hm; G1CollectedHeap* g1h = G1CollectedHeap::heap(); - GCTraceTime(Debug, gc) trace("Finalize Marking", g1h->gc_timer_cm()); + GCTraceTime(Debug, gc, phases) trace("Finalize Marking", _gc_timer_cm); g1h->ensure_parsability(false); @@ -2308,7 +2286,7 @@ void G1ConcurrentMark::clearRangePrevBitmap(MemRegion mr) { // Note we are overriding the read-only view of the prev map here, via // the cast. - ((G1CMBitMap*)_prevMarkBitMap)->clearRange(mr); + ((G1CMBitMap*)_prevMarkBitMap)->clear_range(mr); } HeapRegion* @@ -2605,7 +2583,7 @@ // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next // concurrent bitmap clearing. 
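  // The non-yielding clear below goes through the shared clear_bitmap() helper on the heap's
  // WorkGang with may_yield == false, which the helper only permits at a safepoint.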
- _nextMarkBitMap->clearAll(); + clear_bitmap(_nextMarkBitMap, _g1h->workers(), false); // Note we cannot clear the previous marking bitmap here // since VerifyDuringGC verifies the objects marked during @@ -2629,10 +2607,6 @@ satb_mq_set.set_active_all_threads( false, /* new active value */ satb_mq_set.is_active() /* expected_active */); - - _g1h->trace_heap_after_concurrent_cycle(); - - _g1h->register_concurrent_cycle_end(); } static void print_ms_time_info(const char* prefix, const char* name, @@ -2646,7 +2620,7 @@ } void G1ConcurrentMark::print_summary_info() { - LogHandle(gc, marking) log; + Log(gc, marking) log; if (!log.is_trace()) { return; } @@ -3554,8 +3528,6 @@ G1PrintRegionLivenessInfoClosure(const char* phase_name) : _total_used_bytes(0), _total_capacity_bytes(0), _total_prev_live_bytes(0), _total_next_live_bytes(0), - _hum_used_bytes(0), _hum_capacity_bytes(0), - _hum_prev_live_bytes(0), _hum_next_live_bytes(0), _total_remset_bytes(0), _total_strong_code_roots_bytes(0) { G1CollectedHeap* g1h = G1CollectedHeap::heap(); MemRegion g1_reserved = g1h->g1_reserved(); @@ -3595,36 +3567,6 @@ "(bytes)", "(bytes)"); } -// It takes as a parameter a reference to one of the _hum_* fields, it -// deduces the corresponding value for a region in a humongous region -// series (either the region size, or what's left if the _hum_* field -// is < the region size), and updates the _hum_* field accordingly. -size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) { - size_t bytes = 0; - // The > 0 check is to deal with the prev and next live bytes which - // could be 0. - if (*hum_bytes > 0) { - bytes = MIN2(HeapRegion::GrainBytes, *hum_bytes); - *hum_bytes -= bytes; - } - return bytes; -} - -// It deduces the values for a region in a humongous region series -// from the _hum_* fields and updates those accordingly. It assumes -// that that _hum_* fields have already been set up from the "starts -// humongous" region and we visit the regions in address order. -void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes, - size_t* capacity_bytes, - size_t* prev_live_bytes, - size_t* next_live_bytes) { - assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition"); - *used_bytes = get_hum_bytes(&_hum_used_bytes); - *capacity_bytes = get_hum_bytes(&_hum_capacity_bytes); - *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes); - *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes); -} - bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) { const char* type = r->get_type_str(); HeapWord* bottom = r->bottom(); @@ -3637,24 +3579,6 @@ size_t remset_bytes = r->rem_set()->mem_size(); size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size(); - if (r->is_starts_humongous()) { - assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 && - _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0, - "they should have been zeroed after the last time we used them"); - // Set up the _hum_* fields. 
- _hum_capacity_bytes = capacity_bytes; - _hum_used_bytes = used_bytes; - _hum_prev_live_bytes = prev_live_bytes; - _hum_next_live_bytes = next_live_bytes; - get_hum_bytes(&used_bytes, &capacity_bytes, - &prev_live_bytes, &next_live_bytes); - end = bottom + HeapRegion::GrainWords; - } else if (r->is_continues_humongous()) { - get_hum_bytes(&used_bytes, &capacity_bytes, - &prev_live_bytes, &next_live_bytes); - assert(end == bottom + HeapRegion::GrainWords, "invariant"); - } - _total_used_bytes += used_bytes; _total_capacity_bytes += capacity_bytes; _total_prev_live_bytes += prev_live_bytes; diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/g1ConcurrentMark.hpp --- a/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -34,6 +34,8 @@ class G1CMBitMap; class G1CMTask; class G1ConcurrentMark; +class ConcurrentGCTimer; +class G1OldTracer; typedef GenericTaskQueue G1CMTaskQueue; typedef GenericTaskQueueSet G1CMTaskQueueSet; @@ -139,10 +141,7 @@ inline void clear(HeapWord* addr); inline bool parMark(HeapWord* addr); - void clearRange(MemRegion mr); - - // Clear the whole mark bitmap. - void clearAll(); + void clear_range(MemRegion mr); }; // Represents a marking stack used by ConcurrentMarking in the G1 collector. @@ -352,17 +351,9 @@ // time of remark. volatile bool _concurrent_marking_in_progress; - // There would be a race between ConcurrentMarkThread and VMThread(ConcurrentMark::abort()) - // to call ConcurrentGCTimer::register_gc_concurrent_end(). - // And this variable is used to keep track of concurrent phase. - volatile uint _concurrent_phase_status; - // Concurrent phase is not yet started. - static const uint ConcPhaseNotStarted = 0; - // Concurrent phase is started. - static const uint ConcPhaseStarted = 1; - // Caller thread of ConcurrentGCTimer::register_gc_concurrent_end() is ending concurrent phase. - // So other thread should wait until the status to be changed to ConcPhaseNotStarted. - static const uint ConcPhaseStopping = 2; + ConcurrentGCTimer* _gc_timer_cm; + + G1OldTracer* _gc_tracer_cm; // All of these times are in ms NumberSeq _init_times; @@ -497,6 +488,9 @@ // end_timer, true to end gc timer after ending concurrent phase. void register_concurrent_phase_end_common(bool end_timer); + // Clear the given bitmap in parallel using the given WorkGang. If may_yield is + // true, periodically insert checks to see if this method should exit prematurely. + void clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield); public: // Manipulation of the global mark stack. // The push and pop operations are used by tasks for transfers @@ -530,10 +524,8 @@ _concurrent_marking_in_progress = false; } - void register_concurrent_phase_start(const char* title); - void register_concurrent_phase_end(); - // Ends both concurrent phase and timer. - void register_concurrent_gc_end_and_stop_timer(); + void concurrent_cycle_start(); + void concurrent_cycle_end(); void update_accum_task_vtime(int i, double vtime) { _accum_task_vtime[i] += vtime; @@ -585,8 +577,13 @@ uint worker_id, HeapRegion* hr = NULL); - // Clear the next marking bitmap (will be called concurrently). - void clearNextBitmap(); + // Prepare internal data structures for the next mark cycle. This includes clearing + // the next mark bitmap and some internal data structures. This method is intended + // to be called concurrently to the mutator. It will yield to safepoint requests. 
+ void cleanup_for_next_mark(); + + // Clear the previous marking bitmap during safepoint. + void clear_prev_bitmap(WorkGang* workers); // Return whether the next mark bitmap has no marks set. To be used for assertions // only. Will not yield to pause requests. @@ -603,18 +600,18 @@ // Scan all the root regions and mark everything reachable from // them. - void scanRootRegions(); + void scan_root_regions(); // Scan a single root region and mark everything reachable from it. void scanRootRegion(HeapRegion* hr, uint worker_id); // Do concurrent phase of marking, to a tentative transitive closure. - void markFromRoots(); + void mark_from_roots(); void checkpointRootsFinal(bool clear_all_soft_refs); void checkpointRootsFinalWork(); void cleanup(); - void completeCleanup(); + void complete_cleanup(); // Mark in the previous bitmap. NB: this is usually read-only, so use // this carefully! @@ -730,6 +727,9 @@ return _completed_initialization; } + ConcurrentGCTimer* gc_timer_cm() const { return _gc_timer_cm; } + G1OldTracer* gc_tracer_cm() const { return _gc_tracer_cm; } + protected: // Clear all the per-task bitmaps and arrays used to store the // counting data. @@ -996,18 +996,6 @@ size_t _total_prev_live_bytes; size_t _total_next_live_bytes; - // These are set up when we come across a "stars humongous" region - // (as this is where most of this information is stored, not in the - // subsequent "continues humongous" regions). After that, for every - // region in a given humongous region series we deduce the right - // values for it by simply subtracting the appropriate amount from - // these fields. All these values should reach 0 after we've visited - // the last region in the series. - size_t _hum_used_bytes; - size_t _hum_capacity_bytes; - size_t _hum_prev_live_bytes; - size_t _hum_next_live_bytes; - // Accumulator for the remembered set size size_t _total_remset_bytes; @@ -1026,11 +1014,6 @@ return (double) val / (double) M; } - // See the .cpp file. - size_t get_hum_bytes(size_t* hum_bytes); - void get_hum_bytes(size_t* used_bytes, size_t* capacity_bytes, - size_t* prev_live_bytes, size_t* next_live_bytes); - public: // The header and footer are printed in the constructor and // destructor respectively. diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/g1EvacStats.cpp --- a/hotspot/src/share/vm/gc/g1/g1EvacStats.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/g1/g1EvacStats.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -110,15 +110,9 @@ size_t const cur_plab_sz = (size_t)((double)total_waste_allowed / G1LastPLABAverageOccupancy); // Take historical weighted average _filter.sample(cur_plab_sz); - // Clip from above and below, and align to object boundary - size_t plab_sz; - plab_sz = MAX2(min_size(), (size_t)_filter.average()); - plab_sz = MIN2(max_size(), plab_sz); - plab_sz = align_object_size(plab_sz); - // Latch the result - _desired_net_plab_sz = plab_sz; + _desired_net_plab_sz = MAX2(min_size(), (size_t)_filter.average()); - log_sizing(cur_plab_sz, plab_sz); + log_sizing(cur_plab_sz, _desired_net_plab_sz); // Clear accumulators for next round. reset(); } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/g1FromCardCache.cpp --- a/hotspot/src/share/vm/gc/g1/g1FromCardCache.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/g1/g1FromCardCache.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2016, Oracle and/or its affiliates. 
All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -37,8 +37,8 @@ guarantee(_cache == NULL, "Should not call this multiple times"); _max_regions = max_num_regions; - _cache = Padded2DArray::create_unfreeable(num_par_rem_sets, - _max_regions, + _cache = Padded2DArray::create_unfreeable(_max_regions, + num_par_rem_sets, &_static_mem_size); invalidate(0, _max_regions); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/g1FromCardCache.hpp --- a/hotspot/src/share/vm/gc/g1/g1FromCardCache.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/g1/g1FromCardCache.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -32,8 +32,11 @@ // a per-region and per-thread basis. class G1FromCardCache : public AllStatic { private: - // Array of card indices. Indexed by thread X and heap region to minimize + // Array of card indices. Indexed by heap region (rows) and thread (columns) to minimize // thread contention. + // This order minimizes the time to clear all entries for a given region during region + // freeing. I.e. a single clear of a single memory area instead of multiple separate + // accesses with a large stride per region. static int** _cache; static uint _max_regions; static size_t _static_mem_size; @@ -58,11 +61,11 @@ } static int at(uint worker_id, uint region_idx) { - return _cache[worker_id][region_idx]; + return _cache[region_idx][worker_id]; } static void set(uint worker_id, uint region_idx, int val) { - _cache[worker_id][region_idx] = val; + _cache[region_idx][worker_id] = val; } static void initialize(uint num_par_rem_sets, uint max_num_regions); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/g1GCPhaseTimes.cpp --- a/hotspot/src/share/vm/gc/g1/g1GCPhaseTimes.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/g1/g1GCPhaseTimes.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -33,7 +33,7 @@ #include "runtime/timer.hpp" #include "runtime/os.hpp" -static const char* Indents[5] = {"", " ", " ", " ", " "}; +static const char* Indents[5] = {"", " ", " ", " ", " "}; G1GCPhaseTimes::G1GCPhaseTimes(uint max_gc_threads) : _max_gc_threads(max_gc_threads) @@ -94,11 +94,8 @@ _gc_par_phases[PreserveCMReferents] = new WorkerDataArray(max_gc_threads, "Parallel Preserve CM Refs (ms):"); } -void G1GCPhaseTimes::note_gc_start(uint active_gc_threads) { - assert(active_gc_threads > 0, "The number of threads must be > 0"); - assert(active_gc_threads <= _max_gc_threads, "The number of active threads must be <= the max number of threads"); +void G1GCPhaseTimes::note_gc_start() { _gc_start_counter = os::elapsed_counter(); - _active_gc_threads = active_gc_threads; _cur_expand_heap_time_ms = 0.0; _external_accounted_time_ms = 0.0; @@ -109,31 +106,55 @@ } } +#define ASSERT_PHASE_UNINITIALIZED(phase) \ + assert(_gc_par_phases[phase]->get(i) == uninitialized, "Phase " #phase " reported for thread that was not started"); + +double G1GCPhaseTimes::worker_time(GCParPhases phase, uint worker) { + double value = _gc_par_phases[phase]->get(worker); + if (value != WorkerDataArray::uninitialized()) { + return value; + } + return 0.0; +} + void G1GCPhaseTimes::note_gc_end() { 
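  // Derive the pause time from the start counter, then walk every worker slot up to
  // _max_gc_threads: a slot whose GCWorkerStart entry is still uninitialized is asserted
  // (via ASSERT_PHASE_UNINITIALIZED) to have reported no phase times at all, while a started
  // worker gets GCWorkerTotal recorded and the remainder beyond the known phases recorded as Other.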
_gc_pause_time_ms = TimeHelper::counter_to_millis(os::elapsed_counter() - _gc_start_counter); - for (uint i = 0; i < _active_gc_threads; i++) { - double worker_time = _gc_par_phases[GCWorkerEnd]->get(i) - _gc_par_phases[GCWorkerStart]->get(i); - record_time_secs(GCWorkerTotal, i , worker_time); + + double uninitialized = WorkerDataArray::uninitialized(); + + for (uint i = 0; i < _max_gc_threads; i++) { + double worker_start = _gc_par_phases[GCWorkerStart]->get(i); + if (worker_start != uninitialized) { + assert(_gc_par_phases[GCWorkerEnd]->get(i) != uninitialized, "Worker started but not ended."); + double total_worker_time = _gc_par_phases[GCWorkerEnd]->get(i) - _gc_par_phases[GCWorkerStart]->get(i); + record_time_secs(GCWorkerTotal, i , total_worker_time); - double worker_known_time = - _gc_par_phases[ExtRootScan]->get(i) + - _gc_par_phases[SATBFiltering]->get(i) + - _gc_par_phases[UpdateRS]->get(i) + - _gc_par_phases[ScanRS]->get(i) + - _gc_par_phases[CodeRoots]->get(i) + - _gc_par_phases[ObjCopy]->get(i) + - _gc_par_phases[Termination]->get(i); + double worker_known_time = + worker_time(ExtRootScan, i) + + worker_time(SATBFiltering, i) + + worker_time(UpdateRS, i) + + worker_time(ScanRS, i) + + worker_time(CodeRoots, i) + + worker_time(ObjCopy, i) + + worker_time(Termination, i); - record_time_secs(Other, i, worker_time - worker_known_time); - } - - for (int i = 0; i < GCParPhasesSentinel; i++) { - if (_gc_par_phases[i] != NULL) { - _gc_par_phases[i]->verify(_active_gc_threads); + record_time_secs(Other, i, total_worker_time - worker_known_time); + } else { + // Make sure all slots are uninitialized since this thread did not seem to have been started + ASSERT_PHASE_UNINITIALIZED(GCWorkerEnd); + ASSERT_PHASE_UNINITIALIZED(ExtRootScan); + ASSERT_PHASE_UNINITIALIZED(SATBFiltering); + ASSERT_PHASE_UNINITIALIZED(UpdateRS); + ASSERT_PHASE_UNINITIALIZED(ScanRS); + ASSERT_PHASE_UNINITIALIZED(CodeRoots); + ASSERT_PHASE_UNINITIALIZED(ObjCopy); + ASSERT_PHASE_UNINITIALIZED(Termination); } } } +#undef ASSERT_PHASE_UNINITIALIZED + // record the time a phase took in seconds void G1GCPhaseTimes::record_time_secs(GCParPhases phase, uint worker_i, double secs) { _gc_par_phases[phase]->set(worker_i, secs); @@ -150,39 +171,39 @@ // return the average time for a phase in milliseconds double G1GCPhaseTimes::average_time_ms(GCParPhases phase) { - return _gc_par_phases[phase]->average(_active_gc_threads) * 1000.0; + return _gc_par_phases[phase]->average() * 1000.0; } size_t G1GCPhaseTimes::sum_thread_work_items(GCParPhases phase) { assert(_gc_par_phases[phase]->thread_work_items() != NULL, "No sub count"); - return _gc_par_phases[phase]->thread_work_items()->sum(_active_gc_threads); + return _gc_par_phases[phase]->thread_work_items()->sum(); } template void G1GCPhaseTimes::details(T* phase, const char* indent) { - LogHandle(gc, phases, task) log; + Log(gc, phases, task) log; if (log.is_level(LogLevel::Trace)) { outputStream* trace_out = log.trace_stream(); trace_out->print("%s", indent); - phase->print_details_on(trace_out, _active_gc_threads); + phase->print_details_on(trace_out); } } void G1GCPhaseTimes::log_phase(WorkerDataArray* phase, uint indent, outputStream* out, bool print_sum) { out->print("%s", Indents[indent]); - phase->print_summary_on(out, _active_gc_threads, print_sum); + phase->print_summary_on(out, print_sum); details(phase, Indents[indent]); WorkerDataArray* work_items = phase->thread_work_items(); if (work_items != NULL) { out->print("%s", Indents[indent + 1]); - 
work_items->print_summary_on(out, _active_gc_threads, true); + work_items->print_summary_on(out, true); details(work_items, Indents[indent + 1]); } } void G1GCPhaseTimes::debug_phase(WorkerDataArray* phase) { - LogHandle(gc, phases) log; + Log(gc, phases) log; if (log.is_level(LogLevel::Debug)) { ResourceMark rm; log_phase(phase, 2, log.debug_stream(), true); @@ -190,7 +211,7 @@ } void G1GCPhaseTimes::trace_phase(WorkerDataArray* phase, bool print_sum) { - LogHandle(gc, phases) log; + Log(gc, phases) log; if (log.is_level(LogLevel::Trace)) { ResourceMark rm; log_phase(phase, 3, log.trace_stream(), print_sum); @@ -277,11 +298,11 @@ } debug_line("Choose CSet", (_recorded_young_cset_choice_time_ms + _recorded_non_young_cset_choice_time_ms)); debug_line("Preserve CM Refs", _recorded_preserve_cm_referents_time_ms); + trace_phase(_gc_par_phases[PreserveCMReferents]); debug_line("Reference Processing", _cur_ref_proc_time_ms); debug_line("Reference Enqueuing", _cur_ref_enq_time_ms); debug_line("Redirty Cards", _recorded_redirty_logged_cards_time_ms); trace_phase(_gc_par_phases[RedirtyCards]); - trace_phase(_gc_par_phases[PreserveCMReferents]); if (G1EagerReclaimHumongousObjects) { debug_line("Humongous Register", _cur_fast_reclaim_humongous_register_time_ms); trace_line_sz("Humongous Total", _cur_fast_reclaim_humongous_total); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/g1GCPhaseTimes.hpp --- a/hotspot/src/share/vm/gc/g1/g1GCPhaseTimes.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/g1/g1GCPhaseTimes.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -32,7 +32,6 @@ template class WorkerDataArray; class G1GCPhaseTimes : public CHeapObj { - uint _active_gc_threads; uint _max_gc_threads; jlong _gc_start_counter; double _gc_pause_time_ms; @@ -123,6 +122,7 @@ double _cur_verify_before_time_ms; double _cur_verify_after_time_ms; + double worker_time(GCParPhases phase, uint worker); void note_gc_end(); template @@ -133,7 +133,7 @@ public: G1GCPhaseTimes(uint max_gc_threads); - void note_gc_start(uint active_gc_threads); + void note_gc_start(); void print(); // record the time a phase took in seconds diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/g1HeapSizingPolicy.cpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/src/share/vm/gc/g1/g1HeapSizingPolicy.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,157 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" +#include "gc/g1/g1CollectedHeap.hpp" +#include "gc/g1/g1HeapSizingPolicy.hpp" +#include "gc/g1/g1Analytics.hpp" +#include "logging/log.hpp" +#include "runtime/globals.hpp" +#include "utilities/debug.hpp" +#include "utilities/globalDefinitions.hpp" + +G1HeapSizingPolicy::G1HeapSizingPolicy(const G1CollectedHeap* g1, const G1Analytics* analytics) : + _g1(g1), + _analytics(analytics), + _num_prev_pauses_for_heuristics(analytics->number_of_recorded_pause_times()) { + assert(MinOverThresholdForGrowth < _num_prev_pauses_for_heuristics, "Threshold must be less than %u", _num_prev_pauses_for_heuristics); + clear_ratio_check_data(); + } + +void G1HeapSizingPolicy::clear_ratio_check_data() { + _ratio_over_threshold_count = 0; + _ratio_over_threshold_sum = 0.0; + _pauses_since_start = 0; +} + +size_t G1HeapSizingPolicy::expansion_amount() { + double recent_gc_overhead = _analytics->recent_avg_pause_time_ratio() * 100.0; + double last_gc_overhead = _analytics->last_pause_time_ratio() * 100.0; + assert(GCTimeRatio > 0, + "we should have set it to a default value set_g1_gc_flags() " + "if a user set it to 0"); + const double gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio)); + + double threshold = gc_overhead_perc; + size_t expand_bytes = 0; + + // If the heap is at less than half its maximum size, scale the threshold down, + // to a limit of 1. Thus the smaller the heap is, the more likely it is to expand, + // though the scaling code will likely keep the increase small. + if (_g1->capacity() <= _g1->max_capacity() / 2) { + threshold *= (double)_g1->capacity() / (double)(_g1->max_capacity() / 2); + threshold = MAX2(threshold, 1.0); + } + + // If the last GC time ratio is over the threshold, increment the count of + // times it has been exceeded, and add this ratio to the sum of exceeded + // ratios. + if (last_gc_overhead > threshold) { + _ratio_over_threshold_count++; + _ratio_over_threshold_sum += last_gc_overhead; + } + + // Check if we've had enough GC time ratio checks that were over the + // threshold to trigger an expansion. We'll also expand if we've + // reached the end of the history buffer and the average of all entries + // is still over the threshold. This indicates a smaller number of GCs were + // long enough to make the average exceed the threshold. + bool filled_history_buffer = _pauses_since_start == _num_prev_pauses_for_heuristics; + if ((_ratio_over_threshold_count == MinOverThresholdForGrowth) || + (filled_history_buffer && (recent_gc_overhead > threshold))) { + size_t min_expand_bytes = HeapRegion::GrainBytes; + size_t reserved_bytes = _g1->max_capacity(); + size_t committed_bytes = _g1->capacity(); + size_t uncommitted_bytes = reserved_bytes - committed_bytes; + size_t expand_bytes_via_pct = + uncommitted_bytes * G1ExpandByPercentOfAvailable / 100; + double scale_factor = 1.0; + + // If the current size is less than 1/4 of the Initial heap size, expand + // by half of the delta between the current and Initial sizes. IE, grow + // back quickly. + // + // Otherwise, take the current size, or G1ExpandByPercentOfAvailable % of + // the available expansion space, whichever is smaller, as the base + // expansion size. Then possibly scale this size according to how much the + // threshold has (on average) been exceeded by. If the delta is small + // (less than the StartScaleDownAt value), scale the size down linearly, but + // not by less than MinScaleDownFactor. 
If the delta is large (greater than + // the StartScaleUpAt value), scale up, but adding no more than MaxScaleUpFactor + // times the base size. The scaling will be linear in the range from + // StartScaleUpAt to (StartScaleUpAt + ScaleUpRange). In other words, + // ScaleUpRange sets the rate of scaling up. + if (committed_bytes < InitialHeapSize / 4) { + expand_bytes = (InitialHeapSize - committed_bytes) / 2; + } else { + double const MinScaleDownFactor = 0.2; + double const MaxScaleUpFactor = 2; + double const StartScaleDownAt = gc_overhead_perc; + double const StartScaleUpAt = gc_overhead_perc * 1.5; + double const ScaleUpRange = gc_overhead_perc * 2.0; + + double ratio_delta; + if (filled_history_buffer) { + ratio_delta = recent_gc_overhead - threshold; + } else { + ratio_delta = (_ratio_over_threshold_sum/_ratio_over_threshold_count) - threshold; + } + + expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes); + if (ratio_delta < StartScaleDownAt) { + scale_factor = ratio_delta / StartScaleDownAt; + scale_factor = MAX2(scale_factor, MinScaleDownFactor); + } else if (ratio_delta > StartScaleUpAt) { + scale_factor = 1 + ((ratio_delta - StartScaleUpAt) / ScaleUpRange); + scale_factor = MIN2(scale_factor, MaxScaleUpFactor); + } + } + + log_debug(gc, ergo, heap)("Attempt heap expansion (recent GC overhead higher than threshold after GC) " + "recent GC overhead: %1.2f %% threshold: %1.2f %% uncommitted: " SIZE_FORMAT "B base expansion amount and scale: " SIZE_FORMAT "B (%1.2f%%)", + recent_gc_overhead, threshold, uncommitted_bytes, expand_bytes, scale_factor * 100); + + expand_bytes = static_cast(expand_bytes * scale_factor); + + // Ensure the expansion size is at least the minimum growth amount + // and at most the remaining uncommitted byte size. + expand_bytes = MAX2(expand_bytes, min_expand_bytes); + expand_bytes = MIN2(expand_bytes, uncommitted_bytes); + + clear_ratio_check_data(); + } else { + // An expansion was not triggered. If we've started counting, increment + // the number of checks we've made in the current window. If we've + // reached the end of the window without resizing, clear the counters to + // start again the next time we see a ratio above the threshold. + if (_ratio_over_threshold_count > 0) { + _pauses_since_start++; + if (_pauses_since_start > _num_prev_pauses_for_heuristics) { + clear_ratio_check_data(); + } + } + } + + return expand_bytes; +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/g1HeapSizingPolicy.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/src/share/vm/gc/g1/g1HeapSizingPolicy.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_G1_G1HEAPSIZINGPOLICY_HPP +#define SHARE_VM_GC_G1_G1HEAPSIZINGPOLICY_HPP + +#include "memory/allocation.hpp" + +class G1Analytics; +class G1CollectedHeap; + +class G1HeapSizingPolicy: public CHeapObj { + // MinOverThresholdForGrowth must be less than the number of recorded + // pause times in G1Analytics, representing the minimum number of pause + // time ratios that exceed GCTimeRatio before a heap expansion will be triggered. + const static uint MinOverThresholdForGrowth = 4; + + const G1CollectedHeap* _g1; + const G1Analytics* _analytics; + + const uint _num_prev_pauses_for_heuristics; + // Ratio check data for determining if heap growth is necessary. + uint _ratio_over_threshold_count; + double _ratio_over_threshold_sum; + uint _pauses_since_start; + + +protected: + G1HeapSizingPolicy(const G1CollectedHeap* g1, const G1Analytics* analytics); +public: + + // If an expansion would be appropriate, because recent GC overhead had + // exceeded the desired limit, return an amount to expand by. + virtual size_t expansion_amount(); + + // Clear ratio tracking data used by expansion_amount(). + void clear_ratio_check_data(); + + static G1HeapSizingPolicy* create(const G1CollectedHeap* g1, const G1Analytics* analytics); +}; + +#endif // SRC_SHARE_VM_GC_G1_G1HEAPSIZINGPOLICY_HPP diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/g1HeapSizingPolicy_ext.cpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/src/share/vm/gc/g1/g1HeapSizingPolicy_ext.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" +#include "gc/g1/g1HeapSizingPolicy.hpp" + +G1HeapSizingPolicy* G1HeapSizingPolicy::create(const G1CollectedHeap* g1, const G1Analytics* analytics) { + return new G1HeapSizingPolicy(g1, analytics); +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/g1HeapTransition.cpp --- a/hotspot/src/share/vm/gc/g1/g1HeapTransition.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/g1/g1HeapTransition.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -82,8 +82,8 @@ void G1HeapTransition::print() { Data after(_g1_heap); - size_t eden_capacity_bytes_after_gc = _g1_heap->g1_policy()->young_list_target_length() - after._survivor_length; - size_t survivor_capacity_bytes_after_gc = _g1_heap->g1_policy()->max_survivor_regions(); + size_t eden_capacity_length_after_gc = _g1_heap->g1_policy()->young_list_target_length() - after._survivor_length; + size_t survivor_capacity_length_after_gc = _g1_heap->g1_policy()->max_survivor_regions(); DetailedUsage usage; if (log_is_enabled(Trace, gc, heap)) { @@ -100,11 +100,11 @@ } log_info(gc, heap)("Eden regions: " SIZE_FORMAT "->" SIZE_FORMAT "(" SIZE_FORMAT ")", - _before._eden_length, after._eden_length, eden_capacity_bytes_after_gc); + _before._eden_length, after._eden_length, eden_capacity_length_after_gc); log_trace(gc, heap)(" Used: 0K, Waste: 0K"); log_info(gc, heap)("Survivor regions: " SIZE_FORMAT "->" SIZE_FORMAT "(" SIZE_FORMAT ")", - _before._survivor_length, after._survivor_length, survivor_capacity_bytes_after_gc); + _before._survivor_length, after._survivor_length, survivor_capacity_length_after_gc); log_trace(gc, heap)(" Used: " SIZE_FORMAT "K, Waste: " SIZE_FORMAT "K", usage._survivor_used / K, ((after._survivor_length * HeapRegion::GrainBytes) - usage._survivor_used) / K); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/g1HeapVerifier.cpp --- a/hotspot/src/share/vm/gc/g1/g1HeapVerifier.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/g1/g1HeapVerifier.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -60,7 +60,7 @@ if (!oopDesc::is_null(heap_oop)) { oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); if (_g1h->is_obj_dead_cond(obj, _vo)) { - LogHandle(gc, verify) log; + Log(gc, verify) log; log.info("Root location " PTR_FORMAT " points to dead obj " PTR_FORMAT, p2i(p), p2i(obj)); if (_vo == VerifyOption_G1UseMarkWord) { log.error(" Mark word: " PTR_FORMAT, p2i(obj->mark())); @@ -406,7 +406,7 @@ // It helps to have the per-region information in the output to // help us track down what went wrong. This is why we call // print_extended_on() instead of print_on(). 
- LogHandle(gc, verify) log; + Log(gc, verify) log; ResourceMark rm; _g1h->print_extended_on(log.error_stream()); } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/g1HotCardCache.cpp --- a/hotspot/src/share/vm/gc/g1/g1HotCardCache.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/g1/g1HotCardCache.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -36,7 +36,7 @@ _use_cache = true; _hot_cache_size = (size_t)1 << G1ConcRSLogCacheSize; - _hot_cache = _hot_cache_memory.allocate(_hot_cache_size); + _hot_cache = ArrayAllocator::allocate(_hot_cache_size); reset_hot_cache_internal(); @@ -51,7 +51,7 @@ G1HotCardCache::~G1HotCardCache() { if (default_use_cache()) { assert(_hot_cache != NULL, "Logic"); - _hot_cache_memory.free(); + ArrayAllocator::free(_hot_cache, _hot_cache_size); _hot_cache = NULL; } } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/g1HotCardCache.hpp --- a/hotspot/src/share/vm/gc/g1/g1HotCardCache.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/g1/g1HotCardCache.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -61,7 +61,6 @@ G1CardCounts _card_counts; - ArrayAllocator _hot_cache_memory; // The card cache table jbyte** _hot_cache; diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/g1MarkSweep.cpp --- a/hotspot/src/share/vm/gc/g1/g1MarkSweep.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/g1/g1MarkSweep.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -122,7 +122,7 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading, bool clear_all_softrefs) { // Recursively traverse all live objects and mark them - GCTraceTime(Trace, gc) tm("Phase 1: Mark live objects", gc_timer()); + GCTraceTime(Info, gc, phases) tm("Phase 1: Mark live objects", gc_timer()); G1CollectedHeap* g1h = G1CollectedHeap::heap(); @@ -137,34 +137,49 @@ &follow_code_closure); } - // Process reference objects found during marking - ReferenceProcessor* rp = GenMarkSweep::ref_processor(); - assert(rp == g1h->ref_processor_stw(), "Sanity"); + { + GCTraceTime(Debug, gc, phases) trace("Reference Processing", gc_timer()); + + // Process reference objects found during marking + ReferenceProcessor* rp = GenMarkSweep::ref_processor(); + assert(rp == g1h->ref_processor_stw(), "Sanity"); - rp->setup_policy(clear_all_softrefs); - const ReferenceProcessorStats& stats = - rp->process_discovered_references(&GenMarkSweep::is_alive, - &GenMarkSweep::keep_alive, - &GenMarkSweep::follow_stack_closure, - NULL, - gc_timer()); - gc_tracer()->report_gc_reference_stats(stats); - + rp->setup_policy(clear_all_softrefs); + const ReferenceProcessorStats& stats = + rp->process_discovered_references(&GenMarkSweep::is_alive, + &GenMarkSweep::keep_alive, + &GenMarkSweep::follow_stack_closure, + NULL, + gc_timer()); + gc_tracer()->report_gc_reference_stats(stats); + } // This is the point where the entire marking should have completed. assert(GenMarkSweep::_marking_stack.is_empty(), "Marking should have completed"); - // Unload classes and purge the SystemDictionary. - bool purged_class = SystemDictionary::do_unloading(&GenMarkSweep::is_alive); + { + GCTraceTime(Debug, gc, phases) trace("Class Unloading", gc_timer()); - // Unload nmethods. - CodeCache::do_unloading(&GenMarkSweep::is_alive, purged_class); + // Unload classes and purge the SystemDictionary. + bool purged_class = SystemDictionary::do_unloading(&GenMarkSweep::is_alive); + + // Unload nmethods. + CodeCache::do_unloading(&GenMarkSweep::is_alive, purged_class); - // Prune dead klasses from subklass/sibling/implementor lists. 
- Klass::clean_weak_klass_links(&GenMarkSweep::is_alive); + // Prune dead klasses from subklass/sibling/implementor lists. + Klass::clean_weak_klass_links(&GenMarkSweep::is_alive); + } - // Delete entries for dead interned string and clean up unreferenced symbols in symbol table. - g1h->unlink_string_and_symbol_table(&GenMarkSweep::is_alive); + { + GCTraceTime(Debug, gc, phases) trace("Scrub String and Symbol Tables", gc_timer()); + // Delete entries for dead interned string and clean up unreferenced symbols in symbol table. + g1h->unlink_string_and_symbol_table(&GenMarkSweep::is_alive); + } + + if (G1StringDedup::is_enabled()) { + GCTraceTime(Debug, gc, phases) trace("String Deduplication Unlink", gc_timer()); + G1StringDedup::unlink(&GenMarkSweep::is_alive); + } if (VerifyDuringGC) { HandleMark hm; // handle scope @@ -197,7 +212,7 @@ // phase2, phase3 and phase4, but the ValidateMarkSweep live oops // tracking expects us to do so. See comment under phase4. - GCTraceTime(Trace, gc) tm("Phase 2: Compute new object addresses", gc_timer()); + GCTraceTime(Info, gc, phases) tm("Phase 2: Compute new object addresses", gc_timer()); prepare_compaction(); } @@ -220,17 +235,11 @@ } }; -class G1AlwaysTrueClosure: public BoolObjectClosure { -public: - bool do_object_b(oop p) { return true; } -}; -static G1AlwaysTrueClosure always_true; - void G1MarkSweep::mark_sweep_phase3() { G1CollectedHeap* g1h = G1CollectedHeap::heap(); // Adjust the pointers to reflect the new locations - GCTraceTime(Trace, gc) tm("Phase 3: Adjust pointers", gc_timer()); + GCTraceTime(Info, gc, phases) tm("Phase 3: Adjust pointers", gc_timer()); // Need cleared claim bits for the roots processing ClassLoaderDataGraph::clear_claimed_marks(); @@ -248,7 +257,7 @@ // Now adjust pointers in remaining weak roots. (All of which should // have been cleared if they pointed to non-surviving objects.) - JNIHandles::weak_oops_do(&always_true, &GenMarkSweep::adjust_pointer_closure); + JNIHandles::weak_oops_do(&GenMarkSweep::adjust_pointer_closure); if (G1StringDedup::is_enabled()) { G1StringDedup::oops_do(&GenMarkSweep::adjust_pointer_closure); @@ -291,7 +300,7 @@ // to use a higher index (saved from phase2) when verifying perm_gen. G1CollectedHeap* g1h = G1CollectedHeap::heap(); - GCTraceTime(Trace, gc) tm("Phase 4: Move objects", gc_timer()); + GCTraceTime(Info, gc, phases) tm("Phase 4: Move objects", gc_timer()); G1SpaceCompactClosure blk; g1h->heap_region_iterate(&blk); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/g1MonitoringSupport.cpp --- a/hotspot/src/share/vm/gc/g1/g1MonitoringSupport.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/g1/g1MonitoringSupport.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -178,7 +178,7 @@ // of a GC). 
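  // The survivor length is read directly off the young list below, rather than from a value
  // previously recorded on the policy.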
uint young_list_length = g1->young_list()->length(); - uint survivor_list_length = g1->g1_policy()->recorded_survivor_regions(); + uint survivor_list_length = g1->young_list()->survivor_length(); assert(young_list_length >= survivor_list_length, "invariant"); uint eden_list_length = young_list_length - survivor_list_length; // Max length includes any potential extensions to the young gen diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/g1MonitoringSupport.hpp diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/g1OopClosures.inline.hpp --- a/hotspot/src/share/vm/gc/g1/g1OopClosures.inline.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/g1/g1OopClosures.inline.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -176,13 +176,22 @@ #endif // ASSERT assert(_from != NULL, "from region must be non-NULL"); - assert(_from->is_in_reserved(p), "p is not in from"); + assert(_from->is_in_reserved(p) || + (_from->is_humongous() && + _g1->heap_region_containing(p)->is_humongous() && + _from->humongous_start_region() == _g1->heap_region_containing(p)->humongous_start_region()), + "p " PTR_FORMAT " is not in the same region %u or part of the correct humongous object starting at region %u.", + p2i(p), _from->hrm_index(), _from->humongous_start_region()->hrm_index()); HeapRegion* to = _g1->heap_region_containing(obj); if (_from == to) { // Normally this closure should only be called with cross-region references. // But since Java threads are manipulating the references concurrently and we // reload the values things may have changed. + // Also this check lets slip through references from a humongous continues region + // to its humongous start region, as they are in different regions, and adds a + // remembered set entry. This is benign (apart from memory usage), as we never + // try to either evacuate or eager reclaim these kind of regions. 
return; } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/g1ParScanThreadState.cpp --- a/hotspot/src/share/vm/gc/g1/g1ParScanThreadState.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/g1/g1ParScanThreadState.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -25,6 +25,7 @@ #include "precompiled.hpp" #include "gc/g1/g1Allocator.inline.hpp" #include "gc/g1/g1CollectedHeap.inline.hpp" +#include "gc/g1/g1CollectionSet.hpp" #include "gc/g1/g1OopClosures.inline.hpp" #include "gc/g1/g1ParScanThreadState.inline.hpp" #include "gc/g1/g1RootClosures.hpp" @@ -80,7 +81,7 @@ _plab_allocator->flush_and_retire_stats(); _g1h->g1_policy()->record_age_table(&_age_table); - uint length = _g1h->g1_policy()->young_cset_region_length(); + uint length = _g1h->collection_set()->young_region_length(); for (uint region_index = 0; region_index < length; region_index++) { surviving_young_words[region_index] += _surviving_young_words[region_index]; } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/g1RemSet.cpp --- a/hotspot/src/share/vm/gc/g1/g1RemSet.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/g1/g1RemSet.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -39,6 +39,7 @@ #include "gc/g1/heapRegionManager.inline.hpp" #include "gc/g1/heapRegionRemSet.hpp" #include "memory/iterator.hpp" +#include "memory/resourceArea.hpp" #include "oops/oop.inline.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/intHisto.hpp" @@ -536,7 +537,7 @@ current.initialize(this); _prev_period_summary.subtract_from(&current); - LogHandle(gc, remset) log; + Log(gc, remset) log; log.trace("%s", header); ResourceMark rm; _prev_period_summary.print_on(log.trace_stream()); @@ -546,7 +547,7 @@ } void G1RemSet::print_summary_info() { - LogHandle(gc, remset, exit) log; + Log(gc, remset, exit) log; if (log.is_trace()) { log.trace(" Cumulative RS summary"); G1RemSetSummary current; diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/g1StringDedup.cpp --- a/hotspot/src/share/vm/gc/g1/g1StringDedup.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/g1/g1StringDedup.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -47,7 +47,7 @@ void G1StringDedup::stop() { assert(is_enabled(), "String deduplication not enabled"); - G1StringDedupThread::stop(); + G1StringDedupThread::thread()->stop(); } bool G1StringDedup::is_candidate_from_mark(oop obj) { diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/g1StringDedupTable.cpp --- a/hotspot/src/share/vm/gc/g1/g1StringDedupTable.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/g1/g1StringDedupTable.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -570,7 +570,7 @@ } void G1StringDedupTable::print_statistics() { - LogHandle(gc, stringdedup) log; + Log(gc, stringdedup) log; log.debug(" [Table]"); log.debug(" [Memory Usage: " G1_STRDEDUP_BYTES_FORMAT_NS "]", G1_STRDEDUP_BYTES_PARAM(_table->_size * sizeof(G1StringDedupEntry*) + (_table->_entries + _entry_cache->size()) * sizeof(G1StringDedupEntry))); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/g1StringDedupThread.cpp --- a/hotspot/src/share/vm/gc/g1/g1StringDedupThread.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/g1/g1StringDedupThread.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -81,11 +81,9 @@ StringTable::shared_oops_do(&sharedStringDedup); } -void G1StringDedupThread::run() { +void G1StringDedupThread::run_service() { G1StringDedupStat total_stat; - initialize_in_thread(); - wait_for_universe_init(); deduplicate_shared_strings(total_stat); // Main loop @@ -96,7 +94,7 @@ // Wait for the queue to become non-empty G1StringDedupQueue::wait(); - if (_should_terminate) { + if (should_terminate()) { break; } @@ -133,23 +131,10 @@ } } - terminate(); } -void G1StringDedupThread::stop() { - { - MonitorLockerEx ml(Terminator_lock); - _thread->_should_terminate = true; - } - +void G1StringDedupThread::stop_service() { G1StringDedupQueue::cancel_wait(); - - { - MonitorLockerEx ml(Terminator_lock); - while (!_thread->_has_terminated) { - ml.wait(); - } - } } void G1StringDedupThread::print(const G1StringDedupStat& last_stat, const G1StringDedupStat& total_stat) { diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/g1StringDedupThread.hpp --- a/hotspot/src/share/vm/gc/g1/g1StringDedupThread.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/g1/g1StringDedupThread.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -45,14 +45,14 @@ void print(const G1StringDedupStat& last_stat, const G1StringDedupStat& total_stat); + void run_service(); + void stop_service(); + public: static void create(); - static void stop(); static G1StringDedupThread* thread(); - virtual void run(); - void deduplicate_shared_strings(G1StringDedupStat& stat); }; diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/g1YoungGenSizer.cpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/src/share/vm/gc/g1/g1YoungGenSizer.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,122 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "gc/g1/g1YoungGenSizer.hpp" +#include "gc/g1/heapRegion.hpp" + +G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(true), + _min_desired_young_length(0), _max_desired_young_length(0) { + if (FLAG_IS_CMDLINE(NewRatio)) { + if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) { + warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio"); + } else { + _sizer_kind = SizerNewRatio; + _adaptive_size = false; + return; + } + } + + if (NewSize > MaxNewSize) { + if (FLAG_IS_CMDLINE(MaxNewSize)) { + warning("NewSize (" SIZE_FORMAT "k) is greater than the MaxNewSize (" SIZE_FORMAT "k). 
" + "A new max generation size of " SIZE_FORMAT "k will be used.", + NewSize/K, MaxNewSize/K, NewSize/K); + } + MaxNewSize = NewSize; + } + + if (FLAG_IS_CMDLINE(NewSize)) { + _min_desired_young_length = MAX2((uint) (NewSize / HeapRegion::GrainBytes), + 1U); + if (FLAG_IS_CMDLINE(MaxNewSize)) { + _max_desired_young_length = + MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes), + 1U); + _sizer_kind = SizerMaxAndNewSize; + _adaptive_size = _min_desired_young_length == _max_desired_young_length; + } else { + _sizer_kind = SizerNewSizeOnly; + } + } else if (FLAG_IS_CMDLINE(MaxNewSize)) { + _max_desired_young_length = + MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes), + 1U); + _sizer_kind = SizerMaxNewSizeOnly; + } +} + +uint G1YoungGenSizer::calculate_default_min_length(uint new_number_of_heap_regions) { + uint default_value = (new_number_of_heap_regions * G1NewSizePercent) / 100; + return MAX2(1U, default_value); +} + +uint G1YoungGenSizer::calculate_default_max_length(uint new_number_of_heap_regions) { + uint default_value = (new_number_of_heap_regions * G1MaxNewSizePercent) / 100; + return MAX2(1U, default_value); +} + +void G1YoungGenSizer::recalculate_min_max_young_length(uint number_of_heap_regions, uint* min_young_length, uint* max_young_length) { + assert(number_of_heap_regions > 0, "Heap must be initialized"); + + switch (_sizer_kind) { + case SizerDefaults: + *min_young_length = calculate_default_min_length(number_of_heap_regions); + *max_young_length = calculate_default_max_length(number_of_heap_regions); + break; + case SizerNewSizeOnly: + *max_young_length = calculate_default_max_length(number_of_heap_regions); + *max_young_length = MAX2(*min_young_length, *max_young_length); + break; + case SizerMaxNewSizeOnly: + *min_young_length = calculate_default_min_length(number_of_heap_regions); + *min_young_length = MIN2(*min_young_length, *max_young_length); + break; + case SizerMaxAndNewSize: + // Do nothing. Values set on the command line, don't update them at runtime. + break; + case SizerNewRatio: + *min_young_length = number_of_heap_regions / (NewRatio + 1); + *max_young_length = *min_young_length; + break; + default: + ShouldNotReachHere(); + } + + assert(*min_young_length <= *max_young_length, "Invalid min/max young gen size values"); +} + +uint G1YoungGenSizer::max_young_length(uint number_of_heap_regions) { + // We need to pass the desired values because recalculation may not update these + // values in some cases. + uint temp = _min_desired_young_length; + uint result = _max_desired_young_length; + recalculate_min_max_young_length(number_of_heap_regions, &temp, &result); + return result; +} + +void G1YoungGenSizer::heap_size_changed(uint new_number_of_heap_regions) { + recalculate_min_max_young_length(new_number_of_heap_regions, &_min_desired_young_length, + &_max_desired_young_length); +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/g1YoungGenSizer.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/src/share/vm/gc/g1/g1YoungGenSizer.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,101 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "memory/allocation.hpp" + +// There are three command line options related to the young gen size: +// NewSize, MaxNewSize and NewRatio (There is also -Xmn, but that is +// just a short form for NewSize==MaxNewSize). G1 will use its internal +// heuristics to calculate the actual young gen size, so these options +// basically only limit the range within which G1 can pick a young gen +// size. Also, these are general options taking byte sizes. G1 will +// internally work with a number of regions instead. So, some rounding +// will occur. +// +// If nothing related to the the young gen size is set on the command +// line we should allow the young gen to be between G1NewSizePercent +// and G1MaxNewSizePercent of the heap size. This means that every time +// the heap size changes, the limits for the young gen size will be +// recalculated. +// +// If only -XX:NewSize is set we should use the specified value as the +// minimum size for young gen. Still using G1MaxNewSizePercent of the +// heap as maximum. +// +// If only -XX:MaxNewSize is set we should use the specified value as the +// maximum size for young gen. Still using G1NewSizePercent of the heap +// as minimum. +// +// If -XX:NewSize and -XX:MaxNewSize are both specified we use these values. +// No updates when the heap size changes. There is a special case when +// NewSize==MaxNewSize. This is interpreted as "fixed" and will use a +// different heuristic for calculating the collection set when we do mixed +// collection. +// +// If only -XX:NewRatio is set we should use the specified ratio of the heap +// as both min and max. This will be interpreted as "fixed" just like the +// NewSize==MaxNewSize case above. But we will update the min and max +// every time the heap size changes. +// +// NewSize and MaxNewSize override NewRatio. So, NewRatio is ignored if it is +// combined with either NewSize or MaxNewSize. (A warning message is printed.) +class G1YoungGenSizer : public CHeapObj { +private: + enum SizerKind { + SizerDefaults, + SizerNewSizeOnly, + SizerMaxNewSizeOnly, + SizerMaxAndNewSize, + SizerNewRatio + }; + SizerKind _sizer_kind; + uint _min_desired_young_length; + uint _max_desired_young_length; + bool _adaptive_size; + uint calculate_default_min_length(uint new_number_of_heap_regions); + uint calculate_default_max_length(uint new_number_of_heap_regions); + + // Update the given values for minimum and maximum young gen length in regions + // given the number of heap regions depending on the kind of sizing algorithm. + void recalculate_min_max_young_length(uint number_of_heap_regions, uint* min_young_length, uint* max_young_length); + +public: + G1YoungGenSizer(); + // Calculate the maximum length of the young gen given the number of regions + // depending on the sizing algorithm. 
+ uint max_young_length(uint number_of_heap_regions); + + void heap_size_changed(uint new_number_of_heap_regions); + uint min_desired_young_length() { + return _min_desired_young_length; + } + uint max_desired_young_length() { + return _max_desired_young_length; + } + + bool adaptive_young_list_length() const { + return _adaptive_size; + } +}; + diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/g1YoungRemSetSamplingThread.cpp --- a/hotspot/src/share/vm/gc/g1/g1YoungRemSetSamplingThread.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/g1/g1YoungRemSetSamplingThread.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -25,38 +25,13 @@ #include "precompiled.hpp" #include "gc/g1/g1CollectedHeap.inline.hpp" #include "gc/g1/g1CollectorPolicy.hpp" +#include "gc/g1/g1CollectionSet.hpp" #include "gc/g1/g1YoungRemSetSamplingThread.hpp" #include "gc/g1/heapRegion.inline.hpp" #include "gc/g1/heapRegionRemSet.hpp" #include "gc/g1/suspendibleThreadSet.hpp" #include "runtime/mutexLocker.hpp" -void G1YoungRemSetSamplingThread::run() { - initialize_in_thread(); - wait_for_universe_init(); - - run_service(); - - terminate(); -} - -void G1YoungRemSetSamplingThread::stop() { - // it is ok to take late safepoints here, if needed - { - MutexLockerEx mu(Terminator_lock); - _should_terminate = true; - } - - stop_service(); - - { - MutexLockerEx mu(Terminator_lock); - while (!_has_terminated) { - Terminator_lock->wait(); - } - } -} - G1YoungRemSetSamplingThread::G1YoungRemSetSamplingThread() : ConcurrentGCThread(), _monitor(Mutex::nonleaf, @@ -69,7 +44,7 @@ void G1YoungRemSetSamplingThread::sleep_before_next_cycle() { MutexLockerEx x(&_monitor, Mutex::_no_safepoint_check_flag); - if (!_should_terminate) { + if (!should_terminate()) { uintx waitms = G1ConcRefinementServiceIntervalMillis; // 300, really should be? _monitor.wait(Mutex::_no_safepoint_check_flag, waitms); } @@ -78,7 +53,7 @@ void G1YoungRemSetSamplingThread::run_service() { double vtime_start = os::elapsedVTime(); - while (!_should_terminate) { + while (!should_terminate()) { sample_young_list_rs_lengths(); if (os::supports_vtime()) { @@ -114,7 +89,7 @@ // retired as the current allocation region). 
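The new g1YoungGenSizer files above implement the sizing rules described in their header comment: defaults come from G1NewSizePercent and G1MaxNewSizePercent, NewSize and MaxNewSize pin one or both bounds, and NewRatio fixes both bounds as a fraction of the heap. The sketch below reproduces that decision table in region units as a plain function; young_bounds and its parameters are illustrative stand-ins, not the HotSpot class, and the percentages in main are just example values.

#include <algorithm>
#include <cstdio>

// Simplified young-gen bounds in units of regions, following the cases the
// header comment describes (defaults, NewSize only, MaxNewSize only, both,
// NewRatio). Negative values mean "flag not set on the command line".
struct YoungBounds { unsigned min_regions; unsigned max_regions; };

YoungBounds young_bounds(unsigned heap_regions,
                         unsigned new_size_percent,      // e.g. G1NewSizePercent
                         unsigned max_new_size_percent,  // e.g. G1MaxNewSizePercent
                         long new_size_regions,
                         long max_new_size_regions,
                         long new_ratio) {
  unsigned def_min = std::max(1u, heap_regions * new_size_percent / 100);
  unsigned def_max = std::max(1u, heap_regions * max_new_size_percent / 100);
  if (new_size_regions >= 0 && max_new_size_regions >= 0) {
    // Both set: fixed bounds, never recomputed when the heap resizes.
    return { (unsigned)new_size_regions, (unsigned)max_new_size_regions };
  }
  if (new_size_regions >= 0) {
    unsigned mn = (unsigned)new_size_regions;             // NewSize only
    return { mn, std::max(mn, def_max) };
  }
  if (max_new_size_regions >= 0) {
    unsigned mx = (unsigned)max_new_size_regions;         // MaxNewSize only
    return { std::min(def_min, mx), mx };
  }
  if (new_ratio >= 0) {
    unsigned fixed = heap_regions / (unsigned)(new_ratio + 1);  // NewRatio only
    return { fixed, fixed };
  }
  return { def_min, def_max };                            // nothing set: percentages
}

int main() {
  // 2048 regions, example 5%/60% percentages, no explicit flags.
  YoungBounds b = young_bounds(2048, 5, 60, -1, -1, -1);
  std::printf("young gen between %u and %u regions\n", b.min_regions, b.max_regions);
  return 0;
}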
if (hr->in_collection_set()) { // Update the collection set policy information for this region - g1p->update_incremental_cset_info(hr, rs_length); + g1h->collection_set()->update_young_region_prediction(hr, rs_length); } ++regions_visited; diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/g1YoungRemSetSamplingThread.hpp --- a/hotspot/src/share/vm/gc/g1/g1YoungRemSetSamplingThread.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/g1/g1YoungRemSetSamplingThread.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -55,9 +55,6 @@ public: G1YoungRemSetSamplingThread(); double vtime_accum() { return _vtime_accum; } - - virtual void run(); - void stop(); }; #endif // SHARE_VM_GC_G1_G1YOUNGREMSETSAMPLINGTHREAD_HPP diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/g1_globals.hpp --- a/hotspot/src/share/vm/gc/g1/g1_globals.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/g1/g1_globals.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -233,10 +233,6 @@ "Raise a fatal VM exit out of memory failure in the event " \ " that heap expansion fails due to running out of swap.") \ \ - develop(uintx, G1ConcMarkForceOverflow, 0, \ - "The number of times we'll force an overflow during " \ - "concurrent marking") \ - \ experimental(uintx, G1MaxNewSizePercent, 60, \ "Percentage (0-100) of the heap size to use as default " \ " maximum young gen size.") \ diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/heapRegion.cpp --- a/hotspot/src/share/vm/gc/g1/heapRegion.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/g1/heapRegion.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -34,10 +34,10 @@ #include "gc/g1/heapRegionRemSet.hpp" #include "gc/g1/heapRegionTracer.hpp" #include "gc/shared/genOopClosures.inline.hpp" -#include "gc/shared/liveRange.hpp" #include "gc/shared/space.inline.hpp" #include "logging/log.hpp" #include "memory/iterator.hpp" +#include "memory/resourceArea.hpp" #include "oops/oop.inline.hpp" #include "runtime/atomic.inline.hpp" #include "runtime/orderAccess.inline.hpp" @@ -695,7 +695,7 @@ template void verify_liveness(T* p) { T heap_oop = oopDesc::load_heap_oop(p); - LogHandle(gc, verify) log; + Log(gc, verify) log; if (!oopDesc::is_null(heap_oop)) { oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); bool failed = false; @@ -749,7 +749,7 @@ template void verify_remembered_set(T* p) { T heap_oop = oopDesc::load_heap_oop(p); - LogHandle(gc, verify) log; + Log(gc, verify) log; if (!oopDesc::is_null(heap_oop)) { oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); bool failed = false; diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/heapRegionBounds.hpp --- a/hotspot/src/share/vm/gc/g1/heapRegionBounds.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/g1/heapRegionBounds.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
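Several hunks above drop the hand-written run()/stop() boilerplate from G1StringDedupThread and G1YoungRemSetSamplingThread and let the shared base class drive virtual run_service()/stop_service() with a should_terminate() check in the work loops. Assuming that is the intent of the refactoring, here is a small standalone analogue built on std::thread; ServiceThread and SamplingThread are invented names, not the HotSpot classes.

#include <atomic>
#include <chrono>
#include <cstdio>
#include <thread>

// The base class owns the start/terminate handshake; subclasses only provide
// run_service() (the work loop) and stop_service() (wake the loop so it exits).
class ServiceThread {
  std::thread       _thread;
  std::atomic<bool> _should_terminate{false};
protected:
  bool should_terminate() const { return _should_terminate; }
  virtual void run_service()  = 0;
  virtual void stop_service() = 0;
public:
  virtual ~ServiceThread() {}
  void create_and_start() { _thread = std::thread([this] { run_service(); }); }
  void stop() {                       // replaces per-subclass stop() boilerplate
    _should_terminate = true;
    stop_service();                   // e.g. cancel a wait the loop may be blocked in
    _thread.join();
  }
};

class SamplingThread : public ServiceThread {
  void run_service() override {
    while (!should_terminate()) {     // the loop condition the patch switches to
      std::printf("sampling...\n");
      std::this_thread::sleep_for(std::chrono::milliseconds(50));
    }
  }
  void stop_service() override {}     // nothing blocking to cancel in this toy version
};

int main() {
  SamplingThread t;
  t.create_and_start();
  std::this_thread::sleep_for(std::chrono::milliseconds(200));
  t.stop();
  return 0;
}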
* * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,8 @@ #ifndef SHARE_VM_GC_G1_HEAPREGIONBOUNDS_HPP #define SHARE_VM_GC_G1_HEAPREGIONBOUNDS_HPP +#include "memory/allocation.hpp" + class HeapRegionBounds : public AllStatic { private: // Minimum region size; we won't go lower than that. diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/ptrQueue.cpp --- a/hotspot/src/share/vm/gc/g1/ptrQueue.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/g1/ptrQueue.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -43,16 +43,12 @@ void PtrQueue::flush_impl() { if (!_permanent && _buf != NULL) { - if (_index == _sz) { + BufferNode* node = BufferNode::make_node_from_buffer(_buf, _index); + if (is_empty()) { // No work to do. - qset()->deallocate_buffer(_buf); + qset()->deallocate_buffer(node); } else { - // We must NULL out the unused entries, then enqueue. - size_t limit = byte_index_to_index(_index); - for (size_t i = 0; i < limit; ++i) { - _buf[i] = NULL; - } - qset()->enqueue_complete_buffer(_buf); + qset()->enqueue_complete_buffer(node); } _buf = NULL; _index = 0; @@ -74,7 +70,7 @@ assert(_index <= _sz, "Invariant."); } -void PtrQueue::locking_enqueue_completed_buffer(void** buf) { +void PtrQueue::locking_enqueue_completed_buffer(BufferNode* node) { assert(_lock->owned_by_self(), "Required."); // We have to unlock _lock (which may be Shared_DirtyCardQ_lock) before @@ -82,7 +78,7 @@ // have the same rank and we may get the "possible deadlock" message _lock->unlock(); - qset()->enqueue_complete_buffer(buf); + qset()->enqueue_complete_buffer(node); // We must relock only because the caller will unlock, for the normal // case. _lock->lock_without_safepoint_check(); @@ -157,10 +153,9 @@ return BufferNode::make_buffer_from_node(node); } -void PtrQueueSet::deallocate_buffer(void** buf) { +void PtrQueueSet::deallocate_buffer(BufferNode* node) { assert(_sz > 0, "Didn't set a buffer size."); MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag); - BufferNode *node = BufferNode::make_node_from_buffer(buf); node->set_next(_fl_owner->_buf_free_list); _fl_owner->_buf_free_list = node; _fl_owner->_buf_free_list_sz++; @@ -211,10 +206,10 @@ // preventing the subsequent the multiple enqueue, and // install a newly allocated buffer below. - void** buf = _buf; // local pointer to completed buffer + BufferNode* node = BufferNode::make_node_from_buffer(_buf, _index); _buf = NULL; // clear shared _buf field - locking_enqueue_completed_buffer(buf); // enqueue completed buffer + locking_enqueue_completed_buffer(node); // enqueue completed buffer // While the current thread was enqueueing the buffer another thread // may have a allocated a new buffer and inserted it into this pointer @@ -224,9 +219,11 @@ if (_buf != NULL) return; } else { - if (qset()->process_or_enqueue_complete_buffer(_buf)) { + BufferNode* node = BufferNode::make_node_from_buffer(_buf, _index); + if (qset()->process_or_enqueue_complete_buffer(node)) { // Recycle the buffer. No allocation. - _sz = qset()->buffer_size(); + assert(_buf == BufferNode::make_buffer_from_node(node), "invariant"); + assert(_sz == qset()->buffer_size(), "invariant"); _index = _sz; return; } @@ -238,12 +235,12 @@ _index = _sz; } -bool PtrQueueSet::process_or_enqueue_complete_buffer(void** buf) { +bool PtrQueueSet::process_or_enqueue_complete_buffer(BufferNode* node) { if (Thread::current()->is_Java_thread()) { // We don't lock. It is fine to be epsilon-precise here. 
if (_max_completed_queue == 0 || _max_completed_queue > 0 && _n_completed_buffers >= _max_completed_queue + _completed_queue_padding) { - bool b = mut_process_buffer(buf); + bool b = mut_process_buffer(node); if (b) { // True here means that the buffer hasn't been deallocated and the caller may reuse it. return true; @@ -251,14 +248,12 @@ } } // The buffer will be enqueued. The caller will have to get a new one. - enqueue_complete_buffer(buf); + enqueue_complete_buffer(node); return false; } -void PtrQueueSet::enqueue_complete_buffer(void** buf, size_t index) { +void PtrQueueSet::enqueue_complete_buffer(BufferNode* cbn) { MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag); - BufferNode* cbn = BufferNode::make_node_from_buffer(buf); - cbn->set_index(index); cbn->set_next(NULL); if (_completed_buffers_tail == NULL) { assert(_completed_buffers_head == NULL, "Well-formedness"); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/ptrQueue.hpp --- a/hotspot/src/share/vm/gc/g1/ptrQueue.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/g1/ptrQueue.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -33,6 +33,7 @@ // the addresses of modified old-generation objects. This type supports // this operation. +class BufferNode; class PtrQueueSet; class PtrQueue VALUE_OBJ_CLASS_SPEC { friend class VMStructs; @@ -104,7 +105,7 @@ // get into an infinite loop). virtual bool should_enqueue_buffer() { return true; } void handle_zero_index(); - void locking_enqueue_completed_buffer(void** buf); + void locking_enqueue_completed_buffer(BufferNode* node); void enqueue_known_active(void* ptr); @@ -136,6 +137,10 @@ return ind / sizeof(void*); } + static size_t index_to_byte_index(size_t ind) { + return ind * sizeof(void*); + } + // To support compiler. protected: @@ -186,10 +191,13 @@ // Free a BufferNode. static void deallocate(BufferNode* node); - // Return the BufferNode containing the buffer. - static BufferNode* make_node_from_buffer(void** buffer) { - return reinterpret_cast( - reinterpret_cast(buffer) - buffer_offset()); + // Return the BufferNode containing the buffer, after setting its index. + static BufferNode* make_node_from_buffer(void** buffer, size_t index) { + BufferNode* node = + reinterpret_cast( + reinterpret_cast(buffer) - buffer_offset()); + node->set_index(index); + return node; } // Return the buffer for node. @@ -243,7 +251,7 @@ // A mutator thread does the the work of processing a buffer. // Returns "true" iff the work is complete (and the buffer may be // deallocated). - virtual bool mut_process_buffer(void** buf) { + virtual bool mut_process_buffer(BufferNode* node) { ShouldNotReachHere(); return false; } @@ -267,13 +275,13 @@ // Return an empty buffer to the free list. The "buf" argument is // required to be a pointer to the head of an array of length "_sz". - void deallocate_buffer(void** buf); + void deallocate_buffer(BufferNode* node); // Declares that "buf" is a complete buffer. - void enqueue_complete_buffer(void** buf, size_t index = 0); + void enqueue_complete_buffer(BufferNode* node); // To be invoked by the mutator. 
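The ptrQueue changes above stop passing raw void** buffers around: a buffer is handed to the queue set as a node that records how full the buffer is, so unused slots no longer need to be nulled out. A reduced, self-contained illustration of that layout follows; it mirrors the node-in-front-of-buffer idea but is not the HotSpot BufferNode, and the allocate/buffer helpers are invented for the example.

#include <cstddef>
#include <cstdio>
#include <cstdlib>

// The node sits directly in front of the buffer storage and carries the fill
// index, so a consumer knows which slots are valid without any NULL padding.
struct BufferNode {
  BufferNode* next;
  size_t      index;   // first valid slot; buffers fill from the end toward 0

  static BufferNode* allocate(size_t capacity) {
    void* mem = std::malloc(sizeof(BufferNode) + capacity * sizeof(void*));
    BufferNode* n = static_cast<BufferNode*>(mem);
    n->next = nullptr;
    n->index = capacity;          // empty: no valid entries yet
    return n;
  }
  void** buffer() { return reinterpret_cast<void**>(this + 1); }

  static BufferNode* make_node_from_buffer(void** buf, size_t index) {
    BufferNode* n = reinterpret_cast<BufferNode*>(
        reinterpret_cast<char*>(buf) - sizeof(BufferNode));
    n->index = index;             // record how full the buffer is
    return n;
  }
};

int main() {
  const size_t capacity = 4;
  BufferNode* node = BufferNode::allocate(capacity);
  void** buf = node->buffer();
  int a = 1, b = 2;
  buf[3] = &a;                    // queues in this scheme fill downwards,
  buf[2] = &b;                    // so two entries leaves index == 2
  BufferNode* again = BufferNode::make_node_from_buffer(buf, 2);
  std::printf("valid entries: %zu\n", capacity - again->index);   // prints 2
  std::free(node);
  return 0;
}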
- bool process_or_enqueue_complete_buffer(void** buf); + bool process_or_enqueue_complete_buffer(BufferNode* node); bool completed_buffers_exist_dirty() { return _n_completed_buffers > 0; diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/satbMarkQueue.cpp --- a/hotspot/src/share/vm/gc/g1/satbMarkQueue.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/g1/satbMarkQueue.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -100,6 +100,10 @@ return true; } +inline bool retain_entry(const void* entry, G1CollectedHeap* heap) { + return requires_marking(entry, heap) && !heap->isMarkedNext((oop)entry); +} + // This method removes entries from a SATB buffer that will not be // useful to the concurrent marking threads. Entries are retained if // they require marking and are not already marked. Retained entries @@ -114,43 +118,28 @@ return; } - // Used for sanity checking at the end of the loop. - DEBUG_ONLY(size_t entries = 0; size_t retained = 0;) + assert(_index <= _sz, "invariant"); - assert(_index <= _sz, "invariant"); - void** limit = &buf[byte_index_to_index(_index)]; - void** src = &buf[byte_index_to_index(_sz)]; - void** dst = src; - - while (limit < src) { - DEBUG_ONLY(entries += 1;) - --src; + // Two-fingered compaction toward the end. + void** src = &buf[byte_index_to_index(_index)]; + void** dst = &buf[byte_index_to_index(_sz)]; + for ( ; src < dst; ++src) { + // Search low to high for an entry to keep. void* entry = *src; - // NULL the entry so that unused parts of the buffer contain NULLs - // at the end. If we are going to retain it we will copy it to its - // final place. If we have retained all entries we have visited so - // far, we'll just end up copying it to the same place. - *src = NULL; - - if (requires_marking(entry, g1h) && !g1h->isMarkedNext((oop)entry)) { - --dst; - assert(*dst == NULL, "filtering destination should be clear"); - *dst = entry; - DEBUG_ONLY(retained += 1;); + if (retain_entry(entry, g1h)) { + // Found keeper. Search high to low for an entry to discard. + while (src < --dst) { + if (!retain_entry(*dst, g1h)) { + *dst = entry; // Replace discard with keeper. + break; + } + } + // If discard search failed (src == dst), the outer loop will also end. } } - size_t new_index = pointer_delta(dst, buf, 1); - -#ifdef ASSERT - size_t entries_calc = (_sz - _index) / sizeof(void*); - assert(entries == entries_calc, "the number of entries we counted " - "should match the number of entries we calculated"); - size_t retained_calc = (_sz - new_index) / sizeof(void*); - assert(retained == retained_calc, "the number of retained entries we counted " - "should match the number of retained entries we calculated"); -#endif // ASSERT - - _index = new_index; + // dst points to the lowest retained entry, or the end of the buffer + // if all the entries were filtered out. + _index = pointer_delta(dst, buf, 1); } // This method will first apply the above filtering to the buffer. If @@ -286,19 +275,11 @@ } if (nd != NULL) { void **buf = BufferNode::make_buffer_from_node(nd); - // Skip over NULL entries at beginning (e.g. push end) of buffer. - // Filtering can result in non-full completed buffers; see - // should_enqueue_buffer. 
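The reworked SATB filtering above replaces the null-out-and-copy loop with a two-fingered compaction toward the end of the buffer and then records the index of the first retained entry. The following standalone template shows the same compaction on a plain array; filter_to_high_end is an invented name and the predicate stands in for the retain_entry check.

#include <cstdio>

// Two-fingered compaction toward the end of a buffer: 'src' scans upward for
// entries to keep, 'dst' scans downward for entries to drop, and each keeper
// overwrites a dropped slot. Returns the index of the first retained entry,
// so the surviving entries occupy [result, size).
template <typename T, typename Pred>
size_t filter_to_high_end(T* buf, size_t index, size_t size, Pred retain) {
  T* src = buf + index;   // first unfiltered entry
  T* dst = buf + size;    // one past the last slot
  for (; src < dst; ++src) {
    T entry = *src;
    if (retain(entry)) {                 // found a keeper below dst
      while (src < --dst) {
        if (!retain(*dst)) {             // found a discardable slot above src
          *dst = entry;
          break;
        }
      }
      // If src caught up with dst, everything above is already retained.
    }
  }
  return static_cast<size_t>(dst - buf);
}

int main() {
  int buf[8] = {0, 0, 3, -1, 4, -2, 5, 6};   // slots [2, 8) hold live entries
  size_t first = filter_to_high_end(buf, 2, 8, [](int v) { return v > 0; });
  for (size_t i = first; i < 8; ++i) {
    std::printf("%d ", buf[i]);              // the four positive entries survive
  }
  std::printf("\n");
  return 0;
}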
- assert(_sz % sizeof(void*) == 0, "invariant"); - size_t limit = SATBMarkQueue::byte_index_to_index(_sz); - for (size_t i = 0; i < limit; ++i) { - if (buf[i] != NULL) { - // Found the end of the block of NULLs; process the remainder. - cl->do_buffer(buf + i, limit - i); - break; - } - } - deallocate_buffer(buf); + size_t index = SATBMarkQueue::byte_index_to_index(nd->index()); + size_t size = SATBMarkQueue::byte_index_to_index(_sz); + assert(index <= size, "invariant"); + cl->do_buffer(buf + index, size - index); + deallocate_buffer(nd); return true; } else { return false; @@ -355,7 +336,7 @@ while (buffers_to_delete != NULL) { BufferNode* nd = buffers_to_delete; buffers_to_delete = nd->next(); - deallocate_buffer(BufferNode::make_buffer_from_node(nd)); + deallocate_buffer(nd); } assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint."); // So we can safely manipulate these queues. diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/satbMarkQueue.hpp --- a/hotspot/src/share/vm/gc/g1/satbMarkQueue.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/g1/satbMarkQueue.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -115,9 +115,8 @@ // If there exists some completed buffer, pop and process it, and // return true. Otherwise return false. Processing a buffer - // consists of applying the closure to the buffer range starting - // with the first non-NULL entry to the end of the buffer; the - // leading entries may be NULL due to filtering. + // consists of applying the closure to the active range of the + // buffer; the leading entries may be excluded due to filtering. bool apply_closure_to_completed_buffer(SATBBufferClosure* cl); #ifndef PRODUCT diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/suspendibleThreadSet.cpp --- a/hotspot/src/share/vm/gc/g1/suspendibleThreadSet.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/g1/suspendibleThreadSet.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,7 @@ #include "precompiled.hpp" #include "gc/g1/suspendibleThreadSet.hpp" #include "runtime/mutexLocker.hpp" +#include "runtime/semaphore.hpp" #include "runtime/thread.inline.hpp" uint SuspendibleThreadSet::_nthreads = 0; @@ -32,6 +33,19 @@ bool SuspendibleThreadSet::_suspend_all = false; double SuspendibleThreadSet::_suspend_all_start = 0.0; +static Semaphore* _synchronize_wakeup = NULL; + +void SuspendibleThreadSet_init() { + assert(_synchronize_wakeup == NULL, "STS already initialized"); + _synchronize_wakeup = new Semaphore(); +} + +bool SuspendibleThreadSet::is_synchronized() { + assert_lock_strong(STS_lock); + assert(_nthreads_stopped <= _nthreads, "invariant"); + return _nthreads_stopped == _nthreads; +} + void SuspendibleThreadSet::join() { assert(!Thread::current()->is_suspendible_thread(), "Thread already joined"); MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag); @@ -48,31 +62,30 @@ assert(_nthreads > 0, "Invalid"); DEBUG_ONLY(Thread::current()->clear_suspendible_thread();) _nthreads--; - if (_suspend_all) { - ml.notify_all(); + if (_suspend_all && is_synchronized()) { + // This leave completes a request, so inform the requestor. + _synchronize_wakeup->signal(); } } void SuspendibleThreadSet::yield() { assert(Thread::current()->is_suspendible_thread(), "Must have joined"); + MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag); if (_suspend_all) { - MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag); - if (_suspend_all) { - _nthreads_stopped++; - if (_nthreads_stopped == _nthreads) { - if (ConcGCYieldTimeout > 0) { - double now = os::elapsedTime(); - guarantee((now - _suspend_all_start) * 1000.0 < (double)ConcGCYieldTimeout, "Long delay"); - } + _nthreads_stopped++; + if (is_synchronized()) { + if (ConcGCYieldTimeout > 0) { + double now = os::elapsedTime(); + guarantee((now - _suspend_all_start) * 1000.0 < (double)ConcGCYieldTimeout, "Long delay"); } - ml.notify_all(); - while (_suspend_all) { - ml.wait(Mutex::_no_safepoint_check_flag); - } - assert(_nthreads_stopped > 0, "Invalid"); - _nthreads_stopped--; - ml.notify_all(); + // This yield completes the request, so inform the requestor. + _synchronize_wakeup->signal(); } + while (_suspend_all) { + ml.wait(Mutex::_no_safepoint_check_flag); + } + assert(_nthreads_stopped > 0, "Invalid"); + _nthreads_stopped--; } } @@ -81,18 +94,41 @@ if (ConcGCYieldTimeout > 0) { _suspend_all_start = os::elapsedTime(); } + { + MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag); + assert(!_suspend_all, "Only one at a time"); + _suspend_all = true; + if (is_synchronized()) { + return; + } + } // Release lock before semaphore wait. + + // Semaphore initial count is zero. To reach here, there must be at + // least one not yielded thread in the set, e.g. is_synchronized() + // was false before the lock was released. A thread in the set will + // signal the semaphore iff it is the last to yield or leave while + // there is an active suspend request. So there will be exactly one + // signal, which will increment the semaphore count to one, which + // will then be consumed by this wait, returning it to zero. No + // thread can exit yield or enter the set until desynchronize is + // called, so there are no further opportunities for the semaphore + // being signaled until we get back here again for some later + // synchronize call. 
Hence, there is no need to re-check for + // is_synchronized after the wait; it will always be true there. + _synchronize_wakeup->wait(); + +#ifdef ASSERT MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag); - assert(!_suspend_all, "Only one at a time"); - _suspend_all = true; - while (_nthreads_stopped < _nthreads) { - ml.wait(Mutex::_no_safepoint_check_flag); - } + assert(_suspend_all, "STS not synchronizing"); + assert(is_synchronized(), "STS not synchronized"); +#endif } void SuspendibleThreadSet::desynchronize() { assert(Thread::current()->is_VM_thread(), "Must be the VM thread"); MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag); - assert(_nthreads_stopped == _nthreads, "Invalid"); + assert(_suspend_all, "STS not synchronizing"); + assert(is_synchronized(), "STS not synchronized"); _suspend_all = false; ml.notify_all(); } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/suspendibleThreadSet.hpp --- a/hotspot/src/share/vm/gc/g1/suspendibleThreadSet.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/g1/suspendibleThreadSet.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -45,6 +45,8 @@ static bool _suspend_all; static double _suspend_all_start; + static bool is_synchronized(); + // Add the current thread to the set. May block if a suspension is in progress. static void join(); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/vm_operations_g1.cpp --- a/hotspot/src/share/vm/gc/g1/vm_operations_g1.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/g1/vm_operations_g1.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -205,30 +205,18 @@ } void VM_CGC_Operation::acquire_pending_list_lock() { - assert(_needs_pll, "don't call this otherwise"); - // The caller may block while communicating - // with the SLT thread in order to acquire/release the PLL. - SurrogateLockerThread* slt = ConcurrentMarkThread::slt(); - if (slt != NULL) { - slt->manipulatePLL(SurrogateLockerThread::acquirePLL); - } else { - SurrogateLockerThread::report_missing_slt(); - } + _pending_list_locker.lock(); } void VM_CGC_Operation::release_and_notify_pending_list_lock() { - assert(_needs_pll, "don't call this otherwise"); - // The caller may block while communicating - // with the SLT thread in order to acquire/release the PLL. 
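The SuspendibleThreadSet change above replaces the notify/wait handshake in synchronize() with a semaphore that is signaled exactly once, by whichever thread is the last to yield or leave while a suspend request is pending, as the long comment explains. Here is a self-contained C++20 sketch of that protocol using std::binary_semaphore; the sts namespace and the toy worker loop are illustrative only and simplify away details such as the yield timeout.

#include <atomic>
#include <chrono>
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <semaphore>   // C++20
#include <thread>
#include <vector>

// The requester blocks on a semaphore that receives exactly one signal, from
// whichever worker is the last to stop (or leave) during a suspend request.
namespace sts {
  std::mutex              lock;
  std::condition_variable resume;
  unsigned nthreads = 0, nstopped = 0;
  bool suspend_all = false;
  std::binary_semaphore   wakeup{0};

  bool is_synchronized() { return nstopped == nthreads; }      // lock must be held

  void join() {
    std::unique_lock<std::mutex> g(lock);
    resume.wait(g, [] { return !suspend_all; });               // cannot join mid-request
    ++nthreads;
  }
  void leave() {
    std::lock_guard<std::mutex> g(lock);
    --nthreads;
    if (suspend_all && is_synchronized()) wakeup.release();    // this leave completed it
  }
  void yield() {
    std::unique_lock<std::mutex> g(lock);
    if (!suspend_all) return;
    ++nstopped;
    if (is_synchronized()) wakeup.release();                   // this yield completed it
    resume.wait(g, [] { return !suspend_all; });
    --nstopped;
  }
  void synchronize() {                                         // the "VM thread" side
    {
      std::lock_guard<std::mutex> g(lock);
      suspend_all = true;
      if (is_synchronized()) return;                           // nothing to wait for
    }
    wakeup.acquire();                                          // consume the one signal
  }
  void desynchronize() {
    std::lock_guard<std::mutex> g(lock);
    suspend_all = false;
    resume.notify_all();
  }
}

int main() {
  std::atomic<bool> done{false};
  std::vector<std::thread> workers;
  for (int i = 0; i < 3; ++i) {
    workers.emplace_back([&done] {
      sts::join();
      while (!done) {                                          // periodic safe yields
        std::this_thread::sleep_for(std::chrono::milliseconds(1));
        sts::yield();
      }
      sts::leave();
    });
  }
  std::this_thread::sleep_for(std::chrono::milliseconds(10));
  sts::synchronize();
  std::puts("all suspendible threads are stopped");
  sts::desynchronize();
  done = true;
  for (auto& t : workers) t.join();
  return 0;
}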
- ConcurrentMarkThread::slt()-> - manipulatePLL(SurrogateLockerThread::releaseAndNotifyPLL); + _pending_list_locker.unlock(); } void VM_CGC_Operation::doit() { GCIdMark gc_id_mark(_gc_id); GCTraceCPUTime tcpu; G1CollectedHeap* g1h = G1CollectedHeap::heap(); - GCTraceTime(Info, gc) t(_printGCMessage, g1h->gc_timer_cm(), GCCause::_no_gc, true); + GCTraceTime(Info, gc) t(_printGCMessage, g1h->concurrent_mark()->gc_timer_cm(), GCCause::_no_gc, true); IsGCActiveMark x; _cl->do_void(); } @@ -236,10 +224,9 @@ bool VM_CGC_Operation::doit_prologue() { // Note the relative order of the locks must match that in // VM_GC_Operation::doit_prologue() or deadlocks can occur - if (_needs_pll) { + if (_needs_pending_list_lock) { acquire_pending_list_lock(); } - Heap_lock->lock(); return true; } @@ -248,7 +235,7 @@ // Note the relative order of the unlocks must match that in // VM_GC_Operation::doit_epilogue() Heap_lock->unlock(); - if (_needs_pll) { + if (_needs_pending_list_lock) { release_and_notify_pending_list_lock(); } } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/vm_operations_g1.hpp --- a/hotspot/src/share/vm/gc/g1/vm_operations_g1.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/g1/vm_operations_g1.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,6 +27,7 @@ #include "gc/g1/g1AllocationContext.hpp" #include "gc/shared/gcId.hpp" +#include "gc/shared/referencePendingListLocker.hpp" #include "gc/shared/vmGCOperations.hpp" // VM_operations for the G1 collector. @@ -102,10 +103,11 @@ // Concurrent GC stop-the-world operations such as remark and cleanup; // consider sharing these with CMS's counterparts. class VM_CGC_Operation: public VM_Operation { - VoidClosure* _cl; - const char* _printGCMessage; - bool _needs_pll; - uint _gc_id; + VoidClosure* _cl; + const char* _printGCMessage; + bool _needs_pending_list_lock; + ReferencePendingListLocker _pending_list_locker; + uint _gc_id; protected: // java.lang.ref.Reference support @@ -113,8 +115,8 @@ void release_and_notify_pending_list_lock(); public: - VM_CGC_Operation(VoidClosure* cl, const char *printGCMsg, bool needs_pll) - : _cl(cl), _printGCMessage(printGCMsg), _needs_pll(needs_pll), _gc_id(GCId::current()) { } + VM_CGC_Operation(VoidClosure* cl, const char *printGCMsg, bool needs_pending_list_lock) + : _cl(cl), _printGCMessage(printGCMsg), _needs_pending_list_lock(needs_pending_list_lock), _gc_id(GCId::current()) {} virtual VMOp_Type type() const { return VMOp_CGC_Operation; } virtual void doit(); virtual bool doit_prologue(); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/workerDataArray.cpp --- a/hotspot/src/share/vm/gc/g1/workerDataArray.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/g1/workerDataArray.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -27,67 +27,178 @@ #include "utilities/ostream.hpp" template <> -void WorkerDataArray::WDAPrinter::summary(outputStream* out, const char* title, double min, double avg, double max, double diff, double sum, bool print_sum) { - out->print("%-25s Min: %4.1lf, Avg: %4.1lf, Max: %4.1lf, Diff: %4.1lf", title, min * MILLIUNITS, avg * MILLIUNITS, max * MILLIUNITS, diff* MILLIUNITS); +size_t WorkerDataArray::uninitialized() { + return (size_t)-1; +} + +template <> +double WorkerDataArray::uninitialized() { + return -1.0; +} + +template <> +void WorkerDataArray::WDAPrinter::summary(outputStream* out, double min, double avg, double max, double diff, double sum, bool print_sum) { + out->print(" Min: %4.1lf, Avg: %4.1lf, Max: %4.1lf, Diff: %4.1lf", min * MILLIUNITS, avg * MILLIUNITS, max * MILLIUNITS, diff* MILLIUNITS); if (print_sum) { - out->print_cr(", Sum: %4.1lf", sum * MILLIUNITS); - } else { - out->cr(); + out->print(", Sum: %4.1lf", sum * MILLIUNITS); } } template <> -void WorkerDataArray::WDAPrinter::summary(outputStream* out, const char* title, size_t min, double avg, size_t max, size_t diff, size_t sum, bool print_sum) { - out->print("%-25s Min: " SIZE_FORMAT ", Avg: %4.1lf, Max: " SIZE_FORMAT ", Diff: " SIZE_FORMAT, title, min, avg, max, diff); +void WorkerDataArray::WDAPrinter::summary(outputStream* out, size_t min, double avg, size_t max, size_t diff, size_t sum, bool print_sum) { + out->print(" Min: " SIZE_FORMAT ", Avg: %4.1lf, Max: " SIZE_FORMAT ", Diff: " SIZE_FORMAT, min, avg, max, diff); if (print_sum) { - out->print_cr(", Sum: " SIZE_FORMAT, sum); - } else { - out->cr(); + out->print(", Sum: " SIZE_FORMAT, sum); } } template <> -void WorkerDataArray::WDAPrinter::details(const WorkerDataArray* phase, outputStream* out, uint active_threads) { +void WorkerDataArray::WDAPrinter::details(const WorkerDataArray* phase, outputStream* out) { out->print("%-25s", ""); - for (uint i = 0; i < active_threads; ++i) { - out->print(" %4.1lf", phase->get(i) * 1000.0); + for (uint i = 0; i < phase->_length; ++i) { + double value = phase->get(i); + if (value != phase->uninitialized()) { + out->print(" %4.1lf", phase->get(i) * 1000.0); + } else { + out->print(" -"); + } } out->cr(); } template <> -void WorkerDataArray::WDAPrinter::details(const WorkerDataArray* phase, outputStream* out, uint active_threads) { +void WorkerDataArray::WDAPrinter::details(const WorkerDataArray* phase, outputStream* out) { out->print("%-25s", ""); - for (uint i = 0; i < active_threads; ++i) { - out->print(" " SIZE_FORMAT, phase->get(i)); + for (uint i = 0; i < phase->_length; ++i) { + size_t value = phase->get(i); + if (value != phase->uninitialized()) { + out->print(" " SIZE_FORMAT, phase->get(i)); + } else { + out->print(" -"); + } } out->cr(); } #ifndef PRODUCT -void WorkerDataArray_test() { - const uint length = 3; - const char* title = "Test array"; + +#include "memory/resourceArea.hpp" + +void WorkerDataArray_test_verify_string(const char* expected_string, const char* actual_string) { + const size_t expected_len = strlen(expected_string); + + assert(expected_len == strlen(actual_string), + "Wrong string length, expected " SIZE_FORMAT " but got " SIZE_FORMAT "(Expected '%s' but got: '%s')", + expected_len, strlen(actual_string), expected_string, actual_string); - WorkerDataArray array(length, title); - assert(strncmp(array.title(), title, strlen(title)) == 0 , "Expected titles to match"); + // Can't use strncmp here because floating point values 
use different decimal points for different locales. + // Allow strings to differ in "." vs. "," only. This should still catch most errors. + for (size_t i = 0; i < expected_len; i++) { + char e = expected_string[i]; + char a = actual_string[i]; + if (e != a) { + if ((e == '.' || e == ',') && (a == '.' || a == ',')) { + // Most likely just a difference in locale + } else { + assert(false, "Expected '%s' but got: '%s'", expected_string, actual_string); + } + } + } +} + +void WorkerDataArray_test_verify_array(WorkerDataArray& array, size_t expected_sum, double expected_avg, const char* expected_summary, const char* exected_details) { + const double epsilon = 0.0001; + assert(array.sum() == expected_sum, "Wrong sum, expected: " SIZE_FORMAT " but got: " SIZE_FORMAT, expected_sum, array.sum()); + assert(fabs(array.average() - expected_avg) < epsilon, "Wrong average, expected: %f but got: %f", expected_avg, array.average()); - const size_t expected[length] = {5, 3, 7}; - for (uint i = 0; i < length; i++) { - array.set(i, expected[i]); - } - for (uint i = 0; i < length; i++) { - assert(array.get(i) == expected[i], "Expected elements to match"); + ResourceMark rm; + stringStream out; + array.print_summary_on(&out); + WorkerDataArray_test_verify_string(expected_summary, out.as_string()); + out.reset(); + array.print_details_on(&out); + WorkerDataArray_test_verify_string(exected_details, out.as_string()); +} + +void WorkerDataArray_test_verify_array(WorkerDataArray& array, double expected_sum, double expected_avg, const char* expected_summary, const char* exected_details) { + const double epsilon = 0.0001; + assert(fabs(array.sum() - expected_sum) < epsilon, "Wrong sum, expected: %f but got: %f", expected_sum, array.sum()); + assert(fabs(array.average() - expected_avg) < epsilon, "Wrong average, expected: %f but got: %f", expected_avg, array.average()); + + ResourceMark rm; + stringStream out; + array.print_summary_on(&out); + WorkerDataArray_test_verify_string(expected_summary, out.as_string()); + out.reset(); + array.print_details_on(&out); + WorkerDataArray_test_verify_string(exected_details, out.as_string()); +} + +void WorkerDataArray_test_basic() { + WorkerDataArray array(3, "Test array"); + array.set(0, 5); + array.set(1, 3); + array.set(2, 7); + + WorkerDataArray_test_verify_array(array, 15, 5.0, + "Test array Min: 3, Avg: 5.0, Max: 7, Diff: 4, Sum: 15, Workers: 3\n", + " 5 3 7\n" ); +} + +void WorkerDataArray_test_add() { + WorkerDataArray array(3, "Test array"); + array.set(0, 5); + array.set(1, 3); + array.set(2, 7); + + for (uint i = 0; i < 3; i++) { + array.add(i, 1); } - assert(array.sum(length) == (5 + 3 + 7), "Expected sums to match"); - assert(array.average(length) == 5.0, "Expected averages to match"); + WorkerDataArray_test_verify_array(array, 18, 6.0, + "Test array Min: 4, Avg: 6.0, Max: 8, Diff: 4, Sum: 18, Workers: 3\n", + " 6 4 8\n" ); +} + +void WorkerDataArray_test_with_uninitialized() { + WorkerDataArray array(3, "Test array"); + array.set(0, 5); + array.set(1, WorkerDataArray::uninitialized()); + array.set(2, 7); + + WorkerDataArray_test_verify_array(array, 12, 6, + "Test array Min: 5, Avg: 6.0, Max: 7, Diff: 2, Sum: 12, Workers: 2\n", + " 5 - 7\n" ); +} + +void WorkerDataArray_test_uninitialized() { + WorkerDataArray array(3, "Test array"); + array.set(0, WorkerDataArray::uninitialized()); + array.set(1, WorkerDataArray::uninitialized()); + array.set(2, WorkerDataArray::uninitialized()); - for (uint i = 0; i < length; i++) { - array.add(i, 1); - } - for (uint i = 0; i < 
length; i++) { - assert(array.get(i) == expected[i] + 1, "Expected add to increment values"); - } + WorkerDataArray_test_verify_array(array, 0, 0.0, + "Test array skipped\n", + " - - -\n" ); } + +void WorkerDataArray_test_double_with_uninitialized() { + WorkerDataArray array(3, "Test array"); + array.set(0, 5.1 / MILLIUNITS); + array.set(1, WorkerDataArray::uninitialized()); + array.set(2, 7.2 / MILLIUNITS); + + WorkerDataArray_test_verify_array(array, 12.3 / MILLIUNITS, 6.15 / MILLIUNITS, + "Test array Min: 5.1, Avg: 6.1, Max: 7.2, Diff: 2.1, Sum: 12.3, Workers: 2\n", + " 5.1 - 7.2\n" ); +} + +void WorkerDataArray_test() { + WorkerDataArray_test_basic(); + WorkerDataArray_test_add(); + WorkerDataArray_test_with_uninitialized(); + WorkerDataArray_test_uninitialized(); + WorkerDataArray_test_double_with_uninitialized(); +} + #endif diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/workerDataArray.hpp --- a/hotspot/src/share/vm/gc/g1/workerDataArray.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/g1/workerDataArray.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -32,16 +32,13 @@ template class WorkerDataArray : public CHeapObj { + friend class WDAPrinter; T* _data; uint _length; const char* _title; WorkerDataArray* _thread_work_items; - NOT_PRODUCT(inline T uninitialized() const;) - - void set_all(T value); - public: WorkerDataArray(uint length, const char* title); ~WorkerDataArray(); @@ -52,37 +49,38 @@ return _thread_work_items; } + static T uninitialized(); + void set(uint worker_i, T value); T get(uint worker_i) const; void add(uint worker_i, T value); - double average(uint active_threads) const; - T sum(uint active_threads) const; + // The sum() and average() methods below consider uninitialized slots to be 0. 
+ double average() const; + T sum() const; const char* title() const { return _title; } - void clear(); - - void reset() PRODUCT_RETURN; - void verify(uint active_threads) const PRODUCT_RETURN; + void reset(); + void set_all(T value); private: class WDAPrinter { public: - static void summary(outputStream* out, const char* title, double min, double avg, double max, double diff, double sum, bool print_sum); - static void summary(outputStream* out, const char* title, size_t min, double avg, size_t max, size_t diff, size_t sum, bool print_sum); + static void summary(outputStream* out, double min, double avg, double max, double diff, double sum, bool print_sum); + static void summary(outputStream* out, size_t min, double avg, size_t max, size_t diff, size_t sum, bool print_sum); - static void details(const WorkerDataArray* phase, outputStream* out, uint active_threads); - static void details(const WorkerDataArray* phase, outputStream* out, uint active_threads); + static void details(const WorkerDataArray* phase, outputStream* out); + static void details(const WorkerDataArray* phase, outputStream* out); }; public: - void print_summary_on(outputStream* out, uint active_threads, bool print_sum = true) const; - void print_details_on(outputStream* out, uint active_threads) const; + void print_summary_on(outputStream* out, bool print_sum = true) const; + void print_details_on(outputStream* out) const; }; #endif // SHARE_VM_GC_G1_WORKERDATAARRAY_HPP diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/workerDataArray.inline.hpp --- a/hotspot/src/share/vm/gc/g1/workerDataArray.inline.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/g1/workerDataArray.inline.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -50,7 +50,6 @@ template T WorkerDataArray::get(uint worker_i) const { assert(worker_i < _length, "Worker %d is greater than max: %d", worker_i, _length); - assert(_data[worker_i] != uninitialized(), "No data added for worker %d", worker_i); return _data[worker_i]; } @@ -78,25 +77,31 @@ } template -double WorkerDataArray::average(uint active_threads) const { - return sum(active_threads) / (double) active_threads; +double WorkerDataArray::average() const { + uint contributing_threads = 0; + for (uint i = 0; i < _length; ++i) { + if (get(i) != uninitialized()) { + contributing_threads++; + } + } + if (contributing_threads == 0) { + return 0.0; + } + return sum() / (double) contributing_threads; } template -T WorkerDataArray::sum(uint active_threads) const { - T s = get(0); - for (uint i = 1; i < active_threads; ++i) { - s += get(i); +T WorkerDataArray::sum() const { + T s = 0; + for (uint i = 0; i < _length; ++i) { + if (get(i) != uninitialized()) { + s += get(i); + } } return s; } template -void WorkerDataArray::clear() { - set_all(0); -} - -template void WorkerDataArray::set_all(T value) { for (uint i = 0; i < _length; i++) { _data[i] = value; @@ -104,27 +109,42 @@ } template -void WorkerDataArray::print_summary_on(outputStream* out, uint active_threads, bool print_sum) const { - T max = get(0); - T min = max; - T sum = 0; - for (uint i = 1; i < active_threads; ++i) { - T value = get(i); - max = MAX2(max, value); - min = MIN2(min, value); - sum += value; +void WorkerDataArray::print_summary_on(outputStream* out, bool print_sum) const { + out->print("%-25s", title()); + uint start = 0; + while (start < _length && get(start) == uninitialized()) { + start++; } - T diff = max - min; - double avg = sum / (double) active_threads; - WDAPrinter::summary(out, title(), min, avg, max, diff, sum, print_sum); + if (start < _length) { + T min = get(start); + T max = min; + T sum = 0; + uint contributing_threads = 0; + for (uint i = start; i < _length; ++i) { + T value = get(i); + if (value != uninitialized()) { + max = MAX2(max, value); + min = MIN2(min, value); + sum += value; + contributing_threads++; + } + } + T diff = max - min; + assert(contributing_threads != 0, "Must be since we found a used value for the start index"); + double avg = sum / (double) contributing_threads; + WDAPrinter::summary(out, min, avg, max, diff, sum, print_sum); + out->print_cr(", Workers: %d", contributing_threads); + } else { + // No data for this phase. 
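The WorkerDataArray rework above makes "uninitialized" a first-class sentinel: slots no worker filled in are skipped by sum() and average() and printed as "-" in the per-worker details, instead of being verified against an active-thread count. The sketch below shows the same bookkeeping in a small standalone template; PerWorkerData is an invented name and only mimics the parts of the behavior visible in these hunks.

#include <cstddef>
#include <cstdio>
#include <vector>

// Per-worker data with an "uninitialized" sentinel: unset slots are skipped
// when summing and averaging and are printed as "-" in the details line.
template <typename T>
class PerWorkerData {
  std::vector<T> _data;
public:
  static T uninitialized() { return static_cast<T>(-1); }   // e.g. (size_t)-1
  explicit PerWorkerData(size_t length) : _data(length, uninitialized()) {}

  void set(size_t worker, T value) { _data[worker] = value; }

  T sum() const {
    T s = 0;
    for (T v : _data) if (v != uninitialized()) s += v;
    return s;
  }
  double average() const {
    size_t contributing = 0;
    for (T v : _data) if (v != uninitialized()) ++contributing;
    return contributing == 0 ? 0.0 : static_cast<double>(sum()) / contributing;
  }
  void print_details() const {
    for (T v : _data) {
      if (v != uninitialized()) std::printf(" %zu", static_cast<size_t>(v));
      else                      std::printf(" -");
    }
    std::printf("\n");
  }
};

int main() {
  PerWorkerData<size_t> array(3);
  array.set(0, 5);
  array.set(2, 7);                 // worker 1 never reports
  array.print_details();           // " 5 - 7"
  std::printf("sum=%zu avg=%.1f\n", array.sum(), array.average());  // sum=12 avg=6.0
  return 0;
}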
+ out->print_cr(" skipped"); + } } template -void WorkerDataArray::print_details_on(outputStream* out, uint active_threads) const { - WDAPrinter::details(this, out, active_threads); +void WorkerDataArray::print_details_on(outputStream* out) const { + WDAPrinter::details(this, out); } -#ifndef PRODUCT template void WorkerDataArray::reset() { set_all(uninitialized()); @@ -133,27 +153,4 @@ } } -template -void WorkerDataArray::verify(uint active_threads) const { - assert(active_threads <= _length, "Wrong number of active threads"); - for (uint i = 0; i < active_threads; i++) { - assert(_data[i] != uninitialized(), - "Invalid data for worker %u in '%s'", i, _title); - } - if (_thread_work_items != NULL) { - _thread_work_items->verify(active_threads); - } -} - -template <> -inline size_t WorkerDataArray::uninitialized() const { - return (size_t)-1; -} - -template <> -inline double WorkerDataArray::uninitialized() const { - return -1.0; -} -#endif - #endif // SHARE_VM_GC_G1_WORKERDATAARRAY_INLINE_HPP diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/g1/youngList.cpp --- a/hotspot/src/share/vm/gc/g1/youngList.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/g1/youngList.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,6 +24,7 @@ #include "precompiled.hpp" #include "gc/g1/g1CollectedHeap.hpp" +#include "gc/g1/g1CollectionSet.hpp" #include "gc/g1/g1CollectorPolicy.hpp" #include "gc/g1/heapRegion.hpp" #include "gc/g1/heapRegion.inline.hpp" @@ -153,7 +154,7 @@ // The region is a non-empty survivor so let's add it to // the incremental collection set for the next evacuation // pause. 
- _g1h->g1_policy()->add_region_to_incremental_cset_rhs(curr); + _g1h->collection_set()->add_survivor_regions(curr); young_index_in_cset += 1; } assert((uint) young_index_in_cset == _survivor_length, "post-condition"); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/parallel/adjoiningGenerations.cpp --- a/hotspot/src/share/vm/gc/parallel/adjoiningGenerations.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/parallel/adjoiningGenerations.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -120,7 +120,7 @@ } void log_before_expansion(bool old, size_t expand_in_bytes, size_t change_in_bytes, size_t max_size) { - LogHandle(heap, ergo) log; + Log(heap, ergo) log; if (!log.is_debug()) { return; } @@ -133,7 +133,7 @@ } void log_after_expansion(bool old, size_t max_size) { - LogHandle(heap, ergo) log; + Log(heap, ergo) log; if (!log.is_debug()) { return; } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/parallel/asPSOldGen.cpp --- a/hotspot/src/share/vm/gc/parallel/asPSOldGen.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/parallel/asPSOldGen.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -126,7 +126,7 @@ // Also adjust for inter-generational alignment size_t result_aligned = align_size_down(result, gen_alignment); - LogHandle(gc, ergo) log; + Log(gc, ergo) log; if (log.is_trace()) { size_t working_promoted = (size_t) policy->avg_promoted()->padded_average(); size_t promo_increment = policy->promo_increment(max_contraction); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/parallel/gcTaskManager.cpp --- a/hotspot/src/share/vm/gc/parallel/gcTaskManager.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/parallel/gcTaskManager.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -30,6 +30,7 @@ #include "logging/log.hpp" #include "memory/allocation.hpp" #include "memory/allocation.inline.hpp" +#include "memory/resourceArea.hpp" #include "runtime/mutex.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/orderAccess.inline.hpp" @@ -404,12 +405,15 @@ for (uint t = 0; t < workers(); t += 1) { set_thread(t, GCTaskThread::create(this, t, processor_assignment[t])); } - if (TraceGCTaskThread) { - tty->print("GCTaskManager::initialize: distribution:"); + Log(gc, task, thread) log; + if (log.is_trace()) { + ResourceMark rm; + outputStream* out = log.trace_stream(); + out->print("GCTaskManager::initialize: distribution:"); for (uint t = 0; t < workers(); t += 1) { - tty->print(" %u", processor_assignment[t]); + out->print(" %u", processor_assignment[t]); } - tty->cr(); + out->cr(); } FREE_C_HEAP_ARRAY(uint, processor_assignment); } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/parallel/gcTaskThread.cpp --- a/hotspot/src/share/vm/gc/parallel/gcTaskThread.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/parallel/gcTaskThread.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -107,14 +107,11 @@ this->initialize_named_thread(); // Bind yourself to your processor. 
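Several hunks above replace direct tty printing guarded by TraceGCTaskThread with the unified-logging pattern: first check whether the level is enabled, then assemble the multi-part line on the log's output stream. A rough standalone imitation of that guard-then-stream shape (Logger here is a stand-in written for the sketch, not the HotSpot Log/LogHandle API):

#include <cstdio>
#include <string>
#include <vector>

// Minimal stand-in for a leveled logger.
struct Logger {
  bool trace_enabled = false;
  bool is_trace() const { return trace_enabled; }
  void trace_line(const std::string& line) { std::fprintf(stderr, "[trace] %s\n", line.c_str()); }
};

// Only assemble the (potentially long) line when trace logging is on,
// mirroring the "if (log.is_trace()) { ... print pieces to out ... }" pattern.
void log_assignment(Logger& log, const std::vector<unsigned>& processor_assignment) {
  if (!log.is_trace()) {
    return;                         // no formatting cost when the level is disabled
  }
  std::string line = "worker->processor:";
  for (unsigned p : processor_assignment) {
    line += " " + std::to_string(p);
  }
  log.trace_line(line);
}

int main() {
  Logger log;
  log.trace_enabled = true;
  log_assignment(log, {0, 1, 2, 3});
  return 0;
}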
if (processor_id() != GCTaskManager::sentinel_worker()) { - if (TraceGCTaskThread) { - tty->print_cr("GCTaskThread::run: " - " binding to processor %u", processor_id()); - } + log_trace(gc, task, thread)("GCTaskThread::run: binding to processor %u", processor_id()); if (!os::bind_to_processor(processor_id())) { DEBUG_ONLY( - warning("Couldn't bind GCTaskThread %u to processor %u", - which(), processor_id()); + log_warning(gc)("Couldn't bind GCTaskThread %u to processor %u", + which(), processor_id()); ) } } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/parallel/mutableSpace.cpp --- a/hotspot/src/share/vm/gc/parallel/mutableSpace.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/parallel/mutableSpace.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -58,7 +58,7 @@ } void MutableSpace::pretouch_pages(MemRegion mr) { - os::pretouch_memory((char*)mr.start(), (char*)mr.end()); + os::pretouch_memory(mr.start(), mr.end()); } void MutableSpace::initialize(MemRegion mr, diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/parallel/parallelScavengeHeap.cpp --- a/hotspot/src/share/vm/gc/parallel/parallelScavengeHeap.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/parallel/parallelScavengeHeap.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -325,8 +325,8 @@ loop_count++; if ((result == NULL) && (QueuedAllocationWarningCount > 0) && (loop_count % QueuedAllocationWarningCount == 0)) { - warning("ParallelScavengeHeap::mem_allocate retries %d times \n\t" - " size=" SIZE_FORMAT, loop_count, size); + log_warning(gc)("ParallelScavengeHeap::mem_allocate retries %d times", loop_count); + log_warning(gc)("\tsize=" SIZE_FORMAT, size); } } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/parallel/pcTasks.cpp --- a/hotspot/src/share/vm/gc/parallel/pcTasks.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/parallel/pcTasks.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -33,6 +33,7 @@ #include "gc/shared/gcTimer.hpp" #include "gc/shared/gcTraceTime.inline.hpp" #include "logging/log.hpp" +#include "memory/resourceArea.hpp" #include "memory/universe.hpp" #include "oops/objArrayKlass.inline.hpp" #include "oops/oop.inline.hpp" @@ -166,7 +167,7 @@ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap(); uint parallel_gc_threads = heap->gc_task_manager()->workers(); uint active_gc_threads = heap->gc_task_manager()->active_workers(); - RegionTaskQueueSet* qset = ParCompactionManager::region_array(); + OopTaskQueueSet* qset = ParCompactionManager::stack_array(); ParallelTaskTerminator terminator(active_gc_threads, qset); GCTaskQueue* q = GCTaskQueue::create(); for(uint i=0; isetup_policy(clear_all_softrefs); const ReferenceProcessorStats& stats = ref_processor()->process_discovered_references( @@ -533,26 +535,37 @@ // This is the point where the entire marking should have completed. assert(_marking_stack.is_empty(), "Marking should have completed"); - // Unload classes and purge the SystemDictionary. - bool purged_class = SystemDictionary::do_unloading(is_alive_closure()); + { + GCTraceTime(Debug, gc, phases) t("Class Unloading", _gc_timer); + + // Unload classes and purge the SystemDictionary. 
+ bool purged_class = SystemDictionary::do_unloading(is_alive_closure()); - // Unload nmethods. - CodeCache::do_unloading(is_alive_closure(), purged_class); + // Unload nmethods. + CodeCache::do_unloading(is_alive_closure(), purged_class); + + // Prune dead klasses from subklass/sibling/implementor lists. + Klass::clean_weak_klass_links(is_alive_closure()); + } - // Prune dead klasses from subklass/sibling/implementor lists. - Klass::clean_weak_klass_links(is_alive_closure()); + { + GCTraceTime(Debug, gc, phases) t("Scrub String Table", _gc_timer); + // Delete entries for dead interned strings. + StringTable::unlink(is_alive_closure()); + } - // Delete entries for dead interned strings. - StringTable::unlink(is_alive_closure()); + { + GCTraceTime(Debug, gc, phases) t("Scrub Symbol Table", _gc_timer); + // Clean up unreferenced symbols in symbol table. + SymbolTable::unlink(); + } - // Clean up unreferenced symbols in symbol table. - SymbolTable::unlink(); _gc_tracer->report_object_count_after_gc(is_alive_closure()); } void PSMarkSweep::mark_sweep_phase2() { - GCTraceTime(Trace, gc) tm("Phase 2: Compute new object addresses", _gc_timer); + GCTraceTime(Info, gc, phases) tm("Phase 2: Compute new object addresses", _gc_timer); // Now all live objects are marked, compute the new object addresses. @@ -570,16 +583,9 @@ old_gen->precompact(); } -// This should be moved to the shared markSweep code! -class PSAlwaysTrueClosure: public BoolObjectClosure { -public: - bool do_object_b(oop p) { return true; } -}; -static PSAlwaysTrueClosure always_true; - void PSMarkSweep::mark_sweep_phase3() { // Adjust the pointers to reflect the new locations - GCTraceTime(Trace, gc) tm("Phase 3: Adjust pointers", _gc_timer); + GCTraceTime(Info, gc, phases) tm("Phase 3: Adjust pointers", _gc_timer); ParallelScavengeHeap* heap = ParallelScavengeHeap::heap(); PSYoungGen* young_gen = heap->young_gen(); @@ -603,7 +609,7 @@ // Now adjust pointers in remaining weak roots. (All of which should // have been cleared if they pointed to non-surviving objects.) // Global (weak) JNI handles - JNIHandles::weak_oops_do(&always_true, adjust_pointer_closure()); + JNIHandles::weak_oops_do(adjust_pointer_closure()); CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations); CodeCache::blobs_do(&adjust_from_blobs); @@ -619,7 +625,7 @@ void PSMarkSweep::mark_sweep_phase4() { EventMark m("4 compact heap"); - GCTraceTime(Trace, gc) tm("Phase 4: Move objects", _gc_timer); + GCTraceTime(Info, gc, phases) tm("Phase 4: Move objects", _gc_timer); // All pointers are now adjusted, move objects accordingly @@ -638,7 +644,7 @@ jlong ret_val = now - _time_of_last_gc; // XXX See note in genCollectedHeap::millis_since_last_gc(). 
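Above, one coarse timer around the weak-reference cleanup is split into separate scoped timers for class unloading, string table scrubbing and symbol table scrubbing, so each sub-phase gets its own line in the GC log. A small RAII sketch of that scoped-timer shape (ScopedPhaseTimer is invented for the example, it is not GCTraceTime):

#include <chrono>
#include <cstdio>

// RAII timer: prints the phase name and elapsed time when the scope ends.
class ScopedPhaseTimer {
public:
  explicit ScopedPhaseTimer(const char* name)
      : _name(name), _start(std::chrono::steady_clock::now()) {}
  ~ScopedPhaseTimer() {
    auto end = std::chrono::steady_clock::now();
    double ms = std::chrono::duration<double, std::milli>(end - _start).count();
    std::printf("%s: %.3fms\n", _name, ms);
  }
private:
  const char* _name;
  std::chrono::steady_clock::time_point _start;
};

void unload_classes()     { /* ... */ }
void scrub_string_table() { /* ... */ }
void scrub_symbol_table() { /* ... */ }

void weak_cleanup() {
  // Each sub-phase gets its own scope, and therefore its own timing line,
  // instead of one timer wrapping all three steps.
  {
    ScopedPhaseTimer t("Class Unloading");
    unload_classes();
  }
  {
    ScopedPhaseTimer t("Scrub String Table");
    scrub_string_table();
  }
  {
    ScopedPhaseTimer t("Scrub Symbol Table");
    scrub_symbol_table();
  }
}

int main() {
  weak_cleanup();
  return 0;
}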
if (ret_val < 0) { - NOT_PRODUCT(warning("time warp: " JLONG_FORMAT, ret_val);) + NOT_PRODUCT(log_warning(gc)("time warp: " JLONG_FORMAT, ret_val);) return 0; } return ret_val; diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/parallel/psMarkSweepDecorator.cpp --- a/hotspot/src/share/vm/gc/parallel/psMarkSweepDecorator.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/parallel/psMarkSweepDecorator.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -29,7 +29,6 @@ #include "gc/parallel/psMarkSweep.hpp" #include "gc/parallel/psMarkSweepDecorator.hpp" #include "gc/serial/markSweep.inline.hpp" -#include "gc/shared/liveRange.hpp" #include "gc/shared/spaceDecorator.hpp" #include "oops/oop.inline.hpp" #include "runtime/prefetch.inline.hpp" @@ -107,9 +106,6 @@ HeapWord* end_of_live= q; /* One byte beyond the last byte of the last live object. */ HeapWord* first_dead = space()->end(); /* The first dead object. */ - LiveRange* liveRange = NULL; /* The current live range, recorded in the - first header of preceding free area. */ - _first_dead = first_dead; const intx interval = PrefetchScanIntervalInBytes; @@ -231,17 +227,8 @@ } } - /* for the previous LiveRange, record the end of the live objects. */ - if (liveRange) { - liveRange->set_end(q); - } - - /* record the current LiveRange object. - * liveRange->start() is overlaid on the mark word. - */ - liveRange = (LiveRange*)q; - liveRange->set_start(end); - liveRange->set_end(end); + // q is a pointer to a dead object. Use this dead memory to store a pointer to the next live object. + (*(HeapWord**)q) = end; /* see if this is the first dead region. */ if (q < first_dead) { @@ -254,9 +241,6 @@ } assert(q == t, "just checking"); - if (liveRange != NULL) { - liveRange->set_end(q); - } _end_of_live = end_of_live; if (end_of_live < first_dead) { first_dead = end_of_live; @@ -307,9 +291,8 @@ if (_first_dead == t) { q = t; } else { - // $$$ This is funky. Using this to read the previously written - // LiveRange. See also use below. - q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer(); + // The first dead object should contain a pointer to the first live object + q = *(HeapWord**)_first_dead; } } const intx interval = PrefetchScanIntervalInBytes; @@ -325,11 +308,11 @@ debug_only(prev_q = q); q += size; } else { - // q is not a live object, so its mark should point at the next - // live object debug_only(prev_q = q); - q = (HeapWord*) oop(q)->mark()->decode_pointer(); - assert(q > prev_q, "we should be moving forward through memory"); + // The first dead object is no longer an object. At that memory address, + // there is a pointer to the first live object that the previous phase found. 
+ q = *(HeapWord**)q; + assert(q > prev_q, "we should be moving forward through memory, q: " PTR_FORMAT ", prev_q: " PTR_FORMAT, p2i(q), p2i(prev_q)); } } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/parallel/psOldGen.cpp --- a/hotspot/src/share/vm/gc/parallel/psOldGen.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/parallel/psOldGen.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -309,7 +309,7 @@ const size_t remaining_bytes = virtual_space()->uncommitted_size(); if (remaining_bytes > 0) { result = expand_by(remaining_bytes); - DEBUG_ONLY(if (!result) warning("grow to reserve failed")); + DEBUG_ONLY(if (!result) log_warning(gc)("grow to reserve failed")); } return result; } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/parallel/psParallelCompact.cpp --- a/hotspot/src/share/vm/gc/parallel/psParallelCompact.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/parallel/psParallelCompact.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -51,6 +51,7 @@ #include "gc/shared/referenceProcessor.hpp" #include "gc/shared/spaceDecorator.hpp" #include "logging/log.hpp" +#include "memory/resourceArea.hpp" #include "oops/instanceKlass.inline.hpp" #include "oops/instanceMirrorKlass.inline.hpp" #include "oops/methodData.hpp" @@ -195,10 +196,10 @@ }; void PSParallelCompact::print_region_ranges() { - if (!log_develop_is_enabled(Trace, gc, compaction, phases)) { + if (!log_develop_is_enabled(Trace, gc, compaction)) { return; } - LogHandle(gc, compaction, phases) log; + Log(gc, compaction) log; ResourceMark rm; Universe::print_on(log.trace_stream()); log.trace("space bottom top end new_top"); @@ -225,7 +226,7 @@ ParallelCompactData& sd = PSParallelCompact::summary_data(); size_t dci = c->destination() ? sd.addr_to_region_idx(c->destination()) : 0; - log_develop_trace(gc, compaction, phases)( + log_develop_trace(gc, compaction)( REGION_IDX_FORMAT " " PTR_FORMAT " " REGION_IDX_FORMAT " " PTR_FORMAT " " REGION_DATA_FORMAT " " REGION_DATA_FORMAT " " @@ -258,14 +259,14 @@ ++i; } - log_develop_trace(gc, compaction, phases)("summary_data_bytes=" SIZE_FORMAT, total_words * HeapWordSize); + log_develop_trace(gc, compaction)("summary_data_bytes=" SIZE_FORMAT, total_words * HeapWordSize); } void print_generic_summary_data(ParallelCompactData& summary_data, SpaceInfo* space_info) { - if (!log_develop_is_enabled(Trace, gc, compaction, phases)) { + if (!log_develop_is_enabled(Trace, gc, compaction)) { return; } @@ -296,7 +297,7 @@ size_t i = summary_data.addr_to_region_idx(space->bottom()); while (i < end_region && summary_data.region(i)->data_size() == region_size) { ParallelCompactData::RegionData* c = summary_data.region(i); - log_develop_trace(gc, compaction, phases)( + log_develop_trace(gc, compaction)( SIZE_FORMAT_W(5) " " PTR_FORMAT " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " %d", i, p2i(c->destination()), c->partial_obj_size(), c->live_obj_size(), @@ -330,7 +331,7 @@ } ParallelCompactData::RegionData* c = summary_data.region(i); - log_develop_trace(gc, compaction, phases)( + log_develop_trace(gc, compaction)( SIZE_FORMAT_W(5) " " PTR_FORMAT " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " %d" "%12.10f " SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10), i, p2i(c->destination()), @@ -346,21 +347,21 @@ // Any remaining regions are empty. Print one more if there is one. 
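The psMarkSweepDecorator hunks above drop the LiveRange bookkeeping: the first word of a dead gap is reused to store the address of the next live object, so the later compaction scan can hop over the gap directly instead of decoding mark words. A toy, non-GC illustration of that trick on a plain array of words (the layout and sizes are made up for the sketch):

#include <cstdio>
#include <cstdint>

// Toy "heap": each word is either live payload or part of a dead gap.
// During the first pass, the first word of each dead gap is overwritten
// with the index of the next live word, so later passes can skip the gap
// in one step instead of walking it word by word.
int main() {
  const int N = 10;
  bool     live[N] = { true, true, false, false, false, true, false, false, true, true };
  intptr_t heap[N] = { 11, 22, 0, 0, 0, 33, 0, 0, 44, 55 };

  // Pass 1: record, at the start of each dead gap, where the next live word is.
  for (int i = 0; i < N; ) {
    if (live[i]) { i++; continue; }
    int next = i;
    while (next < N && !live[next]) next++;
    heap[i] = next;            // this memory is dead, so it is free to reuse
    i = next;
  }

  // Pass 2: visit only live words, hopping over dead gaps via the stored index.
  for (int i = 0; i < N; ) {
    if (live[i]) {
      std::printf("live word at %d: %ld\n", i, (long)heap[i]);
      i++;
    } else {
      i = (int)heap[i];        // jump straight to the next live word
    }
  }
  return 0;
}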
if (i < end_region) { ParallelCompactData::RegionData* c = summary_data.region(i); - log_develop_trace(gc, compaction, phases)( + log_develop_trace(gc, compaction)( SIZE_FORMAT_W(5) " " PTR_FORMAT " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " %d", i, p2i(c->destination()), c->partial_obj_size(), c->live_obj_size(), c->data_size(), c->source_region(), c->destination_count()); } - log_develop_trace(gc, compaction, phases)("max: " SIZE_FORMAT_W(4) " d2r=" SIZE_FORMAT_W(10) " l2r=" SIZE_FORMAT_W(10) " max_ratio=%14.12f", - max_reclaimed_ratio_region, max_dead_to_right, max_live_to_right, max_reclaimed_ratio); + log_develop_trace(gc, compaction)("max: " SIZE_FORMAT_W(4) " d2r=" SIZE_FORMAT_W(10) " l2r=" SIZE_FORMAT_W(10) " max_ratio=%14.12f", + max_reclaimed_ratio_region, max_dead_to_right, max_live_to_right, max_reclaimed_ratio); } void print_initial_summary_data(ParallelCompactData& summary_data, SpaceInfo* space_info) { - if (!log_develop_is_enabled(Trace, gc, compaction, phases)) { + if (!log_develop_is_enabled(Trace, gc, compaction)) { return; } @@ -621,7 +622,7 @@ sr->partial_obj_size())); const size_t end_idx = addr_to_region_idx(target_end); - log_develop_trace(gc, compaction, phases)("split: clearing source_region field in [" SIZE_FORMAT ", " SIZE_FORMAT ")", beg_idx, end_idx); + log_develop_trace(gc, compaction)("split: clearing source_region field in [" SIZE_FORMAT ", " SIZE_FORMAT ")", beg_idx, end_idx); for (size_t idx = beg_idx; idx < end_idx; ++idx) { _region_data[idx].set_source_region(0); } @@ -641,22 +642,22 @@ *target_next = split_destination + partial_obj_size; HeapWord* const source_next = region_to_addr(split_region) + partial_obj_size; - if (log_develop_is_enabled(Trace, gc, compaction, phases)) { + if (log_develop_is_enabled(Trace, gc, compaction)) { const char * split_type = partial_obj_size == 0 ? "easy" : "hard"; - log_develop_trace(gc, compaction, phases)("%s split: src=" PTR_FORMAT " src_c=" SIZE_FORMAT " pos=" SIZE_FORMAT, - split_type, p2i(source_next), split_region, partial_obj_size); - log_develop_trace(gc, compaction, phases)("%s split: dst=" PTR_FORMAT " dst_c=" SIZE_FORMAT " tn=" PTR_FORMAT, - split_type, p2i(split_destination), - addr_to_region_idx(split_destination), - p2i(*target_next)); + log_develop_trace(gc, compaction)("%s split: src=" PTR_FORMAT " src_c=" SIZE_FORMAT " pos=" SIZE_FORMAT, + split_type, p2i(source_next), split_region, partial_obj_size); + log_develop_trace(gc, compaction)("%s split: dst=" PTR_FORMAT " dst_c=" SIZE_FORMAT " tn=" PTR_FORMAT, + split_type, p2i(split_destination), + addr_to_region_idx(split_destination), + p2i(*target_next)); if (partial_obj_size != 0) { HeapWord* const po_beg = split_info.destination(); HeapWord* const po_end = po_beg + split_info.partial_obj_size(); - log_develop_trace(gc, compaction, phases)("%s split: po_beg=" PTR_FORMAT " " SIZE_FORMAT " po_end=" PTR_FORMAT " " SIZE_FORMAT, - split_type, - p2i(po_beg), addr_to_region_idx(po_beg), - p2i(po_end), addr_to_region_idx(po_end)); + log_develop_trace(gc, compaction)("%s split: po_beg=" PTR_FORMAT " " SIZE_FORMAT " po_end=" PTR_FORMAT " " SIZE_FORMAT, + split_type, + p2i(po_beg), addr_to_region_idx(po_beg), + p2i(po_end), addr_to_region_idx(po_end)); } } @@ -670,7 +671,7 @@ HeapWord** target_next) { HeapWord* const source_next_val = source_next == NULL ? 
NULL : *source_next; - log_develop_trace(gc, compaction, phases)( + log_develop_trace(gc, compaction)( "sb=" PTR_FORMAT " se=" PTR_FORMAT " sn=" PTR_FORMAT "tb=" PTR_FORMAT " te=" PTR_FORMAT " tn=" PTR_FORMAT, p2i(source_beg), p2i(source_end), p2i(source_next_val), @@ -938,7 +939,7 @@ // at each young gen gc. Do the update unconditionally (even though a // promotion failure does not swap spaces) because an unknown number of young // collections will have swapped the spaces an unknown number of times. - GCTraceTime(Trace, gc, phases) tm("Pre Compact", &_gc_timer); + GCTraceTime(Debug, gc, phases) tm("Pre Compact", &_gc_timer); ParallelScavengeHeap* heap = ParallelScavengeHeap::heap(); _space_info[from_space_id].set_space(heap->young_gen()->from_space()); _space_info[to_space_id].set_space(heap->young_gen()->to_space()); @@ -981,7 +982,7 @@ void PSParallelCompact::post_compact() { - GCTraceTime(Trace, gc, phases) tm("Post Compact", &_gc_timer); + GCTraceTime(Info, gc, phases) tm("Post Compact", &_gc_timer); for (unsigned int id = old_space_id; id < last_space_id; ++id) { // Clear the marking bitmap, summary data and split info. @@ -1524,7 +1525,7 @@ } } - if (log_develop_is_enabled(Trace, gc, compaction, phases)) { + if (log_develop_is_enabled(Trace, gc, compaction)) { const size_t region_size = ParallelCompactData::RegionSize; HeapWord* const dense_prefix_end = _space_info[id].dense_prefix(); const size_t dp_region = _summary_data.addr_to_region_idx(dense_prefix_end); @@ -1532,7 +1533,7 @@ HeapWord* const new_top = _space_info[id].new_top(); const HeapWord* nt_aligned_up = _summary_data.region_align_up(new_top); const size_t cr_words = pointer_delta(nt_aligned_up, dense_prefix_end); - log_develop_trace(gc, compaction, phases)( + log_develop_trace(gc, compaction)( "id=%d cap=" SIZE_FORMAT " dp=" PTR_FORMAT " " "dp_region=" SIZE_FORMAT " " "dp_count=" SIZE_FORMAT " " "cr_count=" SIZE_FORMAT " " "nt=" PTR_FORMAT, @@ -1548,7 +1549,7 @@ SpaceId src_space_id, HeapWord* src_beg, HeapWord* src_end) { - log_develop_trace(gc, compaction, phases)( + log_develop_trace(gc, compaction)( "Summarizing %d [%s] into %d [%s]: " "src=" PTR_FORMAT "-" PTR_FORMAT " " SIZE_FORMAT "-" SIZE_FORMAT " " @@ -1568,7 +1569,7 @@ void PSParallelCompact::summary_phase(ParCompactionManager* cm, bool maximum_compaction) { - GCTraceTime(Trace, gc, phases) tm("Summary Phase", &_gc_timer); + GCTraceTime(Info, gc, phases) tm("Summary Phase", &_gc_timer); #ifdef ASSERT if (TraceParallelOldGCMarkingPhase) { @@ -1584,7 +1585,7 @@ // Quick summarization of each space into itself, to see how much is live. 
summarize_spaces_quick(); - log_develop_trace(gc, compaction, phases)("summary phase: after summarizing each space to self"); + log_develop_trace(gc, compaction)("summary phase: after summarizing each space to self"); NOT_PRODUCT(print_region_ranges()); NOT_PRODUCT(print_initial_summary_data(_summary_data, _space_info)); @@ -1660,7 +1661,7 @@ } } - log_develop_trace(gc, compaction, phases)("Summary_phase: after final summarization"); + log_develop_trace(gc, compaction)("Summary_phase: after final summarization"); NOT_PRODUCT(print_region_ranges()); NOT_PRODUCT(print_initial_summary_data(_summary_data, _space_info)); } @@ -2042,12 +2043,12 @@ bool maximum_heap_compaction, ParallelOldTracer *gc_tracer) { // Recursively traverse all live objects and mark them - GCTraceTime(Trace, gc, phases) tm("Marking Phase", &_gc_timer); + GCTraceTime(Info, gc, phases) tm("Marking Phase", &_gc_timer); ParallelScavengeHeap* heap = ParallelScavengeHeap::heap(); uint parallel_gc_threads = heap->gc_task_manager()->workers(); uint active_gc_threads = heap->gc_task_manager()->active_workers(); - TaskQueueSetSuper* qset = ParCompactionManager::region_array(); + TaskQueueSetSuper* qset = ParCompactionManager::stack_array(); ParallelTaskTerminator terminator(active_gc_threads, qset); ParCompactionManager::MarkAndPushClosure mark_and_push_closure(cm); @@ -2057,7 +2058,7 @@ ClassLoaderDataGraph::clear_claimed_marks(); { - GCTraceTime(Trace, gc, phases) tm("Par Mark", &_gc_timer); + GCTraceTime(Debug, gc, phases) tm("Par Mark", &_gc_timer); ParallelScavengeHeap::ParStrongRootsScope psrs; @@ -2086,7 +2087,7 @@ // Process reference objects found during marking { - GCTraceTime(Trace, gc, phases) tm("Reference Processing", &_gc_timer); + GCTraceTime(Debug, gc, phases) tm("Reference Processing", &_gc_timer); ReferenceProcessorStats stats; if (ref_processor()->processing_is_mt()) { @@ -2103,38 +2104,40 @@ gc_tracer->report_gc_reference_stats(stats); } - GCTraceTime(Trace, gc) tm_m("Class Unloading", &_gc_timer); - // This is the point where the entire marking should have completed. assert(cm->marking_stacks_empty(), "Marking should have completed"); - // Follow system dictionary roots and unload classes. - bool purged_class = SystemDictionary::do_unloading(is_alive_closure()); - - // Unload nmethods. - CodeCache::do_unloading(is_alive_closure(), purged_class); - - // Prune dead klasses from subklass/sibling/implementor lists. - Klass::clean_weak_klass_links(is_alive_closure()); - - // Delete entries for dead interned strings. - StringTable::unlink(is_alive_closure()); - - // Clean up unreferenced symbols in symbol table. - SymbolTable::unlink(); + { + GCTraceTime(Debug, gc, phases) tm_m("Class Unloading", &_gc_timer); + + // Follow system dictionary roots and unload classes. + bool purged_class = SystemDictionary::do_unloading(is_alive_closure()); + + // Unload nmethods. + CodeCache::do_unloading(is_alive_closure(), purged_class); + + // Prune dead klasses from subklass/sibling/implementor lists. + Klass::clean_weak_klass_links(is_alive_closure()); + } + + { + GCTraceTime(Debug, gc, phases) t("Scrub String Table", &_gc_timer); + // Delete entries for dead interned strings. + StringTable::unlink(is_alive_closure()); + } + + { + GCTraceTime(Debug, gc, phases) t("Scrub Symbol Table", &_gc_timer); + // Clean up unreferenced symbols in symbol table. + SymbolTable::unlink(); + } + _gc_tracer.report_object_count_after_gc(is_alive_closure()); } -// This should be moved to the shared markSweep code! 
-class PSAlwaysTrueClosure: public BoolObjectClosure { -public: - bool do_object_b(oop p) { return true; } -}; -static PSAlwaysTrueClosure always_true; - void PSParallelCompact::adjust_roots(ParCompactionManager* cm) { // Adjust the pointers to reflect the new locations - GCTraceTime(Trace, gc, phases) tm("Adjust Roots", &_gc_timer); + GCTraceTime(Info, gc, phases) tm("Adjust Roots", &_gc_timer); // Need new claim bits when tracing through and adjusting pointers. ClassLoaderDataGraph::clear_claimed_marks(); @@ -2157,7 +2160,7 @@ // Now adjust pointers in remaining weak roots. (All of which should // have been cleared if they pointed to non-surviving objects.) // Global (weak) JNI handles - JNIHandles::weak_oops_do(&always_true, &oop_closure); + JNIHandles::weak_oops_do(&oop_closure); CodeBlobToOopClosure adjust_from_blobs(&oop_closure, CodeBlobToOopClosure::FixRelocations); CodeCache::blobs_do(&adjust_from_blobs); @@ -2173,7 +2176,7 @@ // Helper class to print 8 region numbers per line and then print the total at the end. class FillableRegionLogger : public StackObj { private: - LogHandle(gc, compaction) log; + Log(gc, compaction) log; static const int LineLength = 8; size_t _regions[LineLength]; int _next_index; @@ -2375,7 +2378,7 @@ return; } - LogHandle(gc, compaction) log; + Log(gc, compaction) log; ResourceMark rm; outputStream* out = log.trace_stream(); @@ -2408,7 +2411,7 @@ #endif // #ifdef ASSERT void PSParallelCompact::compact() { - GCTraceTime(Trace, gc, phases) tm("Compaction Phase", &_gc_timer); + GCTraceTime(Info, gc, phases) tm("Compaction Phase", &_gc_timer); ParallelScavengeHeap* heap = ParallelScavengeHeap::heap(); PSOldGen* old_gen = heap->old_gen(); @@ -2467,9 +2470,8 @@ for (cur_region = beg_region; cur_region < new_top_region; ++cur_region) { const RegionData* const c = sd.region(cur_region); if (!c->completed()) { - warning("region " SIZE_FORMAT " not filled: " - "destination_count=%u", - cur_region, c->destination_count()); + log_warning(gc)("region " SIZE_FORMAT " not filled: destination_count=%u", + cur_region, c->destination_count()); issued_a_warning = true; } } @@ -2477,9 +2479,8 @@ for (cur_region = new_top_region; cur_region < old_top_region; ++cur_region) { const RegionData* const c = sd.region(cur_region); if (!c->available()) { - warning("region " SIZE_FORMAT " not empty: " - "destination_count=%u", - cur_region, c->destination_count()); + log_warning(gc)("region " SIZE_FORMAT " not empty: destination_count=%u", + cur_region, c->destination_count()); issued_a_warning = true; } } @@ -3013,7 +3014,7 @@ jlong ret_val = now - _time_of_last_gc; // XXX See note in genCollectedHeap::millis_since_last_gc(). if (ret_val < 0) { - NOT_PRODUCT(warning("time warp: " JLONG_FORMAT, ret_val);) + NOT_PRODUCT(log_warning(gc)("time warp: " JLONG_FORMAT, ret_val);) return 0; } return ret_val; diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/parallel/psPromotionManager.cpp --- a/hotspot/src/share/vm/gc/parallel/psPromotionManager.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/parallel/psPromotionManager.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -29,11 +29,13 @@ #include "gc/parallel/psPromotionManager.inline.hpp" #include "gc/parallel/psScavenge.inline.hpp" #include "gc/shared/gcTrace.hpp" +#include "gc/shared/preservedMarks.inline.hpp" #include "gc/shared/taskqueue.inline.hpp" #include "logging/log.hpp" #include "memory/allocation.inline.hpp" #include "memory/memRegion.hpp" #include "memory/padded.inline.hpp" +#include "memory/resourceArea.hpp" #include "oops/instanceKlass.inline.hpp" #include "oops/instanceMirrorKlass.inline.hpp" #include "oops/objArrayKlass.inline.hpp" @@ -41,6 +43,7 @@ PaddedEnd* PSPromotionManager::_manager_array = NULL; OopStarTaskQueueSet* PSPromotionManager::_stack_array_depth = NULL; +PreservedMarksSet* PSPromotionManager::_preserved_marks_set = NULL; PSOldGen* PSPromotionManager::_old_gen = NULL; MutableSpace* PSPromotionManager::_young_space = NULL; @@ -50,10 +53,12 @@ _old_gen = heap->old_gen(); _young_space = heap->young_gen()->to_space(); + const uint promotion_manager_num = ParallelGCThreads + 1; + // To prevent false sharing, we pad the PSPromotionManagers // and make sure that the first instance starts at a cache line. assert(_manager_array == NULL, "Attempt to initialize twice"); - _manager_array = PaddedArray::create_unfreeable(ParallelGCThreads + 1); + _manager_array = PaddedArray::create_unfreeable(promotion_manager_num); guarantee(_manager_array != NULL, "Could not initialize promotion manager"); _stack_array_depth = new OopStarTaskQueueSet(ParallelGCThreads); @@ -65,6 +70,14 @@ } // The VMThread gets its own PSPromotionManager, which is not available // for work stealing. + + assert(_preserved_marks_set == NULL, "Attempt to initialize twice"); + _preserved_marks_set = new PreservedMarksSet(true /* in_c_heap */); + guarantee(_preserved_marks_set != NULL, "Could not initialize preserved marks set"); + _preserved_marks_set->init(promotion_manager_num); + for (uint i = 0; i < promotion_manager_num; i += 1) { + _manager_array[i].register_preserved_marks(_preserved_marks_set->get(i)); + } } // Helper functions to get around the circular dependency between @@ -90,6 +103,7 @@ void PSPromotionManager::pre_scavenge() { ParallelScavengeHeap* heap = ParallelScavengeHeap::heap(); + _preserved_marks_set->assert_empty(); _young_space = heap->young_gen()->to_space(); for(uint i=0; iflush_labs(); } + if (!promotion_failure_occurred) { + // If there was no promotion failure, the preserved mark stacks + // should be empty. 
+ _preserved_marks_set->assert_empty(); + } return promotion_failure_occurred; } @@ -133,7 +152,7 @@ if (!log_develop_is_enabled(Trace, gc, task, stats)) { return; } - LogHandle(gc, task, stats) log; + Log(gc, task, stats) log; ResourceMark rm; outputStream* out = log.trace_stream(); out->print_cr("== GC Tasks Stats, GC %3d", @@ -187,6 +206,8 @@ // let's choose 1.5x the chunk size _min_array_size_for_chunking = 3 * _array_chunk_size / 2; + _preserved_marks = NULL; + reset(); } @@ -211,6 +232,10 @@ TASKQUEUE_STATS_ONLY(reset_stats()); } +void PSPromotionManager::register_preserved_marks(PreservedMarks* preserved_marks) { + assert(_preserved_marks == NULL, "do not set it twice"); + _preserved_marks = preserved_marks; +} void PSPromotionManager::drain_stacks_depth(bool totally_drain) { totally_drain = totally_drain || _totally_drain; @@ -422,8 +447,7 @@ push_contents(obj); - // Save the mark if needed - PSScavenge::oop_promotion_failed(obj, obj_mark); + _preserved_marks->push_if_necessary(obj, obj_mark); } else { // We lost, someone else "owns" this object guarantee(obj->is_forwarded(), "Object must be forwarded if the cas failed."); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/parallel/psPromotionManager.hpp --- a/hotspot/src/share/vm/gc/parallel/psPromotionManager.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/parallel/psPromotionManager.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -28,6 +28,7 @@ #include "gc/parallel/psPromotionLAB.hpp" #include "gc/shared/copyFailedInfo.hpp" #include "gc/shared/gcTrace.hpp" +#include "gc/shared/preservedMarks.hpp" #include "gc/shared/taskqueue.hpp" #include "memory/allocation.hpp" #include "memory/padded.hpp" @@ -55,6 +56,7 @@ private: static PaddedEnd* _manager_array; static OopStarTaskQueueSet* _stack_array_depth; + static PreservedMarksSet* _preserved_marks_set; static PSOldGen* _old_gen; static MutableSpace* _young_space; @@ -84,6 +86,7 @@ uint _array_chunk_size; uint _min_array_size_for_chunking; + PreservedMarks* _preserved_marks; PromotionFailedInfo _promotion_failed_info; // Accessors @@ -176,6 +179,8 @@ oop oop_promotion_failed(oop obj, markOop obj_mark); void reset(); + void register_preserved_marks(PreservedMarks* preserved_marks); + static void restore_preserved_marks() { _preserved_marks_set->restore(); } void flush_labs(); void drain_stacks(bool totally_drain) { diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/parallel/psScavenge.cpp --- a/hotspot/src/share/vm/gc/parallel/psScavenge.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/parallel/psScavenge.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -68,8 +68,6 @@ elapsedTimer PSScavenge::_accumulated_time; STWGCTimer PSScavenge::_gc_timer; ParallelScavengeTracer PSScavenge::_gc_tracer; -Stack PSScavenge::_preserved_mark_stack; -Stack PSScavenge::_preserved_oop_stack; CollectorCounters* PSScavenge::_counters = NULL; // Define before use @@ -123,14 +121,6 @@ } }; -class PSPromotionFailedClosure : public ObjectClosure { - virtual void do_object(oop obj) { - if (obj->is_forwarded()) { - obj->init_mark(); - } - } -}; - class PSRefProcTaskProxy: public GCTask { typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask; ProcessTask & _rp_task; @@ -257,9 +247,6 @@ assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread"); - assert(_preserved_mark_stack.is_empty(), "should be empty"); - assert(_preserved_oop_stack.is_empty(), "should be empty"); - 
_gc_timer.register_gc_start(); TimeStamp scavenge_entry; @@ -417,7 +404,7 @@ // Process reference objects discovered during scavenge { - GCTraceTime(Debug, gc, phases) tm("References", &_gc_timer); + GCTraceTime(Debug, gc, phases) tm("Reference Processing", &_gc_timer); reference_processor()->setup_policy(false); // not always_clear reference_processor()->set_active_mt_degree(active_workers); @@ -446,7 +433,7 @@ } { - GCTraceTime(Debug, gc, phases) tm("StringTable", &_gc_timer); + GCTraceTime(Debug, gc, phases) tm("Scrub String Table", &_gc_timer); // Unlink any dead interned Strings and process the remaining live ones. PSScavengeRootsClosure root_closure(promotion_manager); StringTable::unlink_or_oops_do(&_is_alive_closure, &root_closure); @@ -656,52 +643,20 @@ } // This method iterates over all objects in the young generation, -// unforwarding markOops. It then restores any preserved mark oops, -// and clears the _preserved_mark_stack. +// removing all forwarding references. It then restores any preserved marks. void PSScavenge::clean_up_failed_promotion() { ParallelScavengeHeap* heap = ParallelScavengeHeap::heap(); PSYoungGen* young_gen = heap->young_gen(); - { - ResourceMark rm; - - // Unforward all pointers in the young gen. - PSPromotionFailedClosure unforward_closure; - young_gen->object_iterate(&unforward_closure); - - log_trace(gc, ergo)("Restoring " SIZE_FORMAT " marks", _preserved_oop_stack.size()); + RemoveForwardedPointerClosure remove_fwd_ptr_closure; + young_gen->object_iterate(&remove_fwd_ptr_closure); - // Restore any saved marks. - while (!_preserved_oop_stack.is_empty()) { - oop obj = _preserved_oop_stack.pop(); - markOop mark = _preserved_mark_stack.pop(); - obj->set_mark(mark); - } - - // Clear the preserved mark and oop stack caches. - _preserved_mark_stack.clear(true); - _preserved_oop_stack.clear(true); - } + PSPromotionManager::restore_preserved_marks(); // Reset the PromotionFailureALot counters. NOT_PRODUCT(heap->reset_promotion_should_fail();) } -// This method is called whenever an attempt to promote an object -// fails. Some markOops will need preservation, some will not. Note -// that the entire eden is traversed after a failed promotion, with -// all forwarded headers replaced by the default markOop. This means -// it is not necessary to preserve most markOops. -void PSScavenge::oop_promotion_failed(oop obj, markOop obj_mark) { - if (obj_mark->must_be_preserved_for_promotion_failure(obj)) { - // Should use per-worker private stacks here rather than - // locking a common pair of stacks. - ThreadCritical tc; - _preserved_oop_stack.push(obj); - _preserved_mark_stack.push(obj_mark); - } -} - bool PSScavenge::should_attempt_scavenge() { ParallelScavengeHeap* heap = ParallelScavengeHeap::heap(); PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters(); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/parallel/psScavenge.hpp --- a/hotspot/src/share/vm/gc/parallel/psScavenge.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/parallel/psScavenge.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -79,8 +79,6 @@ static HeapWord* _young_generation_boundary; // Used to optimize compressed oops young gen boundary checking. static uintptr_t _young_generation_boundary_compressed; - static Stack _preserved_mark_stack; // List of marks to be restored after failed promotion - static Stack _preserved_oop_stack; // List of oops that need their mark restored. 
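The PSPromotionManager/PSScavenge hunks above retire the two global preserved-mark stacks, which needed ThreadCritical locking, in favour of one PreservedMarks instance registered per promotion manager and restored after a failed promotion. A rough sketch of that per-worker-stack idea using standard containers (PreservedEntry, the worker count and the printf "restore" are invented for the example):

#include <cstdio>
#include <cstdint>
#include <vector>

// Invented stand-ins for an object and its saved header word.
struct PreservedEntry {
  void*    obj;
  uint64_t mark;
};

// One stack per GC worker: workers push without any shared lock,
// and a single restore pass replays every stack afterwards.
class PreservedMarksSet {
public:
  explicit PreservedMarksSet(unsigned workers) : _stacks(workers) {}

  std::vector<PreservedEntry>* get(unsigned worker) { return &_stacks[worker]; }

  void restore() {
    for (auto& stack : _stacks) {
      for (const auto& e : stack) {
        // Real code would write e.mark back into the object's header.
        std::printf("restore mark %llu for %p\n",
                    (unsigned long long)e.mark, e.obj);
      }
      stack.clear();
    }
  }

private:
  std::vector<std::vector<PreservedEntry>> _stacks;
};

int main() {
  PreservedMarksSet set(2);          // e.g. two GC workers
  int a = 0, b = 0;
  set.get(0)->push_back({&a, 42});   // worker 0 preserves a mark on its own stack
  set.get(1)->push_back({&b, 7});    // worker 1 does the same, no shared lock
  set.restore();                     // single-threaded restore after the pause
  return 0;
}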
static CollectorCounters* _counters; // collector performance counters static void clean_up_failed_promotion(); @@ -127,9 +125,6 @@ // Return true if a collection was done; false otherwise. static bool invoke_no_policy(); - // If an attempt to promote fails, this method is invoked - static void oop_promotion_failed(oop obj, markOop obj_mark); - template static inline bool should_scavenge(T* p); // These call should_scavenge() above and, if it returns true, also check that diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/parallel/psScavenge.inline.hpp --- a/hotspot/src/share/vm/gc/parallel/psScavenge.inline.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/parallel/psScavenge.inline.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -31,6 +31,7 @@ #include "gc/parallel/psScavenge.hpp" #include "logging/log.hpp" #include "memory/iterator.hpp" +#include "memory/resourceArea.hpp" #include "utilities/globalDefinitions.hpp" inline void PSScavenge::save_to_space_top_before_gc() { diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/parallel/psTasks.cpp --- a/hotspot/src/share/vm/gc/parallel/psTasks.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/parallel/psTasks.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -34,6 +34,7 @@ #include "gc/parallel/psTasks.hpp" #include "gc/shared/taskqueue.inline.hpp" #include "memory/iterator.hpp" +#include "memory/resourceArea.hpp" #include "memory/universe.hpp" #include "oops/oop.inline.hpp" #include "runtime/fprofiler.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/serial/defNewGeneration.cpp --- a/hotspot/src/share/vm/gc/serial/defNewGeneration.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/serial/defNewGeneration.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -43,6 +43,7 @@ #include "gc/shared/strongRootsScope.hpp" #include "logging/log.hpp" #include "memory/iterator.hpp" +#include "memory/resourceArea.hpp" #include "oops/instanceRefKlass.hpp" #include "oops/oop.inline.hpp" #include "runtime/atomic.inline.hpp" @@ -460,11 +461,11 @@ (HeapWord*)_virtual_space.high()); gch->barrier_set()->resize_covered_region(cmr); - log_debug(gc, heap, ergo)( + log_debug(gc, ergo, heap)( "New generation size " SIZE_FORMAT "K->" SIZE_FORMAT "K [eden=" SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]", new_size_before/K, _virtual_space.committed_size()/K, eden()->capacity()/K, from()->capacity()/K); - log_trace(gc, heap, ergo)( + log_trace(gc, ergo, heap)( " [allowed " SIZE_FORMAT "K extra for %d threads]", thread_increase_size/K, threads_count); } @@ -594,7 +595,7 @@ init_assuming_no_promotion_failure(); - GCTraceTime(Trace, gc) tm("DefNew", NULL, gch->gc_cause()); + GCTraceTime(Trace, gc, phases) tm("DefNew", NULL, gch->gc_cause()); gch->trace_heap_before_gc(&gc_tracer); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/serial/genMarkSweep.cpp --- a/hotspot/src/share/vm/gc/serial/genMarkSweep.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/serial/genMarkSweep.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -180,7 +180,7 @@ void GenMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) { // Recursively traverse all live objects and mark them - GCTraceTime(Trace, gc) tm("Phase 1: Mark live objects", 
_gc_timer); + GCTraceTime(Info, gc, phases) tm("Phase 1: Mark live objects", _gc_timer); GenCollectedHeap* gch = GenCollectedHeap::heap(); @@ -208,6 +208,8 @@ // Process reference objects found during marking { + GCTraceTime(Debug, gc, phases) tm_m("Reference Processing", gc_timer()); + ref_processor()->setup_policy(clear_all_softrefs); const ReferenceProcessorStats& stats = ref_processor()->process_discovered_references( @@ -218,20 +220,30 @@ // This is the point where the entire marking should have completed. assert(_marking_stack.is_empty(), "Marking should have completed"); - // Unload classes and purge the SystemDictionary. - bool purged_class = SystemDictionary::do_unloading(&is_alive); + { + GCTraceTime(Debug, gc, phases) tm_m("Class Unloading", gc_timer()); - // Unload nmethods. - CodeCache::do_unloading(&is_alive, purged_class); + // Unload classes and purge the SystemDictionary. + bool purged_class = SystemDictionary::do_unloading(&is_alive); + + // Unload nmethods. + CodeCache::do_unloading(&is_alive, purged_class); - // Prune dead klasses from subklass/sibling/implementor lists. - Klass::clean_weak_klass_links(&is_alive); + // Prune dead klasses from subklass/sibling/implementor lists. + Klass::clean_weak_klass_links(&is_alive); + } - // Delete entries for dead interned strings. - StringTable::unlink(&is_alive); + { + GCTraceTime(Debug, gc, phases) t("Scrub String Table", gc_timer()); + // Delete entries for dead interned strings. + StringTable::unlink(&is_alive); + } - // Clean up unreferenced symbols in symbol table. - SymbolTable::unlink(); + { + GCTraceTime(Debug, gc, phases) t("Scrub Symbol Table", gc_timer()); + // Clean up unreferenced symbols in symbol table. + SymbolTable::unlink(); + } gc_tracer()->report_object_count_after_gc(&is_alive); } @@ -253,7 +265,7 @@ GenCollectedHeap* gch = GenCollectedHeap::heap(); - GCTraceTime(Trace, gc) tm("Phase 2: Compute new object addresses", _gc_timer); + GCTraceTime(Info, gc, phases) tm("Phase 2: Compute new object addresses", _gc_timer); gch->prepare_for_compaction(); } @@ -269,7 +281,7 @@ GenCollectedHeap* gch = GenCollectedHeap::heap(); // Adjust the pointers to reflect the new locations - GCTraceTime(Trace, gc) tm("Phase 3: Adjust pointers", _gc_timer); + GCTraceTime(Info, gc, phases) tm("Phase 3: Adjust pointers", gc_timer()); // Need new claim bits for the pointer adjustment tracing. ClassLoaderDataGraph::clear_claimed_marks(); @@ -321,7 +333,7 @@ // to use a higher index (saved from phase2) when verifying perm_gen. 
GenCollectedHeap* gch = GenCollectedHeap::heap(); - GCTraceTime(Trace, gc) tm("Phase 4: Move objects", _gc_timer); + GCTraceTime(Info, gc, phases) tm("Phase 4: Move objects", _gc_timer); GenCompactClosure blk; gch->generation_iterate(&blk, true); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/shared/barrierSet.cpp --- a/hotspot/src/share/vm/gc/shared/barrierSet.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/shared/barrierSet.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -30,10 +30,6 @@ // count is number of array elements being written void BarrierSet::static_write_ref_array_pre(HeapWord* start, size_t count) { assert(count <= (size_t)max_intx, "count too large"); -#if 0 - warning("Pre: \t" INTPTR_FORMAT "[" SIZE_FORMAT "]\t", - start, count); -#endif if (UseCompressedOops) { Universe::heap()->barrier_set()->write_ref_array_pre((narrowOop*)start, (int)count, false); } else { diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/shared/barrierSet.inline.hpp --- a/hotspot/src/share/vm/gc/shared/barrierSet.inline.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/shared/barrierSet.inline.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -78,10 +78,6 @@ // If compressed oops were not being used, these should already be aligned assert(UseCompressedOops || (aligned_start == start && aligned_end == end), "Expected heap word alignment of start and end"); -#if 0 - warning("Post:\t" INTPTR_FORMAT "[" SIZE_FORMAT "] : [" INTPTR_FORMAT "," INTPTR_FORMAT ")\t", - start, count, aligned_start, aligned_end); -#endif write_ref_array_work(MemRegion(aligned_start, aligned_end)); } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/shared/cardGeneration.cpp --- a/hotspot/src/share/vm/gc/shared/cardGeneration.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/shared/cardGeneration.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -144,7 +144,7 @@ const size_t remaining_bytes = _virtual_space.uncommitted_size(); if (remaining_bytes > 0) { success = grow_by(remaining_bytes); - DEBUG_ONLY(if (!success) warning("grow to reserved failed");) + DEBUG_ONLY(if (!success) log_warning(gc)("grow to reserved failed");) } return success; } @@ -254,19 +254,22 @@ if (capacity_after_gc > maximum_desired_capacity) { // Capacity too large, compute shrinking size shrink_bytes = capacity_after_gc - maximum_desired_capacity; - // We don't want shrink all the way back to initSize if people call - // System.gc(), because some programs do that between "phases" and then - // we'd just have to grow the heap up again for the next phase. So we - // damp the shrinking: 0% on the first call, 10% on the second call, 40% - // on the third call, and 100% by the fourth call. But if we recompute - // size without shrinking, it goes back to 0%. - shrink_bytes = shrink_bytes / 100 * current_shrink_factor; + if (ShrinkHeapInSteps) { + // If ShrinkHeapInSteps is true (the default), + // we don't want to shrink all the way back to initSize if people call + // System.gc(), because some programs do that between "phases" and then + // we'd just have to grow the heap up again for the next phase. 
So we + // damp the shrinking: 0% on the first call, 10% on the second call, 40% + // on the third call, and 100% by the fourth call. But if we recompute + // size without shrinking, it goes back to 0%. + shrink_bytes = shrink_bytes / 100 * current_shrink_factor; + if (current_shrink_factor == 0) { + _shrink_factor = 10; + } else { + _shrink_factor = MIN2(current_shrink_factor * 4, (size_t) 100); + } + } assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size"); - if (current_shrink_factor == 0) { - _shrink_factor = 10; - } else { - _shrink_factor = MIN2(current_shrink_factor * 4, (size_t) 100); - } log_trace(gc, heap)(" shrinking: initSize: %.1fK maximum_desired_capacity: %.1fK", initial_size() / (double) K, maximum_desired_capacity / (double) K); log_trace(gc, heap)(" shrink_bytes: %.1fK current_shrink_factor: " SIZE_FORMAT " new shrink factor: " SIZE_FORMAT " _min_heap_delta_bytes: %.1fK", diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/shared/cardTableModRefBS.cpp --- a/hotspot/src/share/vm/gc/shared/cardTableModRefBS.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/shared/cardTableModRefBS.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -500,16 +500,14 @@ bool failed = (val_equals) ? (curr_val != val) : (curr_val == val); if (failed) { if (!failures) { - tty->cr(); - tty->print_cr("== CT verification failed: [" INTPTR_FORMAT "," INTPTR_FORMAT "]", p2i(start), p2i(end)); - tty->print_cr("== %sexpecting value: %d", - (val_equals) ? "" : "not ", val); + log_error(gc, verify)("== CT verification failed: [" INTPTR_FORMAT "," INTPTR_FORMAT "]", p2i(start), p2i(end)); + log_error(gc, verify)("== %sexpecting value: %d", (val_equals) ? "" : "not ", val); failures = true; } - tty->print_cr("== card " PTR_FORMAT " [" PTR_FORMAT "," PTR_FORMAT "], " - "val: %d", p2i(curr), p2i(addr_for(curr)), - p2i((HeapWord*) (((size_t) addr_for(curr)) + card_size)), - (int) curr_val); + log_error(gc, verify)("== card " PTR_FORMAT " [" PTR_FORMAT "," PTR_FORMAT "], val: %d", + p2i(curr), p2i(addr_for(curr)), + p2i((HeapWord*) (((size_t) addr_for(curr)) + card_size)), + (int) curr_val); } } guarantee(!failures, "there should not have been any failures"); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/shared/cardTableModRefBS.hpp --- a/hotspot/src/share/vm/gc/shared/cardTableModRefBS.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/shared/cardTableModRefBS.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -89,14 +89,6 @@ MemRegion _guard_region; protected: - // Initialization utilities; covered_words is the size of the covered region - // in, um, words. - inline size_t cards_required(size_t covered_words) { - // Add one for a guard card, used to detect errors. - const size_t words = align_size_up(covered_words, card_size_in_words); - return words / card_size_in_words + 1; - } - inline size_t compute_byte_map_size(); // Finds and return the index of the region, if any, to which the given @@ -172,6 +164,14 @@ bool has_write_ref_pre_barrier() { return false; } + // Initialization utilities; covered_words is the size of the covered region + // in, um, words. 
+ inline size_t cards_required(size_t covered_words) { + // Add one for a guard card, used to detect errors. + const size_t words = align_size_up(covered_words, card_size_in_words); + return words / card_size_in_words + 1; + } + protected: CardTableModRefBS(MemRegion whole_heap, const BarrierSet::FakeRtti& fake_rtti); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/shared/cardTableRS.cpp --- a/hotspot/src/share/vm/gc/shared/cardTableRS.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/shared/cardTableRS.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -325,17 +325,17 @@ // In the case of CMS+ParNew, issue a warning if (!ur.contains(urasm)) { assert(UseConcMarkSweepGC, "Tautology: see assert above"); - warning("CMS+ParNew: Did you forget to call save_marks()? " - "[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in " - "[" PTR_FORMAT ", " PTR_FORMAT ")", - p2i(urasm.start()), p2i(urasm.end()), p2i(ur.start()), p2i(ur.end())); + log_warning(gc)("CMS+ParNew: Did you forget to call save_marks()? " + "[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in " + "[" PTR_FORMAT ", " PTR_FORMAT ")", + p2i(urasm.start()), p2i(urasm.end()), p2i(ur.start()), p2i(ur.end())); MemRegion ur2 = sp->used_region(); MemRegion urasm2 = sp->used_region_at_save_marks(); if (!ur.equals(ur2)) { - warning("CMS+ParNew: Flickering used_region()!!"); + log_warning(gc)("CMS+ParNew: Flickering used_region()!!"); } if (!urasm.equals(urasm2)) { - warning("CMS+ParNew: Flickering used_region_at_save_marks()!!"); + log_warning(gc)("CMS+ParNew: Flickering used_region_at_save_marks()!!"); } ShouldNotReachHere(); } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/shared/collectedHeap.cpp --- a/hotspot/src/share/vm/gc/shared/collectedHeap.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/shared/collectedHeap.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -35,6 +35,7 @@ #include "gc/shared/vmGCOperations.hpp" #include "logging/log.hpp" #include "memory/metaspace.hpp" +#include "memory/resourceArea.hpp" #include "oops/instanceMirrorKlass.hpp" #include "oops/oop.inline.hpp" #include "runtime/init.hpp" @@ -213,7 +214,7 @@ do_full_collection(false); // don't clear all soft refs break; } - case GCCause::_last_ditch_collection: { + case GCCause::_metadata_GC_clear_soft_refs: { HandleMark hm; do_full_collection(true); // do clear all soft refs break; @@ -580,7 +581,7 @@ HeapDumper::dump_heap(); } - LogHandle(gc, classhisto) log; + Log(gc, classhisto) log; if (log.is_trace()) { GCTraceTime(Trace, gc, classhisto) tm(before ? "Class Histogram (before full gc)" : "Class Histogram (after full gc)", timer); ResourceMark rm; diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/shared/collectedHeap.hpp --- a/hotspot/src/share/vm/gc/shared/collectedHeap.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/shared/collectedHeap.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -438,6 +438,12 @@ // remembered set. virtual void flush_deferred_store_barrier(JavaThread* thread); + // Should return true if the reference pending list lock is + // acquired from non-Java threads, such as a concurrent GC thread. 
+ virtual bool needs_reference_pending_list_locker_thread() const { + return false; + } + // Perform a collection of the heap; intended for use in implementing // "System.gc". This probably implies as full a collection as the // "CollectedHeap" supports. diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/shared/collectorPolicy.cpp --- a/hotspot/src/share/vm/gc/shared/collectorPolicy.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/shared/collectorPolicy.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -96,6 +96,9 @@ } // Check heap parameter properties + if (MaxHeapSize < 2 * M) { + vm_exit_during_initialization("Too small maximum heap"); + } if (InitialHeapSize < M) { vm_exit_during_initialization("Too small initial heap"); } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/shared/concurrentGCThread.cpp --- a/hotspot/src/share/vm/gc/shared/concurrentGCThread.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/shared/concurrentGCThread.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -37,12 +37,12 @@ _should_terminate(false), _has_terminated(false) { }; -void ConcurrentGCThread::create_and_start() { +void ConcurrentGCThread::create_and_start(ThreadPriority prio) { if (os::create_thread(this, os::cgc_thread)) { // XXX: need to set this to low priority // unless "aggressive mode" set; priority // should be just less than that of VMThread. - os::set_priority(this, NearMaxPriority); + os::set_priority(this, prio); if (!_should_terminate && !DisableStartThread) { os::start_thread(this); } @@ -75,130 +75,30 @@ } } -static void _sltLoop(JavaThread* thread, TRAPS) { - SurrogateLockerThread* slt = (SurrogateLockerThread*)thread; - slt->loop(); -} - -SurrogateLockerThread::SurrogateLockerThread() : - JavaThread(&_sltLoop), - _monitor(Mutex::nonleaf, "SLTMonitor", false, - Monitor::_safepoint_check_sometimes), - _buffer(empty) -{} - -SurrogateLockerThread* SurrogateLockerThread::make(TRAPS) { - Klass* k = - SystemDictionary::resolve_or_fail(vmSymbols::java_lang_Thread(), - true, CHECK_NULL); - instanceKlassHandle klass (THREAD, k); - instanceHandle thread_oop = klass->allocate_instance_handle(CHECK_NULL); - - const char thread_name[] = "Surrogate Locker Thread (Concurrent GC)"; - Handle string = java_lang_String::create_from_str(thread_name, CHECK_NULL); +void ConcurrentGCThread::run() { + initialize_in_thread(); + wait_for_universe_init(); - // Initialize thread_oop to put it into the system threadGroup - Handle thread_group (THREAD, Universe::system_thread_group()); - JavaValue result(T_VOID); - JavaCalls::call_special(&result, thread_oop, - klass, - vmSymbols::object_initializer_name(), - vmSymbols::threadgroup_string_void_signature(), - thread_group, - string, - CHECK_NULL); - - SurrogateLockerThread* res; - { - MutexLocker mu(Threads_lock); - res = new SurrogateLockerThread(); + run_service(); - // At this point it may be possible that no osthread was created for the - // JavaThread due to lack of memory. We would have to throw an exception - // in that case. However, since this must work and we do not allow - // exceptions anyway, check and abort if this fails. 
- if (res == NULL || res->osthread() == NULL) { - vm_exit_during_initialization("java.lang.OutOfMemoryError", - os::native_thread_creation_failed_msg()); - } - java_lang_Thread::set_thread(thread_oop(), res); - java_lang_Thread::set_priority(thread_oop(), NearMaxPriority); - java_lang_Thread::set_daemon(thread_oop()); - - res->set_threadObj(thread_oop()); - Threads::add(res); - Thread::start(res); - } - os::naked_yield(); // This seems to help with initial start-up of SLT - return res; + terminate(); } -void SurrogateLockerThread::report_missing_slt() { - vm_exit_during_initialization( - "GC before GC support fully initialized: " - "SLT is needed but has not yet been created."); - ShouldNotReachHere(); -} +void ConcurrentGCThread::stop() { + // it is ok to take late safepoints here, if needed + { + MutexLockerEx mu(Terminator_lock); + assert(!_has_terminated, "stop should only be called once"); + assert(!_should_terminate, "stop should only be called once"); + _should_terminate = true; + } -void SurrogateLockerThread::manipulatePLL(SLT_msg_type msg) { - MutexLockerEx x(&_monitor, Mutex::_no_safepoint_check_flag); - assert(_buffer == empty, "Should be empty"); - assert(msg != empty, "empty message"); - assert(!Heap_lock->owned_by_self(), "Heap_lock owned by requesting thread"); + stop_service(); - _buffer = msg; - while (_buffer != empty) { - _monitor.notify(); - _monitor.wait(Mutex::_no_safepoint_check_flag); + { + MutexLockerEx mu(Terminator_lock); + while (!_has_terminated) { + Terminator_lock->wait(); + } } } - -// ======= Surrogate Locker Thread ============= - -void SurrogateLockerThread::loop() { - BasicLock pll_basic_lock; - SLT_msg_type msg; - debug_only(unsigned int owned = 0;) - - while (/* !isTerminated() */ 1) { - { - MutexLocker x(&_monitor); - // Since we are a JavaThread, we can't be here at a safepoint. - assert(!SafepointSynchronize::is_at_safepoint(), - "SLT is a JavaThread"); - // wait for msg buffer to become non-empty - while (_buffer == empty) { - _monitor.notify(); - _monitor.wait(); - } - msg = _buffer; - } - switch(msg) { - case acquirePLL: { - InstanceRefKlass::acquire_pending_list_lock(&pll_basic_lock); - debug_only(owned++;) - break; - } - case releaseAndNotifyPLL: { - assert(owned > 0, "Don't have PLL"); - InstanceRefKlass::release_and_notify_pending_list_lock(&pll_basic_lock); - debug_only(owned--;) - break; - } - case empty: - default: { - guarantee(false,"Unexpected message in _buffer"); - break; - } - } - { - MutexLocker x(&_monitor); - // Since we are a JavaThread, we can't be here at a safepoint. - assert(!SafepointSynchronize::is_at_safepoint(), - "SLT is a JavaThread"); - _buffer = empty; - _monitor.notify(); - } - } - assert(!_monitor.owned_by_self(), "Should unlock before exit."); -} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/shared/concurrentGCThread.hpp --- a/hotspot/src/share/vm/gc/shared/concurrentGCThread.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/shared/concurrentGCThread.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -31,13 +31,9 @@ class ConcurrentGCThread: public NamedThread { friend class VMStructs; -protected: bool volatile _should_terminate; bool _has_terminated; - // Create and start the thread (setting it's priority high.) - void create_and_start(); - // Do initialization steps in the thread: record stack base and size, // init thread local storage, set JNI handle block. void initialize_in_thread(); @@ -49,44 +45,29 @@ // concurrent work. 
void terminate(); +protected: + // Create and start the thread (setting it's priority.) + void create_and_start(ThreadPriority prio = NearMaxPriority); + + // Do the specific GC work. Called by run() after initialization complete. + virtual void run_service() = 0; + + // Shut down the specific GC work. Called by stop() as part of termination protocol. + virtual void stop_service() = 0; + public: ConcurrentGCThread(); // Tester bool is_ConcurrentGC_thread() const { return true; } -}; + + virtual void run(); -// The SurrogateLockerThread is used by concurrent GC threads for -// manipulating Java monitors, in particular, currently for -// manipulating the pending_list_lock. XXX -class SurrogateLockerThread: public JavaThread { - friend class VMStructs; - public: - enum SLT_msg_type { - empty = 0, // no message - acquirePLL, // acquire pending list lock - releaseAndNotifyPLL // notify and release pending list lock - }; - private: - // the following are shared with the CMSThread - SLT_msg_type _buffer; // communication buffer - Monitor _monitor; // monitor controlling buffer - BasicLock _basicLock; // used for PLL locking + // shutdown following termination protocol + virtual void stop(); - public: - static SurrogateLockerThread* make(TRAPS); - - // Terminate VM with error message that SLT needed but not yet created. - static void report_missing_slt(); - - SurrogateLockerThread(); - - bool is_hidden_from_external_view() const { return true; } - - void loop(); // main method - - void manipulatePLL(SLT_msg_type msg); - + bool should_terminate() { return _should_terminate; } + bool has_terminated() { return _has_terminated; } }; #endif // SHARE_VM_GC_SHARED_CONCURRENTGCTHREAD_HPP diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/shared/gcCause.cpp --- a/hotspot/src/share/vm/gc/shared/gcCause.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/shared/gcCause.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -57,6 +57,9 @@ case _wb_conc_mark: return "WhiteBox Initiated Concurrent Mark"; + case _wb_full_gc: + return "WhiteBox Initiated Full GC"; + case _update_allocation_context_stats_inc: case _update_allocation_context_stats_full: return "Update Allocation Context Stats"; @@ -73,6 +76,9 @@ case _metadata_GC_threshold: return "Metadata GC Threshold"; + case _metadata_GC_clear_soft_refs: + return "Metadata GC Clear Soft References"; + case _cms_generation_full: return "CMS Generation Full"; @@ -100,9 +106,6 @@ case _g1_humongous_allocation: return "G1 Humongous Allocation"; - case _last_ditch_collection: - return "Last ditch collection"; - case _dcmd_gc_run: return "Diagnostic Command"; diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/shared/gcCause.hpp --- a/hotspot/src/share/vm/gc/shared/gcCause.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/shared/gcCause.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -33,6 +33,9 @@ // use of this class grows, we should split it into public // and implementation-private "causes". // +// The definitions in the SA code should be kept in sync +// with the definitions here. 
+// class GCCause : public AllStatic { public: @@ -48,6 +51,7 @@ _heap_dump, _wb_young_gc, _wb_conc_mark, + _wb_full_gc, _update_allocation_context_stats_inc, _update_allocation_context_stats_full, @@ -60,6 +64,7 @@ _tenured_generation_full, _metadata_GC_threshold, + _metadata_GC_clear_soft_refs, _cms_generation_full, _cms_initial_mark, @@ -73,8 +78,6 @@ _g1_inc_collection_pause, _g1_humongous_allocation, - _last_ditch_collection, - _dcmd_gc_run, _last_gc_cause @@ -103,22 +106,18 @@ // _allocation_failure is the generic cause a collection which could result // in the collection of the tenured generation if there is not enough space // in the tenured generation to support a young GC. - // _last_ditch_collection is a collection done to include SoftReferences. return (cause == GCCause::_tenured_generation_full || cause == GCCause::_cms_generation_full || cause == GCCause::_adaptive_size_policy || - cause == GCCause::_allocation_failure || - cause == GCCause::_last_ditch_collection); + cause == GCCause::_allocation_failure); } // Causes for collection of the young generation inline static bool is_allocation_failure_gc(GCCause::Cause cause) { // _allocation_failure is the generic cause a collection for allocation failure // _adaptive_size_policy is for a collecton done before a full GC - // _last_ditch_collection is a collection done to include SoftReferences. return (cause == GCCause::_allocation_failure || - cause == GCCause::_adaptive_size_policy || - cause == GCCause::_last_ditch_collection); + cause == GCCause::_adaptive_size_policy); } // Return a string describing the GCCause. diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/shared/gcLocker.cpp --- a/hotspot/src/share/vm/gc/shared/gcLocker.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/shared/gcLocker.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -51,10 +51,10 @@ } } if (_jni_lock_count != count) { - tty->print_cr("critical counts don't match: %d != %d", _jni_lock_count, count); + log_error(gc, verify)("critical counts don't match: %d != %d", _jni_lock_count, count); for (JavaThread* thr = Threads::first(); thr; thr = thr->next()) { if (thr->in_critical()) { - tty->print_cr(INTPTR_FORMAT " in_critical %d", p2i(thr), thr->in_critical()); + log_error(gc, verify)(INTPTR_FORMAT " in_critical %d", p2i(thr), thr->in_critical()); } } } @@ -75,7 +75,7 @@ #endif void GCLocker::log_debug_jni(const char* msg) { - LogHandle(gc, jni) log; + Log(gc, jni) log; if (log.is_debug()) { ResourceMark rm; // JavaThread::name() allocates to convert to UTF8 log.debug("%s Thread \"%s\" %d locked.", msg, Thread::current()->name(), _jni_lock_count); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/shared/gcTrace.cpp --- a/hotspot/src/share/vm/gc/shared/gcTrace.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/shared/gcTrace.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -242,4 +242,12 @@ prediction_active); } +void G1OldTracer::report_gc_start_impl(GCCause::Cause cause, const Ticks& timestamp) { + _shared_gc_info.set_start_timestamp(timestamp); +} + +void G1OldTracer::set_gc_cause(GCCause::Cause cause) { + _shared_gc_info.set_cause(cause); +} + #endif diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/shared/gcTrace.hpp --- a/hotspot/src/share/vm/gc/shared/gcTrace.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/shared/gcTrace.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -126,7 +126,7 @@ protected: GCTracer(GCName name) : _shared_gc_info(name) {} - void report_gc_start_impl(GCCause::Cause cause, const Ticks& timestamp); + virtual void report_gc_start_impl(GCCause::Cause cause, const Ticks& timestamp); virtual void report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions); private: @@ -297,8 +297,11 @@ }; class G1OldTracer : public OldGCTracer { + protected: + void report_gc_start_impl(GCCause::Cause cause, const Ticks& timestamp); public: G1OldTracer() : OldGCTracer(G1Old) {} + void set_gc_cause(GCCause::Cause cause); }; #endif // SHARE_VM_GC_SHARED_GCTRACE_HPP diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/shared/gcTraceTime.hpp --- a/hotspot/src/share/vm/gc/shared/gcTraceTime.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/shared/gcTraceTime.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -26,6 +26,8 @@ #define SHARE_VM_GC_SHARED_GCTRACETIME_HPP #include "logging/log.hpp" +#include "logging/logHandle.hpp" +#include "logging/logStream.hpp" #include "memory/allocation.hpp" #include "utilities/ticks.hpp" @@ -41,10 +43,10 @@ class GCTimer; -template class GCTraceTimeImpl : public StackObj { private: + LogTargetHandle _out_start; + LogTargetHandle _out_stop; bool _enabled; Ticks _start_ticks; const char* _title; @@ -57,10 +59,18 @@ void time_stamp(Ticks& ticks); public: - GCTraceTimeImpl(const char* title, GCTimer* timer = NULL, GCCause::Cause gc_cause = GCCause::_no_gc, bool log_heap_usage = false); + GCTraceTimeImpl(LogTargetHandle out_start, LogTargetHandle out_end, const char* title, GCTimer* timer = NULL, GCCause::Cause gc_cause = GCCause::_no_gc, bool log_heap_usage = false); ~GCTraceTimeImpl(); }; +template +class GCTraceTimeImplWrapper : public StackObj { + GCTraceTimeImpl _impl; +public: + GCTraceTimeImplWrapper(const char* title, GCTimer* timer = NULL, GCCause::Cause gc_cause = GCCause::_no_gc, bool log_heap_usage = false); + ~GCTraceTimeImplWrapper(); +}; + // Similar to GCTraceTimeImpl but is intended for concurrent phase logging, // which is a bit simpler and should always print the start line, i.e. not add the "start" tag. 
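[Editor's sketch] The gcTraceTime.hpp hunk above replaces the heavily templated GCTraceTimeImpl with one that carries two pre-resolved log targets, one for an optional "start" line and one for the "stop" line with timing, plus a thin GCTraceTimeImplWrapper that keeps the old macro interface. The following is a minimal standalone sketch of that shape only; it uses std::function sinks and invented names (LogTargetSketch, ScopedPhaseTimer) instead of HotSpot's LogTargetHandle/LogStream, so it illustrates the design, not the actual implementation.

// Standalone sketch (C++17): an RAII scoped timer reporting to two
// independently enabled "log targets", mirroring the refactoring above.
#include <chrono>
#include <cstdio>
#include <functional>
#include <string>

struct LogTargetSketch {
  bool enabled = false;
  std::function<void(const std::string&)> write;  // sink for one log line
};

class ScopedPhaseTimer {
  LogTargetSketch _start;
  LogTargetSketch _stop;
  std::string _title;
  std::chrono::steady_clock::time_point _begin;
public:
  ScopedPhaseTimer(LogTargetSketch start, LogTargetSketch stop, std::string title)
      : _start(std::move(start)), _stop(std::move(stop)), _title(std::move(title)),
        _begin(std::chrono::steady_clock::now()) {
    if (_start.enabled) {
      _start.write(_title);  // the "start" line carries no timing information
    }
  }
  ~ScopedPhaseTimer() {
    if (!_stop.enabled) return;
    double ms = std::chrono::duration_cast<std::chrono::duration<double, std::milli>>(
                    std::chrono::steady_clock::now() - _begin).count();
    char buf[128];
    std::snprintf(buf, sizeof(buf), "%s %.3fms", _title.c_str(), ms);
    _stop.write(buf);
  }
};

int main() {
  LogTargetSketch start{true, [](const std::string& s) { std::printf("[gc,start] %s\n", s.c_str()); }};
  LogTargetSketch stop{true, [](const std::string& s) { std::printf("[gc      ] %s\n", s.c_str()); }};
  ScopedPhaseTimer t(start, stop, "Pause Young (sketch)");
  // ... work being timed ...
  return 0;
}

Resolving the two targets once, up front, is what lets the real class drop the level/tag template parameters from every member function.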
template " SIZE_FORMAT "M(" SIZE_FORMAT "M)" -template -void GCTraceTimeImpl::log_start(jlong start_counter) { - if (Log::is_level(Level)) { - FormatBuffer<> start_msg("%s", _title); +inline void GCTraceTimeImpl::log_start(jlong start_counter) { + if (_out_start.is_enabled()) { + LogStream out(_out_start); + + out.print("%s", _title); if (_gc_cause != GCCause::_no_gc) { - start_msg.append(" (%s)", GCCause::to_string(_gc_cause)); + out.print(" (%s)", GCCause::to_string(_gc_cause)); } - start_msg.append(" (%.3fs)", TimeHelper::counter_to_seconds(start_counter)); - // Make sure to put the "start" tag last in the tag set - STATIC_ASSERT(T0 != LogTag::__NO_TAG); // Need some tag to log on. - STATIC_ASSERT(T4 == LogTag::__NO_TAG); // Need to leave at least the last tag for the "start" tag in log_start() - if (T1 == LogTag::__NO_TAG) { - Log::template write("%s", start_msg.buffer()); - } else if (T2 == LogTag::__NO_TAG) { - Log::template write("%s", start_msg.buffer()); - } else if (T3 == LogTag::__NO_TAG) { - Log::template write("%s", start_msg.buffer()); - } else { - Log::template write("%s", start_msg.buffer()); - } + out.print_cr(" (%.3fs)", TimeHelper::counter_to_seconds(start_counter)); } } -template -void GCTraceTimeImpl::log_stop(jlong start_counter, jlong stop_counter) { +inline void GCTraceTimeImpl::log_stop(jlong start_counter, jlong stop_counter) { double duration_in_ms = TimeHelper::counter_to_millis(stop_counter - start_counter); double start_time_in_secs = TimeHelper::counter_to_seconds(start_counter); double stop_time_in_secs = TimeHelper::counter_to_seconds(stop_counter); - FormatBuffer<> stop_msg("%s", _title); + + LogStream out(_out_stop); + + out.print("%s", _title); + if (_gc_cause != GCCause::_no_gc) { - stop_msg.append(" (%s)", GCCause::to_string(_gc_cause)); + out.print(" (%s)", GCCause::to_string(_gc_cause)); } - if (_heap_usage_before == SIZE_MAX) { - Log::template write("%s " LOG_STOP_TIME_FORMAT, - stop_msg.buffer(), start_time_in_secs, stop_time_in_secs, duration_in_ms); - } else { + + if (_heap_usage_before != SIZE_MAX) { CollectedHeap* heap = Universe::heap(); size_t used_before_m = _heap_usage_before / M; size_t used_m = heap->used() / M; size_t capacity_m = heap->capacity() / M; - Log::template write("%s " LOG_STOP_HEAP_FORMAT " " LOG_STOP_TIME_FORMAT, - stop_msg.buffer(), used_before_m, used_m, capacity_m, start_time_in_secs, stop_time_in_secs, duration_in_ms); + out.print(" " LOG_STOP_HEAP_FORMAT, used_before_m, used_m, capacity_m); } + + out.print_cr(" " LOG_STOP_TIME_FORMAT, start_time_in_secs, stop_time_in_secs, duration_in_ms); } -template -void GCTraceTimeImpl::time_stamp(Ticks& ticks) { +inline void GCTraceTimeImpl::time_stamp(Ticks& ticks) { if (_enabled || _timer != NULL) { ticks.stamp(); } } -template -GCTraceTimeImpl::GCTraceTimeImpl(const char* title, GCTimer* timer, GCCause::Cause gc_cause, bool log_heap_usage) : - _enabled(Log::is_level(Level)), +inline GCTraceTimeImpl::GCTraceTimeImpl(LogTargetHandle out_start, LogTargetHandle out_stop, const char* title, GCTimer* timer, GCCause::Cause gc_cause, bool log_heap_usage) : + _enabled(out_stop.is_enabled()), + _out_start(out_start), + _out_stop(out_stop), _start_ticks(), _heap_usage_before(SIZE_MAX), _title(title), @@ -111,8 +102,7 @@ } } -template -GCTraceTimeImpl::~GCTraceTimeImpl() { +inline GCTraceTimeImpl::~GCTraceTimeImpl() { Ticks stop_ticks; time_stamp(stop_ticks); if (_enabled) { @@ -125,9 +115,9 @@ template GCTraceConcTimeImpl::GCTraceConcTimeImpl(const char* title) : - 
_enabled(Log::is_level(Level)), _start_time(os::elapsed_counter()), _title(title) { + _enabled(LogImpl::is_level(Level)), _start_time(os::elapsed_counter()), _title(title) { if (_enabled) { - Log::template write("%s (%.3fs)", _title, TimeHelper::counter_to_seconds(_start_time)); + LogImpl::template write("%s (%.3fs)", _title, TimeHelper::counter_to_seconds(_start_time)); } } @@ -135,7 +125,7 @@ GCTraceConcTimeImpl::~GCTraceConcTimeImpl() { if (_enabled) { jlong stop_time = os::elapsed_counter(); - Log::template write("%s " LOG_STOP_TIME_FORMAT, + LogImpl::template write("%s " LOG_STOP_TIME_FORMAT, _title, TimeHelper::counter_to_seconds(_start_time), TimeHelper::counter_to_seconds(stop_time), @@ -143,7 +133,34 @@ } } -#define GCTraceTime(Level, ...) GCTraceTimeImpl +// Figure out the first __NO_TAG position and replace it with 'start'. +#define INJECT_START_TAG(T1, T2, T3, T4) \ + (( T1 == LogTag::__NO_TAG) ? PREFIX_LOG_TAG(start) : T1), \ + ((T1 != LogTag::__NO_TAG && T2 == LogTag::__NO_TAG) ? PREFIX_LOG_TAG(start) : T2), \ + ((T2 != LogTag::__NO_TAG && T3 == LogTag::__NO_TAG) ? PREFIX_LOG_TAG(start) : T3), \ + ((T3 != LogTag::__NO_TAG && T4 == LogTag::__NO_TAG) ? PREFIX_LOG_TAG(start) : T4) + +template +GCTraceTimeImplWrapper::GCTraceTimeImplWrapper( + const char* title, GCTimer* timer, GCCause::Cause gc_cause, bool log_heap_usage) + : _impl( + LogTargetHandle::create(), + LogTargetHandle::create(), + title, + timer, + gc_cause, + log_heap_usage) { + + STATIC_ASSERT(T0 != LogTag::__NO_TAG); // Need some tag to log on. + STATIC_ASSERT(T4 == LogTag::__NO_TAG); // Need to leave at least the last tag for the "start" tag in log_start() +} + +#undef INJECT_START_TAG + +template +GCTraceTimeImplWrapper::~GCTraceTimeImplWrapper() {} + +#define GCTraceTime(Level, ...) GCTraceTimeImplWrapper #define GCTraceConcTime(Level, ...) GCTraceConcTimeImpl #endif // SHARE_VM_GC_SHARED_GCTRACETIME_INLINE_HPP diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/shared/genCollectedHeap.cpp --- a/hotspot/src/share/vm/gc/shared/genCollectedHeap.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/shared/genCollectedHeap.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -295,7 +295,8 @@ } bool GenCollectedHeap::must_clear_all_soft_refs() { - return _gc_cause == GCCause::_last_ditch_collection; + return _gc_cause == GCCause::_metadata_GC_clear_soft_refs || + _gc_cause == GCCause::_wb_full_gc; } bool GenCollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) { @@ -315,7 +316,7 @@ bool is_tlab, bool run_verification, bool clear_soft_refs, bool restore_marks_for_biased_locking) { FormatBuffer<> title("Collect gen: %s", gen->short_name()); - GCTraceTime(Debug, gc) t1(title); + GCTraceTime(Trace, gc, phases) t1(title); TraceCollectorStats tcs(gen->counters()); TraceMemoryManagerStats tmms(gen->kind(),gc_cause()); @@ -684,15 +685,8 @@ _process_strong_tasks->all_tasks_completed(scope->n_threads()); } - -class AlwaysTrueClosure: public BoolObjectClosure { -public: - bool do_object_b(oop p) { return true; } -}; -static AlwaysTrueClosure always_true; - void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) { - JNIHandles::weak_oops_do(&always_true, root_closure); + JNIHandles::weak_oops_do(root_closure); _young_gen->ref_processor()->weak_oops_do(root_closure); _old_gen->ref_processor()->weak_oops_do(root_closure); } @@ -1272,7 +1266,7 @@ // back a time later than 'now'. 
jlong retVal = now - tolgc_cl.time(); if (retVal < 0) { - NOT_PRODUCT(warning("time warp: " JLONG_FORMAT, retVal);) + NOT_PRODUCT(log_warning(gc)("time warp: " JLONG_FORMAT, retVal);) return 0; } return retVal; @@ -1281,7 +1275,7 @@ void GenCollectedHeap::stop() { #if INCLUDE_ALL_GCS if (UseConcMarkSweepGC) { - ConcurrentMarkSweepThread::stop(); + ConcurrentMarkSweepThread::cmst()->stop(); } #endif } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/shared/genCollectedHeap.hpp --- a/hotspot/src/share/vm/gc/shared/genCollectedHeap.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/shared/genCollectedHeap.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -281,6 +281,10 @@ return UseConcMarkSweepGC; } + virtual bool needs_reference_pending_list_locker_thread() const { + return UseConcMarkSweepGC; + } + // We don't need barriers for stores to objects in the // young gen and, a fortiori, for initializing stores to // objects therein. This applies to DefNew+Tenured and ParNew+CMS diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/shared/generation.hpp --- a/hotspot/src/share/vm/gc/shared/generation.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/shared/generation.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,6 +27,7 @@ #include "gc/shared/collectorCounters.hpp" #include "gc/shared/referenceProcessor.hpp" +#include "logging/log.hpp" #include "memory/allocation.hpp" #include "memory/memRegion.hpp" #include "memory/universe.hpp" @@ -377,7 +378,7 @@ // have to guard against non-monotonicity. NOT_PRODUCT( if (now < _time_of_last_gc) { - warning("time warp: " JLONG_FORMAT " to " JLONG_FORMAT, _time_of_last_gc, now); + log_warning(gc)("time warp: " JLONG_FORMAT " to " JLONG_FORMAT, _time_of_last_gc, now); } ) return _time_of_last_gc; diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/shared/liveRange.hpp --- a/hotspot/src/share/vm/gc/shared/liveRange.hpp Mon Apr 18 16:18:56 2016 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,55 +0,0 @@ -/* - * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
- * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#ifndef SHARE_VM_GC_SHARED_LIVERANGE_HPP -#define SHARE_VM_GC_SHARED_LIVERANGE_HPP - -#include "memory/memRegion.hpp" -#include "utilities/copy.hpp" - -// This is a shared helper class used during phase 3 and 4 to move all the objects -// Dead regions in a Space are linked together to keep track of the live regions -// so that the live data can be traversed quickly without having to look at each -// object. - -class LiveRange: public MemRegion { -public: - LiveRange(HeapWord* bottom, HeapWord* top): MemRegion(bottom, top) {} - - void set_end(HeapWord* e) { - assert(e >= start(), "should be a non-zero range"); - MemRegion::set_end(e); - } - void set_word_size(size_t ws) { - MemRegion::set_word_size(ws); - } - - LiveRange * next() { return (LiveRange *) end(); } - - void move_to(HeapWord* destination) { - Copy::aligned_conjoint_words(start(), destination, word_size()); - } -}; - -#endif // SHARE_VM_GC_SHARED_LIVERANGE_HPP diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/shared/plab.cpp --- a/hotspot/src/share/vm/gc/shared/plab.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/shared/plab.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -136,7 +136,7 @@ // Calculates plab size for current number of gc worker threads. size_t PLABStats::desired_plab_sz(uint no_of_gc_workers) { - return MAX2(min_size(), (size_t)align_object_size(_desired_net_plab_sz / no_of_gc_workers)); + return (size_t)align_object_size(MIN2(MAX2(min_size(), _desired_net_plab_sz / no_of_gc_workers), max_size())); } // Compute desired plab size for one gc worker thread and latch result for later @@ -175,14 +175,9 @@ size_t recent_plab_sz = used / target_refills; // Take historical weighted average _filter.sample(recent_plab_sz); - // Clip from above and below, and align to object boundary - size_t new_plab_sz = MAX2(min_size(), (size_t)_filter.average()); - new_plab_sz = MIN2(max_size(), new_plab_sz); - new_plab_sz = align_object_size(new_plab_sz); - // Latch the result - _desired_net_plab_sz = new_plab_sz; + _desired_net_plab_sz = MAX2(min_size(), (size_t)_filter.average()); - log_sizing(recent_plab_sz, new_plab_sz); + log_sizing(recent_plab_sz, _desired_net_plab_sz); reset(); } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/shared/preservedMarks.cpp --- a/hotspot/src/share/vm/gc/shared/preservedMarks.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/shared/preservedMarks.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -62,9 +62,14 @@ } void PreservedMarksSet::restore() { + size_t total_size = 0; for (uint i = 0; i < _num; i += 1) { + total_size += get(i)->size(); get(i)->restore(); } + assert_empty(); + + log_trace(gc)("Restored " SIZE_FORMAT " marks", total_size); } void PreservedMarksSet::reclaim() { diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/shared/preservedMarks.hpp --- a/hotspot/src/share/vm/gc/shared/preservedMarks.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/shared/preservedMarks.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -53,6 +53,7 @@ public: bool is_empty() const { return _stack.is_empty(); } + size_t size() const { return _stack.size(); } inline void push_if_necessary(oop obj, markOop m); // Iterate over the stack, restore the preserved marks, then reclaim // the memory taken up by stack chunks. 
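[Editor's sketch] The plab.cpp hunk above changes PLABStats::desired_plab_sz so the per-worker PLAB size is clamped to both min_size() and max_size() before being aligned to an object boundary, rather than clamped only from below. A small self-contained sketch of that clamp-then-align computation follows; the constants and helper names are stand-ins, not HotSpot's actual values or API.

// Sketch (C++17) of the per-worker PLAB sizing clamp from plab.cpp above.
#include <algorithm>
#include <cstddef>
#include <cstdio>

constexpr size_t kMinPlabWords = 64;        // stand-in for PLAB::min_size()
constexpr size_t kMaxPlabWords = 64 * 1024; // stand-in for PLAB::max_size()
constexpr size_t kObjAlignment = 8;         // stand-in object alignment, in words

size_t align_object_size(size_t words) {
  return (words + kObjAlignment - 1) / kObjAlignment * kObjAlignment;
}

// desired_net_words is the latched estimate for all workers together;
// each worker gets its share, clamped to [min, max] and then aligned.
size_t desired_plab_words(size_t desired_net_words, unsigned gc_workers) {
  size_t per_worker = desired_net_words / gc_workers;
  size_t clamped = std::min(std::max(kMinPlabWords, per_worker), kMaxPlabWords);
  return align_object_size(clamped);
}

int main() {
  std::printf("%zu\n", desired_plab_words(1u << 20, 8)); // large estimate is capped at kMaxPlabWords
  std::printf("%zu\n", desired_plab_words(100, 8));      // tiny estimate is raised to kMinPlabWords
  return 0;
}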
@@ -65,7 +66,7 @@ virtual void do_object(oop obj); }; -class PreservedMarksSet VALUE_OBJ_CLASS_SPEC { +class PreservedMarksSet : public CHeapObj { private: // true -> _stacks will be allocated in the C heap // false -> _stacks will be allocated in the resource arena diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/shared/referencePendingListLocker.cpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/src/share/vm/gc/shared/referencePendingListLocker.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,222 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "classfile/javaClasses.hpp" +#include "classfile/systemDictionary.hpp" +#include "gc/shared/collectedHeap.hpp" +#include "gc/shared/referencePendingListLocker.hpp" +#include "memory/universe.hpp" +#include "runtime/javaCalls.hpp" +#include "utilities/preserveException.hpp" + +ReferencePendingListLockerThread::ReferencePendingListLockerThread() : + JavaThread(&start), + _monitor(Monitor::nonleaf, "ReferencePendingListLocker", false, Monitor::_safepoint_check_sometimes), + _message(NONE) {} + +ReferencePendingListLockerThread* ReferencePendingListLockerThread::create(TRAPS) { + // Create Java thread objects + instanceKlassHandle thread_klass = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_Thread(), true, CHECK_NULL); + instanceHandle thread_object = thread_klass->allocate_instance_handle(CHECK_NULL); + Handle thread_name = java_lang_String::create_from_str("Reference Pending List Locker", CHECK_NULL); + Handle thread_group = Universe::system_thread_group(); + JavaValue result(T_VOID); + JavaCalls::call_special(&result, + thread_object, + thread_klass, + vmSymbols::object_initializer_name(), + vmSymbols::threadgroup_string_void_signature(), + thread_group, + thread_name, + CHECK_NULL); + + { + MutexLocker ml(Threads_lock); + + // Allocate thread + ReferencePendingListLockerThread* thread = new ReferencePendingListLockerThread(); + if (thread == NULL || thread->osthread() == NULL) { + vm_exit_during_initialization("java.lang.OutOfMemoryError", + os::native_thread_creation_failed_msg()); + } + + // Initialize thread + java_lang_Thread::set_thread(thread_object(), thread); + java_lang_Thread::set_priority(thread_object(), NearMaxPriority); + java_lang_Thread::set_daemon(thread_object()); + thread->set_threadObj(thread_object()); + + // Start thread + Threads::add(thread); + Thread::start(thread); + + return thread; + } +} + +void 
ReferencePendingListLockerThread::start(JavaThread* thread, TRAPS) { + ReferencePendingListLockerThread* locker_thread = static_cast(thread); + locker_thread->receive_and_handle_messages(); +} + +bool ReferencePendingListLockerThread::is_hidden_from_external_view() const { + return true; +} + +void ReferencePendingListLockerThread::send_message(Message message) { + assert(message != NONE, "Should not be none"); + MonitorLockerEx ml(&_monitor, Monitor::_no_safepoint_check_flag); + + // Wait for completion of current message + while (_message != NONE) { + ml.wait(Monitor::_no_safepoint_check_flag); + } + + // Send new message + _message = message; + ml.notify_all(); + + // Wait for completion of new message + while (_message != NONE) { + ml.wait(Monitor::_no_safepoint_check_flag); + } +} + +void ReferencePendingListLockerThread::receive_and_handle_messages() { + ReferencePendingListLocker pending_list_locker; + MonitorLockerEx ml(&_monitor); + + // Main loop, never terminates + for (;;) { + // Wait for message + while (_message == NONE) { + ml.wait(); + } + + // Handle message + if (_message == LOCK) { + pending_list_locker.lock(); + } else if (_message == UNLOCK) { + pending_list_locker.unlock(); + } else { + ShouldNotReachHere(); + } + + // Clear message + _message = NONE; + ml.notify_all(); + } +} + +void ReferencePendingListLockerThread::lock() { + send_message(LOCK); +} + +void ReferencePendingListLockerThread::unlock() { + send_message(UNLOCK); +} + +bool ReferencePendingListLocker::_is_initialized = false; +ReferencePendingListLockerThread* ReferencePendingListLocker::_locker_thread = NULL; + +void ReferencePendingListLocker::initialize(bool needs_locker_thread, TRAPS) { + if (needs_locker_thread) { + _locker_thread = ReferencePendingListLockerThread::create(CHECK); + } + + _is_initialized = true; +} + +bool ReferencePendingListLocker::is_initialized() { + return _is_initialized; +} + +bool ReferencePendingListLocker::is_locked_by_self() { + oop pending_list_lock = java_lang_ref_Reference::pending_list_lock(); + if (pending_list_lock == NULL) { + return false; + } + + JavaThread* thread = JavaThread::current(); + Handle handle(thread, pending_list_lock); + return ObjectSynchronizer::current_thread_holds_lock(thread, handle); +} + +void ReferencePendingListLocker::lock() { + assert(!Heap_lock->owned_by_self(), "Heap_lock must not be owned by requesting thread"); + + if (Thread::current()->is_Java_thread()) { + assert(java_lang_ref_Reference::pending_list_lock() != NULL, "Not initialized"); + + // We may enter this with a pending exception + PRESERVE_EXCEPTION_MARK; + + HandleMark hm; + Handle handle(THREAD, java_lang_ref_Reference::pending_list_lock()); + + // Lock + ObjectSynchronizer::fast_enter(handle, &_basic_lock, false, THREAD); + + assert(is_locked_by_self(), "Locking failed"); + + if (HAS_PENDING_EXCEPTION) { + CLEAR_PENDING_EXCEPTION; + } + } else { + // Delegate operation to locker thread + assert(_locker_thread != NULL, "Locker thread not created"); + _locker_thread->lock(); + } +} + +void ReferencePendingListLocker::unlock() { + if (Thread::current()->is_Java_thread()) { + assert(java_lang_ref_Reference::pending_list_lock() != NULL, "Not initialized"); + + // We may enter this with a pending exception + PRESERVE_EXCEPTION_MARK; + + HandleMark hm; + Handle handle(THREAD, java_lang_ref_Reference::pending_list_lock()); + + assert(is_locked_by_self(), "Should be locked by self"); + + // Notify waiters if the pending list is non-empty + if 
(java_lang_ref_Reference::pending_list() != NULL) { + ObjectSynchronizer::notifyall(handle, THREAD); + } + + // Unlock + ObjectSynchronizer::fast_exit(handle(), &_basic_lock, THREAD); + + if (HAS_PENDING_EXCEPTION) { + CLEAR_PENDING_EXCEPTION; + } + } else { + // Delegate operation to locker thread + assert(_locker_thread != NULL, "Locker thread not created"); + _locker_thread->unlock(); + } +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/shared/referencePendingListLocker.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/src/share/vm/gc/shared/referencePendingListLocker.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_SHARED_REFERENCEPENDINGLISTLOCKER_HPP +#define SHARE_VM_GC_SHARED_REFERENCEPENDINGLISTLOCKER_HPP + +#include "memory/allocation.hpp" +#include "runtime/basicLock.hpp" +#include "runtime/mutex.hpp" +#include "runtime/thread.hpp" +#include "utilities/exceptions.hpp" + +// +// The ReferencePendingListLockerThread locks and unlocks the reference +// pending list lock on behalf a non-Java thread, typically a concurrent +// GC thread. This interface should not be directly accessed. All uses +// should instead go through the ReferencePendingListLocker, which calls +// this thread if needed. +// +class ReferencePendingListLockerThread : public JavaThread { +private: + enum Message { + NONE, + LOCK, + UNLOCK + }; + + Monitor _monitor; + Message _message; + + ReferencePendingListLockerThread(); + + static void start(JavaThread* thread, TRAPS); + + void send_message(Message message); + void receive_and_handle_messages(); + +public: + static ReferencePendingListLockerThread* create(TRAPS); + + virtual bool is_hidden_from_external_view() const; + + void lock(); + void unlock(); +}; + +// +// The ReferencePendingListLocker is the main interface for locking and +// unlocking the reference pending list lock, which needs to be held by +// the GC when adding references to the pending list. Since this is a +// Java-level monitor it can only be locked/unlocked by a Java thread. +// For this reason there is an option to spawn a helper thread, the +// ReferencePendingListLockerThread, during initialization. If a helper +// thread is spawned all lock operations from non-Java threads will be +// delegated to the helper thread. The helper thread is typically needed +// by concurrent GCs. 
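[Editor's sketch] The ReferencePendingListLockerThread introduced above serializes lock/unlock requests from non-Java threads through a small message protocol on one monitor: a caller waits until the mailbox is empty, posts LOCK or UNLOCK, and then waits until the helper thread has handled the request and cleared the mailbox. Below is a standalone sketch of that request/response hand-off using std::thread and std::condition_variable; all names are illustrative and this is not the HotSpot implementation (which uses the VM's Monitor and JavaThread machinery).

// Standalone sketch (C++17) of the mailbox protocol described above.
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

enum class Message { NONE, LOCK, UNLOCK, QUIT };

class LockerMailbox {
  std::mutex _mutex;
  std::condition_variable _cv;
  Message _message = Message::NONE;

public:
  // Called by requesting threads: post a request, wait until it is handled.
  void send(Message message) {
    std::unique_lock<std::mutex> ml(_mutex);
    _cv.wait(ml, [&] { return _message == Message::NONE; }); // wait for any previous request
    _message = message;
    _cv.notify_all();
    _cv.wait(ml, [&] { return _message == Message::NONE; }); // wait for completion
  }

  // Helper thread main loop: receive and handle messages until QUIT.
  void serve() {
    for (;;) {
      std::unique_lock<std::mutex> ml(_mutex);
      _cv.wait(ml, [&] { return _message != Message::NONE; });
      Message m = _message;
      if (m == Message::LOCK)   std::puts("helper: acquire pending list lock");
      if (m == Message::UNLOCK) std::puts("helper: notify waiters and release lock");
      _message = Message::NONE;  // mark the request as handled
      _cv.notify_all();
      if (m == Message::QUIT) return;
    }
  }
};

int main() {
  LockerMailbox mailbox;
  std::thread helper([&] { mailbox.serve(); });
  mailbox.send(Message::LOCK);    // e.g. before references are enqueued
  mailbox.send(Message::UNLOCK);  // e.g. after enqueueing is done
  mailbox.send(Message::QUIT);
  helper.join();
  return 0;
}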
+// +class ReferencePendingListLocker VALUE_OBJ_CLASS_SPEC { +private: + static bool _is_initialized; + static ReferencePendingListLockerThread* _locker_thread; + BasicLock _basic_lock; + +public: + static void initialize(bool needs_locker_thread, TRAPS); + static bool is_initialized(); + + static bool is_locked_by_self(); + + void lock(); + void unlock(); +}; + +#endif // SHARE_VM_GC_SHARED_REFERENCEPENDINGLISTLOCKER_HPP diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/shared/referenceProcessor.cpp --- a/hotspot/src/share/vm/gc/shared/referenceProcessor.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/shared/referenceProcessor.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -33,6 +33,7 @@ #include "gc/shared/referenceProcessor.inline.hpp" #include "logging/log.hpp" #include "memory/allocation.hpp" +#include "memory/resourceArea.hpp" #include "oops/oop.inline.hpp" #include "runtime/java.hpp" #include "runtime/jniHandles.hpp" @@ -134,7 +135,7 @@ guarantee(!_discovering_refs, "Discovering refs?"); for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) { guarantee(_discovered_refs[i].is_empty(), - "Found non-empty discovered list"); + "Found non-empty discovered list at %u", i); } } #endif @@ -161,8 +162,8 @@ NOT_PRODUCT( if (now < _soft_ref_timestamp_clock) { - warning("time warp: " JLONG_FORMAT " to " JLONG_FORMAT, - _soft_ref_timestamp_clock, now); + log_warning(gc)("time warp: " JLONG_FORMAT " to " JLONG_FORMAT, + _soft_ref_timestamp_clock, now); } ) // The values of now and _soft_ref_timestamp_clock are set using @@ -266,11 +267,6 @@ #ifndef PRODUCT // Calculate the number of jni handles. size_t ReferenceProcessor::count_jni_refs() { - class AlwaysAliveClosure: public BoolObjectClosure { - public: - virtual bool do_object_b(oop obj) { return true; } - }; - class CountHandleClosure: public OopClosure { private: size_t _count; @@ -281,8 +277,7 @@ size_t count() { return _count; } }; CountHandleClosure global_handle_count; - AlwaysAliveClosure always_alive; - JNIHandles::weak_oops_do(&always_alive, &global_handle_count); + JNIHandles::weak_oops_do(&global_handle_count); return global_handle_count.count(); } #endif @@ -645,9 +640,7 @@ OopClosure& keep_alive, VoidClosure& complete_gc) { - Thread* thr = Thread::current(); - int refs_list_index = ((WorkerThread*)thr)->id(); - _ref_processor.process_phase1(_refs_lists[refs_list_index], _policy, + _ref_processor.process_phase1(_refs_lists[i], _policy, &is_alive, &keep_alive, &complete_gc); } private: @@ -683,11 +676,6 @@ OopClosure& keep_alive, VoidClosure& complete_gc) { - // Don't use "refs_list_index" calculated in this way because - // balance_queues() has moved the Ref's into the first n queues. 
- // Thread* thr = Thread::current(); - // int refs_list_index = ((WorkerThread*)thr)->id(); - // _ref_processor.process_phase3(_refs_lists[refs_list_index], _clear_referent, _ref_processor.process_phase3(_refs_lists[i], _clear_referent, &is_alive, &keep_alive, &complete_gc); } @@ -696,19 +684,30 @@ }; #ifndef PRODUCT -void ReferenceProcessor::log_reflist_counts(DiscoveredList ref_lists[], size_t total_refs) { +void ReferenceProcessor::log_reflist_counts(DiscoveredList ref_lists[], uint active_length, size_t total_refs) { if (!log_is_enabled(Trace, gc, ref)) { return; } stringStream st; - for (uint i = 0; i < _max_num_q; ++i) { + for (uint i = 0; i < active_length; ++i) { st.print(SIZE_FORMAT " ", ref_lists[i].length()); } log_develop_trace(gc, ref)("%s= " SIZE_FORMAT, st.as_string(), total_refs); +#ifdef ASSERT + for (uint i = active_length; i < _max_num_q; i++) { + assert(ref_lists[i].length() == 0, SIZE_FORMAT " unexpected References in %u", + ref_lists[i].length(), i); + } +#endif } #endif +void ReferenceProcessor::set_active_mt_degree(uint v) { + _num_q = v; + _next_id = 0; +} + // Balances reference queues. // Move entries from all queues[0, 1, ..., _max_num_q-1] to // queues[0, 1, ..., _num_q-1] because only the first _num_q @@ -721,8 +720,8 @@ for (uint i = 0; i < _max_num_q; ++i) { total_refs += ref_lists[i].length(); - } - log_reflist_counts(ref_lists, total_refs); + } + log_reflist_counts(ref_lists, _max_num_q, total_refs); size_t avg_refs = total_refs / _num_q + 1; uint to_idx = 0; for (uint from_idx = 0; from_idx < _max_num_q; from_idx++) { @@ -784,10 +783,10 @@ } #ifdef ASSERT size_t balanced_total_refs = 0; - for (uint i = 0; i < _max_num_q; ++i) { + for (uint i = 0; i < _num_q; ++i) { balanced_total_refs += ref_lists[i].length(); - } - log_reflist_counts(ref_lists, balanced_total_refs); + } + log_reflist_counts(ref_lists, _num_q, balanced_total_refs); assert(total_refs == balanced_total_refs, "Balancing was incomplete"); #endif } @@ -881,7 +880,7 @@ id = next_id(); } } - assert(id < _max_num_q, "Id is out-of-bounds (call Freud?)"); + assert(id < _max_num_q, "Id is out-of-bounds id %u and max id %u)", id, _max_num_q); // Get the discovered queue to which we will add DiscoveredList* list = NULL; diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/shared/referenceProcessor.hpp --- a/hotspot/src/share/vm/gc/shared/referenceProcessor.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/shared/referenceProcessor.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -225,7 +225,7 @@ uint num_q() { return _num_q; } uint max_num_q() { return _max_num_q; } - void set_active_mt_degree(uint v) { _num_q = v; } + void set_active_mt_degree(uint v); DiscoveredList* discovered_refs() { return _discovered_refs; } @@ -326,9 +326,11 @@ // round-robin mod _num_q (not: _not_ mode _max_num_q) uint next_id() { uint id = _next_id; + assert(!_discovery_is_mt, "Round robin should only be used in serial discovery"); if (++_next_id == _num_q) { _next_id = 0; } + assert(_next_id < _num_q, "_next_id %u _num_q %u _max_num_q %u", _next_id, _num_q, _max_num_q); return id; } DiscoveredList* get_discovered_list(ReferenceType rt); @@ -340,7 +342,7 @@ // Calculate the number of jni handles. size_t count_jni_refs(); - void log_reflist_counts(DiscoveredList ref_lists[], size_t total_count) PRODUCT_RETURN; + void log_reflist_counts(DiscoveredList ref_lists[], uint active_length, size_t total_count) PRODUCT_RETURN; // Balances reference queues. 
void balance_queues(DiscoveredList ref_lists[]); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/shared/space.cpp --- a/hotspot/src/share/vm/gc/shared/space.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/shared/space.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -30,7 +30,6 @@ #include "gc/shared/collectedHeap.inline.hpp" #include "gc/shared/genCollectedHeap.hpp" #include "gc/shared/genOopClosures.inline.hpp" -#include "gc/shared/liveRange.hpp" #include "gc/shared/space.hpp" #include "gc/shared/space.inline.hpp" #include "gc/shared/spaceDecorator.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/shared/space.inline.hpp --- a/hotspot/src/share/vm/gc/shared/space.inline.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/shared/space.inline.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -28,7 +28,6 @@ #include "gc/serial/markSweep.inline.hpp" #include "gc/shared/collectedHeap.hpp" #include "gc/shared/generation.hpp" -#include "gc/shared/liveRange.hpp" #include "gc/shared/space.hpp" #include "gc/shared/spaceDecorator.hpp" #include "memory/universe.hpp" @@ -117,9 +116,6 @@ HeapWord* end_of_live= q; // One byte beyond the last byte of the last // live object. HeapWord* first_dead = space->end(); // The first dead object. - LiveRange* liveRange = NULL; // The current live range, recorded in the - // first header of preceding free area. - space->_first_dead = first_dead; const intx interval = PrefetchScanIntervalInBytes; @@ -158,16 +154,8 @@ // otherwise, it really is a free region. - // for the previous LiveRange, record the end of the live objects. - if (liveRange) { - liveRange->set_end(q); - } - - // record the current LiveRange object. - // liveRange->start() is overlaid on the mark word. - liveRange = (LiveRange*)q; - liveRange->set_start(end); - liveRange->set_end(end); + // q is a pointer to a dead object. Use this dead memory to store a pointer to the next live object. + (*(HeapWord**)q) = end; // see if this is the first dead region. if (q < first_dead) { @@ -180,9 +168,6 @@ } assert(q == t, "just checking"); - if (liveRange != NULL) { - liveRange->set_end(q); - } space->_end_of_live = end_of_live; if (end_of_live < first_dead) { first_dead = end_of_live; @@ -227,9 +212,9 @@ if (space->_first_dead == t) { q = t; } else { - // $$$ This is funky. Using this to read the previously written - // LiveRange. See also use below. - q = (HeapWord*)oop(space->_first_dead)->mark()->decode_pointer(); + // The first dead object is no longer an object. At that memory address, + // there is a pointer to the first live object that the previous phase found. 
+ q = *((HeapWord**)(space->_first_dead)); } } @@ -247,11 +232,10 @@ debug_only(prev_q = q); q += size; } else { - // q is not a live object, so its mark should point at the next - // live object debug_only(prev_q = q); - q = (HeapWord*) oop(q)->mark()->decode_pointer(); - assert(q > prev_q, "we should be moving forward through memory"); + // q is not a live object, instead it points at the next live object + q = *(HeapWord**)q; + assert(q > prev_q, "we should be moving forward through memory, q: " PTR_FORMAT ", prev_q: " PTR_FORMAT, p2i(q), p2i(prev_q)); } } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/shared/spaceDecorator.cpp --- a/hotspot/src/share/vm/gc/shared/spaceDecorator.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/shared/spaceDecorator.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -84,9 +84,7 @@ void SpaceMangler::mangle_region(MemRegion mr) { assert(ZapUnusedHeapArea, "Mangling should not be in use"); #ifdef ASSERT - log_develop_trace(gc)("Mangling [" PTR_FORMAT " to " PTR_FORMAT ")", p2i(mr.start()), p2i(mr.end())); Copy::fill_to_words(mr.start(), mr.word_size(), badHeapWord); - log_develop_trace(gc)("Mangling done."); #endif } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/shared/taskqueue.hpp --- a/hotspot/src/share/vm/gc/shared/taskqueue.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/shared/taskqueue.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -248,7 +248,6 @@ template class GenericTaskQueue: public TaskQueueSuper { - ArrayAllocator _array_allocator; protected: typedef typename TaskQueueSuper::Age Age; typedef typename TaskQueueSuper::idx_t idx_t; diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/shared/taskqueue.inline.hpp --- a/hotspot/src/share/vm/gc/shared/taskqueue.inline.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/shared/taskqueue.inline.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -44,12 +44,13 @@ template inline void GenericTaskQueue::initialize() { - _elems = _array_allocator.allocate(N); + _elems = ArrayAllocator::allocate(N); } template inline GenericTaskQueue::~GenericTaskQueue() { - FREE_C_HEAP_ARRAY(E, _elems); + assert(false, "This code is currently never called"); + ArrayAllocator::free(const_cast(_elems), N); } template diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/shared/threadLocalAllocBuffer.cpp --- a/hotspot/src/share/vm/gc/shared/threadLocalAllocBuffer.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/shared/threadLocalAllocBuffer.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -242,7 +242,7 @@ } void ThreadLocalAllocBuffer::print_stats(const char* tag) { - LogHandle(gc, tlab) log; + Log(gc, tlab) log; if (!log.is_trace()) { return; } @@ -385,7 +385,7 @@ } void GlobalTLABStats::print() { - LogHandle(gc, tlab) log; + Log(gc, tlab) log; if (!log.is_debug()) { return; } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/shared/threadLocalAllocBuffer.inline.hpp --- a/hotspot/src/share/vm/gc/shared/threadLocalAllocBuffer.inline.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/shared/threadLocalAllocBuffer.inline.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -61,7 +61,7 @@ // unsafe_max_tlab_alloc is just a hint. 
const size_t available_size = Universe::heap()->unsafe_max_tlab_alloc(myThread()) / HeapWordSize; - size_t new_tlab_size = MIN2(available_size, desired_size() + aligned_obj_size); + size_t new_tlab_size = MIN3(available_size, desired_size() + aligned_obj_size, max_size()); // Make sure there's enough room for object and filler int[]. const size_t obj_plus_filler_size = aligned_obj_size + alignment_reserve(); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/shared/vmGCOperations.cpp --- a/hotspot/src/share/vm/gc/shared/vmGCOperations.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/shared/vmGCOperations.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,10 +30,8 @@ #include "gc/shared/gcLocker.inline.hpp" #include "gc/shared/genCollectedHeap.hpp" #include "gc/shared/vmGCOperations.hpp" +#include "logging/log.hpp" #include "memory/oopFactory.hpp" -#include "logging/log.hpp" -#include "oops/instanceKlass.hpp" -#include "oops/instanceRefKlass.hpp" #include "runtime/handles.inline.hpp" #include "runtime/init.hpp" #include "runtime/interfaceSupport.hpp" @@ -64,14 +62,11 @@ } void VM_GC_Operation::acquire_pending_list_lock() { - // we may enter this with pending exception set - InstanceRefKlass::acquire_pending_list_lock(&_pending_list_basic_lock); + _pending_list_locker.lock(); } - void VM_GC_Operation::release_and_notify_pending_list_lock() { - - InstanceRefKlass::release_and_notify_pending_list_lock(&_pending_list_basic_lock); + _pending_list_locker.unlock(); } // Allocations may fail in several threads at about the same time, @@ -160,7 +155,7 @@ // be about to attempt holds value for us only // if it happens now and not if it happens in the eventual // future. - warning("GC locker is held; pre-dump GC was skipped"); + log_warning(gc)("GC locker is held; pre-dump GC was skipped"); } } HeapInspection inspect(_csv_format, _print_help, _print_class_stats, @@ -276,12 +271,8 @@ return; } - // If expansion failed, do a last-ditch collection and try allocating - // again. A last-ditch collection will clear softrefs. This - // behavior is similar to the last-ditch collection done for perm - // gen when it was full and a collection for failed allocation - // did not free perm gen space. - heap->collect_as_vm_thread(GCCause::_last_ditch_collection); + // If expansion failed, do a collection clearing soft references. + heap->collect_as_vm_thread(GCCause::_metadata_GC_clear_soft_refs); _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype); if (_result != NULL) { return; diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/shared/vmGCOperations.hpp --- a/hotspot/src/share/vm/gc/shared/vmGCOperations.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/shared/vmGCOperations.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -27,6 +27,7 @@ #include "gc/shared/collectedHeap.hpp" #include "gc/shared/genCollectedHeap.hpp" +#include "gc/shared/referencePendingListLocker.hpp" #include "memory/heapInspection.hpp" #include "prims/jvmtiExport.hpp" #include "runtime/handles.hpp" @@ -69,8 +70,10 @@ // class VM_GC_Operation: public VM_Operation { + private: + ReferencePendingListLocker _pending_list_locker; + protected: - BasicLock _pending_list_basic_lock; // for refs pending list notification (PLL) uint _gc_count_before; // gc count before acquiring PLL uint _full_gc_count_before; // full gc count before acquiring PLL bool _full; // whether a "full" collection diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/shared/workgroup.cpp --- a/hotspot/src/share/vm/gc/shared/workgroup.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/shared/workgroup.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -40,12 +40,7 @@ // initialization of the workers and report such to the // caller. bool AbstractWorkGang::initialize_workers() { - - if (TraceWorkGang) { - tty->print_cr("Constructing work gang %s with %d threads", - name(), - total_workers()); - } + log_develop_trace(gc, workgang)("Constructing work gang %s with %u threads", name(), total_workers()); _workers = NEW_C_HEAP_ARRAY(AbstractGangWorker*, total_workers(), mtInternal); if (_workers == NULL) { vm_exit_out_of_memory(0, OOM_MALLOC_ERROR, "Cannot create GangWorker array."); @@ -279,10 +274,7 @@ this->initialize_named_thread(); assert(_gang != NULL, "No gang to run in"); os::set_priority(this, NearMaxPriority); - if (TraceWorkGang) { - tty->print_cr("Running gang worker for gang %s id %u", - gang()->name(), id()); - } + log_develop_trace(gc, workgang)("Running gang worker for gang %s id %u", gang()->name(), id()); // The VM thread should not execute here because MutexLocker's are used // as (opposed to MutexLockerEx's). 
assert(!Thread::current()->is_VM_thread(), "VM thread should not be part" @@ -311,27 +303,14 @@ gang()->dispatcher()->worker_done_with_task(); } -void GangWorker::print_task_started(WorkData data) { - if (TraceWorkGang) { - tty->print_cr("Running work gang %s task %s worker %u", name(), data._task->name(), data._worker_id); - } -} +void GangWorker::run_task(WorkData data) { + GCIdMark gc_id_mark(data._task->gc_id()); + log_develop_trace(gc, workgang)("Running work gang: %s task: %s worker: %u", name(), data._task->name(), data._worker_id); -void GangWorker::print_task_done(WorkData data) { - if (TraceWorkGang) { - tty->print_cr("\nFinished work gang %s task %s worker %u", name(), data._task->name(), data._worker_id); - Thread* me = Thread::current(); - tty->print_cr(" T: " PTR_FORMAT " VM_thread: %d", p2i(me), me->is_VM_thread()); - } -} - -void GangWorker::run_task(WorkData data) { - print_task_started(data); - - GCIdMark gc_id_mark(data._task->gc_id()); data._task->work(data._worker_id); - print_task_done(data); + log_develop_trace(gc, workgang)("Finished work gang: %s task: %s worker: %u thread: " PTR_FORMAT, + name(), data._task->name(), data._worker_id, p2i(Thread::current())); } void GangWorker::loop() { diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/gc/shared/workgroup.hpp --- a/hotspot/src/share/vm/gc/shared/workgroup.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/gc/shared/workgroup.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -152,7 +152,7 @@ _active_workers = MAX2(1U, _active_workers); assert(UseDynamicNumberOfGCThreads || _active_workers == _total_workers, "Unless dynamic should use total workers"); - log_info(gc, task)("GC Workers: %d", _active_workers); + log_info(gc, task)("GC Workers: using %d out of %d", _active_workers, _total_workers); } // Return the Ith worker. 
@@ -234,9 +234,6 @@ void run_task(WorkData work); void signal_task_done(); - void print_task_started(WorkData data); - void print_task_done(WorkData data); - WorkGang* gang() const { return (WorkGang*)_gang; } }; diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp --- a/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -138,11 +138,11 @@ BytecodeHistogram::_counters[(Bytecodes::Code)opcode]++; \ if (StopInterpreterAt && StopInterpreterAt == BytecodeCounter::_counter_value) os::breakpoint(); \ if (TraceBytecodes) { \ - CALL_VM((void)SharedRuntime::trace_bytecode(THREAD, 0, \ - topOfStack[Interpreter::expr_index_at(1)], \ - topOfStack[Interpreter::expr_index_at(2)]), \ - handle_exception); \ - } \ + CALL_VM((void)InterpreterRuntime::trace_bytecode(THREAD, 0, \ + topOfStack[Interpreter::expr_index_at(1)], \ + topOfStack[Interpreter::expr_index_at(2)]), \ + handle_exception); \ + } \ } #endif @@ -632,9 +632,11 @@ if (_compiling) { MethodCounters* mcs; GET_METHOD_COUNTERS(mcs); +#if COMPILER2_OR_JVMCI if (ProfileInterpreter) { METHOD->increment_interpreter_invocation_count(THREAD); } +#endif mcs->invocation_counter()->increment(); if (mcs->invocation_counter()->reached_InvocationLimit(mcs->backedge_counter())) { CALL_VM((void)InterpreterRuntime::frequency_counter_overflow(THREAD, NULL), handle_exception); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/interpreter/cppInterpreter.cpp --- a/hotspot/src/share/vm/interpreter/cppInterpreter.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/interpreter/cppInterpreter.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -27,7 +27,8 @@ #include "interpreter/cppInterpreterGenerator.hpp" #include "interpreter/interpreter.hpp" #include "interpreter/interpreterRuntime.hpp" -#include "runtime/logTimer.hpp" +#include "memory/resourceArea.hpp" +#include "runtime/timerTrace.hpp" #ifdef CC_INTERP @@ -43,7 +44,7 @@ // generate interpreter { ResourceMark rm; - TraceStartupTime timer("Interpreter generation"); + TraceTime timer("Interpreter generation", TRACETIME_LOG(Info, startuptime)); int code_size = InterpreterCodeSize; NOT_PRODUCT(code_size *= 4;) // debug uses extra interpreter code space _code = new StubQueue(new InterpreterCodeletInterface, code_size, NULL, diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/interpreter/interpreterRuntime.cpp --- a/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -37,6 +37,7 @@ #include "interpreter/templateTable.hpp" #include "logging/log.hpp" #include "memory/oopFactory.hpp" +#include "memory/resourceArea.hpp" #include "memory/universe.inline.hpp" #include "oops/constantPool.hpp" #include "oops/instanceKlass.hpp" @@ -173,9 +174,6 @@ IRT_ENTRY(void, InterpreterRuntime::anewarray(JavaThread* thread, ConstantPool* pool, int index, jint size)) - // Note: no oopHandle for pool & klass needed since they are not used - // anymore after new_objArray() and no GC can happen before. - // (This may have to change if this code changes!) 
Klass* klass = pool->klass_at(index, CHECK); objArrayOop obj = oopFactory::new_objArray(klass, size, CHECK); thread->set_vm_result(obj); @@ -523,8 +521,10 @@ #ifndef CC_INTERP continuation = Interpreter::remove_activation_entry(); #endif +#if COMPILER2_OR_JVMCI // Count this for compilation purposes h_method->interpreter_throwout_increment(THREAD); +#endif } else { // handler in this method => change bci/bcp to handler bci/bcp and continue there handler_pc = h_method->code_base() + handler_bci; @@ -1414,3 +1414,17 @@ } IRT_END #endif // INCLUDE_JVMTI + +#ifndef PRODUCT +// This must be a IRT_LEAF function because the interpreter must save registers on x86 to +// call this, which changes rsp and makes the interpreter's expression stack not walkable. +// The generated code still uses call_VM because that will set up the frame pointer for +// bcp and method. +IRT_LEAF(intptr_t, InterpreterRuntime::trace_bytecode(JavaThread* thread, intptr_t preserve_this_value, intptr_t tos, intptr_t tos2)) + const frame f = thread->last_frame(); + assert(f.is_interpreted_frame(), "must be an interpreted frame"); + methodHandle mh(thread, f.interpreter_frame_method()); + BytecodeTracer::trace(mh, f.interpreter_frame_bcp(), tos, tos2); + return preserve_this_value; +IRT_END +#endif // !PRODUCT diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/interpreter/interpreterRuntime.hpp --- a/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -166,6 +166,9 @@ static void popframe_move_outgoing_args(JavaThread* thread, void* src_address, void* dest_address); #endif + // bytecode tracing is only used by the TraceBytecodes + static intptr_t trace_bytecode(JavaThread* thread, intptr_t preserve_this_value, intptr_t tos, intptr_t tos2) PRODUCT_RETURN0; + // Platform dependent stuff #ifdef TARGET_ARCH_x86 # include "interpreterRT_x86.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/interpreter/linkResolver.cpp --- a/hotspot/src/share/vm/interpreter/linkResolver.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/interpreter/linkResolver.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -757,9 +757,9 @@ ResourceMark rm; outputStream* st; if (logitables) { - st = LogHandle(itables)::trace_stream(); + st = Log(itables)::trace_stream(); } else { - st = LogHandle(vtables)::trace_stream(); + st = Log(vtables)::trace_stream(); } st->print("%s%s, compile-time-class:%s, method:%s, method_holder:%s, access_flags: ", prefix, diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/interpreter/templateInterpreter.cpp --- a/hotspot/src/share/vm/interpreter/templateInterpreter.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/interpreter/templateInterpreter.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -31,7 +31,7 @@ #include "interpreter/templateInterpreterGenerator.hpp" #include "interpreter/templateTable.hpp" #include "memory/resourceArea.hpp" -#include "runtime/logTimer.hpp" +#include "runtime/timerTrace.hpp" #ifndef CC_INTERP @@ -49,7 +49,7 @@ // generate interpreter { ResourceMark rm; - TraceStartupTime timer("Interpreter generation"); + TraceTime timer("Interpreter generation", TRACETIME_LOG(Info, startuptime)); int code_size = InterpreterCodeSize; NOT_PRODUCT(code_size *= 4;) // debug uses extra interpreter code space #if INCLUDE_JVMTI diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/interpreter/templateTable.cpp --- a/hotspot/src/share/vm/interpreter/templateTable.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/interpreter/templateTable.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -26,7 +26,7 @@ #include "gc/shared/collectedHeap.hpp" #include "interpreter/interp_masm.hpp" #include "interpreter/templateTable.hpp" -#include "runtime/logTimer.hpp" +#include "runtime/timerTrace.hpp" #ifdef CC_INTERP @@ -245,7 +245,7 @@ if (_is_initialized) return; // Initialize table - TraceStartupTime timer("TemplateTable initialization"); + TraceTime timer("TemplateTable initialization", TRACETIME_LOG(Info, startuptime)); _bs = Universe::heap()->barrier_set(); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/jvmci/jvmciCompiler.cpp --- a/hotspot/src/share/vm/jvmci/jvmciCompiler.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/jvmci/jvmciCompiler.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -23,6 +23,7 @@ #include "precompiled.hpp" #include "memory/oopFactory.hpp" +#include "memory/resourceArea.hpp" #include "oops/oop.inline.hpp" #include "runtime/javaCalls.hpp" #include "runtime/handles.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/jvmci/jvmciCompilerToVM.cpp --- a/hotspot/src/share/vm/jvmci/jvmciCompilerToVM.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/jvmci/jvmciCompilerToVM.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -27,6 +27,7 @@ #include "code/scopeDesc.hpp" #include "interpreter/linkResolver.hpp" #include "memory/oopFactory.hpp" +#include "memory/resourceArea.hpp" #include "oops/generateOopMap.hpp" #include "oops/fieldStreams.hpp" #include "oops/oop.inline.hpp" @@ -48,6 +49,7 @@ #include "gc/g1/heapRegion.hpp" #include "runtime/javaCalls.hpp" #include "runtime/deoptimization.hpp" +#include "runtime/timerTrace.hpp" #include "runtime/vframe.hpp" #include "runtime/vframe_hp.hpp" #include "runtime/vmStructs.hpp" @@ -1432,65 +1434,65 @@ #define METASPACE_METHOD_DATA "J" JNINativeMethod CompilerToVM::methods[] = { - {CC"getBytecode", CC"("HS_RESOLVED_METHOD")[B", FN_PTR(getBytecode)}, - {CC"getExceptionTableStart", CC"("HS_RESOLVED_METHOD")J", FN_PTR(getExceptionTableStart)}, - {CC"getExceptionTableLength", CC"("HS_RESOLVED_METHOD")I", FN_PTR(getExceptionTableLength)}, - {CC"findUniqueConcreteMethod", CC"("HS_RESOLVED_KLASS HS_RESOLVED_METHOD")"HS_RESOLVED_METHOD, FN_PTR(findUniqueConcreteMethod)}, - {CC"getImplementor", CC"("HS_RESOLVED_KLASS")"HS_RESOLVED_KLASS, FN_PTR(getImplementor)}, - {CC"getStackTraceElement", CC"("HS_RESOLVED_METHOD"I)"STACK_TRACE_ELEMENT, FN_PTR(getStackTraceElement)}, - {CC"methodIsIgnoredBySecurityStackWalk", CC"("HS_RESOLVED_METHOD")Z", FN_PTR(methodIsIgnoredBySecurityStackWalk)}, - {CC"doNotInlineOrCompile", CC"("HS_RESOLVED_METHOD")V", FN_PTR(doNotInlineOrCompile)}, - {CC"canInlineMethod", CC"("HS_RESOLVED_METHOD")Z", FN_PTR(canInlineMethod)}, - {CC"shouldInlineMethod", CC"("HS_RESOLVED_METHOD")Z", FN_PTR(shouldInlineMethod)}, - {CC"lookupType", CC"("STRING CLASS"Z)"HS_RESOLVED_KLASS, FN_PTR(lookupType)}, - {CC"lookupNameInPool", CC"("HS_CONSTANT_POOL"I)"STRING, FN_PTR(lookupNameInPool)}, - {CC"lookupNameAndTypeRefIndexInPool", CC"("HS_CONSTANT_POOL"I)I", FN_PTR(lookupNameAndTypeRefIndexInPool)}, - {CC"lookupSignatureInPool", CC"("HS_CONSTANT_POOL"I)"STRING, FN_PTR(lookupSignatureInPool)}, - {CC"lookupKlassRefIndexInPool", CC"("HS_CONSTANT_POOL"I)I", FN_PTR(lookupKlassRefIndexInPool)}, - {CC"lookupKlassInPool", CC"("HS_CONSTANT_POOL"I)Ljava/lang/Object;", FN_PTR(lookupKlassInPool)}, - {CC"lookupAppendixInPool", CC"("HS_CONSTANT_POOL"I)"OBJECT, FN_PTR(lookupAppendixInPool)}, - {CC"lookupMethodInPool", CC"("HS_CONSTANT_POOL"IB)"HS_RESOLVED_METHOD, FN_PTR(lookupMethodInPool)}, - {CC"constantPoolRemapInstructionOperandFromCache", CC"("HS_CONSTANT_POOL"I)I", FN_PTR(constantPoolRemapInstructionOperandFromCache)}, - {CC"resolveConstantInPool", CC"("HS_CONSTANT_POOL"I)"OBJECT, FN_PTR(resolveConstantInPool)}, - {CC"resolvePossiblyCachedConstantInPool", CC"("HS_CONSTANT_POOL"I)"OBJECT, FN_PTR(resolvePossiblyCachedConstantInPool)}, - {CC"resolveTypeInPool", CC"("HS_CONSTANT_POOL"I)"HS_RESOLVED_KLASS, FN_PTR(resolveTypeInPool)}, - {CC"resolveFieldInPool", CC"("HS_CONSTANT_POOL"IB[J)"HS_RESOLVED_KLASS, FN_PTR(resolveFieldInPool)}, - {CC"resolveInvokeDynamicInPool", CC"("HS_CONSTANT_POOL"I)V", FN_PTR(resolveInvokeDynamicInPool)}, - 
{CC"resolveInvokeHandleInPool", CC"("HS_CONSTANT_POOL"I)V", FN_PTR(resolveInvokeHandleInPool)}, - {CC"resolveMethod", CC"("HS_RESOLVED_KLASS HS_RESOLVED_METHOD HS_RESOLVED_KLASS")"HS_RESOLVED_METHOD, FN_PTR(resolveMethod)}, - {CC"getVtableIndexForInterfaceMethod", CC"("HS_RESOLVED_KLASS HS_RESOLVED_METHOD")I", FN_PTR(getVtableIndexForInterfaceMethod)}, - {CC"getClassInitializer", CC"("HS_RESOLVED_KLASS")"HS_RESOLVED_METHOD, FN_PTR(getClassInitializer)}, - {CC"hasFinalizableSubclass", CC"("HS_RESOLVED_KLASS")Z", FN_PTR(hasFinalizableSubclass)}, - {CC"getMaxCallTargetOffset", CC"(J)J", FN_PTR(getMaxCallTargetOffset)}, - {CC"getResolvedJavaMethodAtSlot", CC"("CLASS"I)"HS_RESOLVED_METHOD, FN_PTR(getResolvedJavaMethodAtSlot)}, - {CC"getResolvedJavaMethod", CC"(Ljava/lang/Object;J)"HS_RESOLVED_METHOD, FN_PTR(getResolvedJavaMethod)}, - {CC"getConstantPool", CC"(Ljava/lang/Object;J)"HS_CONSTANT_POOL, FN_PTR(getConstantPool)}, - {CC"getResolvedJavaType", CC"(Ljava/lang/Object;JZ)"HS_RESOLVED_KLASS, FN_PTR(getResolvedJavaType)}, - {CC"initializeConfiguration", CC"("HS_CONFIG")J", FN_PTR(initializeConfiguration)}, - {CC"installCode", CC"("TARGET_DESCRIPTION HS_COMPILED_CODE INSTALLED_CODE HS_SPECULATION_LOG")I", FN_PTR(installCode)}, - {CC"getMetadata", CC"("TARGET_DESCRIPTION HS_COMPILED_CODE HS_METADATA")I", FN_PTR(getMetadata)}, - {CC"resetCompilationStatistics", CC"()V", FN_PTR(resetCompilationStatistics)}, - {CC"disassembleCodeBlob", CC"("INSTALLED_CODE")"STRING, FN_PTR(disassembleCodeBlob)}, - {CC"executeInstalledCode", CC"(["OBJECT INSTALLED_CODE")"OBJECT, FN_PTR(executeInstalledCode)}, - {CC"getLineNumberTable", CC"("HS_RESOLVED_METHOD")[J", FN_PTR(getLineNumberTable)}, - {CC"getLocalVariableTableStart", CC"("HS_RESOLVED_METHOD")J", FN_PTR(getLocalVariableTableStart)}, - {CC"getLocalVariableTableLength", CC"("HS_RESOLVED_METHOD")I", FN_PTR(getLocalVariableTableLength)}, - {CC"reprofile", CC"("HS_RESOLVED_METHOD")V", FN_PTR(reprofile)}, - {CC"invalidateInstalledCode", CC"("INSTALLED_CODE")V", FN_PTR(invalidateInstalledCode)}, - {CC"readUncompressedOop", CC"(J)"OBJECT, FN_PTR(readUncompressedOop)}, - {CC"collectCounters", CC"()[J", FN_PTR(collectCounters)}, - {CC"allocateCompileId", CC"("HS_RESOLVED_METHOD"I)I", FN_PTR(allocateCompileId)}, - {CC"isMature", CC"("METASPACE_METHOD_DATA")Z", FN_PTR(isMature)}, - {CC"hasCompiledCodeForOSR", CC"("HS_RESOLVED_METHOD"II)Z", FN_PTR(hasCompiledCodeForOSR)}, - {CC"getSymbol", CC"(J)"STRING, FN_PTR(getSymbol)}, - {CC"lookupSymbol", CC"("STRING")J", FN_PTR(lookupSymbol)}, - {CC"getNextStackFrame", CC"("HS_STACK_FRAME_REF "["RESOLVED_METHOD"I)"HS_STACK_FRAME_REF, FN_PTR(getNextStackFrame)}, - {CC"materializeVirtualObjects", CC"("HS_STACK_FRAME_REF"Z)V", FN_PTR(materializeVirtualObjects)}, - {CC"shouldDebugNonSafepoints", CC"()Z", FN_PTR(shouldDebugNonSafepoints)}, - {CC"writeDebugOutput", CC"([BII)V", FN_PTR(writeDebugOutput)}, - {CC"flushDebugOutput", CC"()V", FN_PTR(flushDebugOutput)}, - {CC"methodDataProfileDataSize", CC"(JI)I", FN_PTR(methodDataProfileDataSize)}, - {CC"interpreterFrameSize", CC"("BYTECODE_FRAME")I", FN_PTR(interpreterFrameSize)}, + {CC "getBytecode", CC "(" HS_RESOLVED_METHOD ")[B", FN_PTR(getBytecode)}, + {CC "getExceptionTableStart", CC "(" HS_RESOLVED_METHOD ")J", FN_PTR(getExceptionTableStart)}, + {CC "getExceptionTableLength", CC "(" HS_RESOLVED_METHOD ")I", FN_PTR(getExceptionTableLength)}, + {CC "findUniqueConcreteMethod", CC "(" HS_RESOLVED_KLASS HS_RESOLVED_METHOD ")" HS_RESOLVED_METHOD, FN_PTR(findUniqueConcreteMethod)}, + {CC 
"getImplementor", CC "(" HS_RESOLVED_KLASS ")" HS_RESOLVED_KLASS, FN_PTR(getImplementor)}, + {CC "getStackTraceElement", CC "(" HS_RESOLVED_METHOD "I)" STACK_TRACE_ELEMENT, FN_PTR(getStackTraceElement)}, + {CC "methodIsIgnoredBySecurityStackWalk", CC "(" HS_RESOLVED_METHOD ")Z", FN_PTR(methodIsIgnoredBySecurityStackWalk)}, + {CC "doNotInlineOrCompile", CC "(" HS_RESOLVED_METHOD ")V", FN_PTR(doNotInlineOrCompile)}, + {CC "canInlineMethod", CC "(" HS_RESOLVED_METHOD ")Z", FN_PTR(canInlineMethod)}, + {CC "shouldInlineMethod", CC "(" HS_RESOLVED_METHOD ")Z", FN_PTR(shouldInlineMethod)}, + {CC "lookupType", CC "(" STRING CLASS "Z)" HS_RESOLVED_KLASS, FN_PTR(lookupType)}, + {CC "lookupNameInPool", CC "(" HS_CONSTANT_POOL "I)" STRING, FN_PTR(lookupNameInPool)}, + {CC "lookupNameAndTypeRefIndexInPool", CC "(" HS_CONSTANT_POOL "I)I", FN_PTR(lookupNameAndTypeRefIndexInPool)}, + {CC "lookupSignatureInPool", CC "(" HS_CONSTANT_POOL "I)" STRING, FN_PTR(lookupSignatureInPool)}, + {CC "lookupKlassRefIndexInPool", CC "(" HS_CONSTANT_POOL "I)I", FN_PTR(lookupKlassRefIndexInPool)}, + {CC "lookupKlassInPool", CC "(" HS_CONSTANT_POOL "I)Ljava/lang/Object;", FN_PTR(lookupKlassInPool)}, + {CC "lookupAppendixInPool", CC "(" HS_CONSTANT_POOL "I)" OBJECT, FN_PTR(lookupAppendixInPool)}, + {CC "lookupMethodInPool", CC "(" HS_CONSTANT_POOL "IB)" HS_RESOLVED_METHOD, FN_PTR(lookupMethodInPool)}, + {CC "constantPoolRemapInstructionOperandFromCache", CC "(" HS_CONSTANT_POOL "I)I", FN_PTR(constantPoolRemapInstructionOperandFromCache)}, + {CC "resolveConstantInPool", CC "(" HS_CONSTANT_POOL "I)" OBJECT, FN_PTR(resolveConstantInPool)}, + {CC "resolvePossiblyCachedConstantInPool", CC "(" HS_CONSTANT_POOL "I)" OBJECT, FN_PTR(resolvePossiblyCachedConstantInPool)}, + {CC "resolveTypeInPool", CC "(" HS_CONSTANT_POOL "I)" HS_RESOLVED_KLASS, FN_PTR(resolveTypeInPool)}, + {CC "resolveFieldInPool", CC "(" HS_CONSTANT_POOL "IB[J)" HS_RESOLVED_KLASS, FN_PTR(resolveFieldInPool)}, + {CC "resolveInvokeDynamicInPool", CC "(" HS_CONSTANT_POOL "I)V", FN_PTR(resolveInvokeDynamicInPool)}, + {CC "resolveInvokeHandleInPool", CC "(" HS_CONSTANT_POOL "I)V", FN_PTR(resolveInvokeHandleInPool)}, + {CC "resolveMethod", CC "(" HS_RESOLVED_KLASS HS_RESOLVED_METHOD HS_RESOLVED_KLASS ")" HS_RESOLVED_METHOD, FN_PTR(resolveMethod)}, + {CC "getVtableIndexForInterfaceMethod", CC "(" HS_RESOLVED_KLASS HS_RESOLVED_METHOD ")I", FN_PTR(getVtableIndexForInterfaceMethod)}, + {CC "getClassInitializer", CC "(" HS_RESOLVED_KLASS ")" HS_RESOLVED_METHOD, FN_PTR(getClassInitializer)}, + {CC "hasFinalizableSubclass", CC "(" HS_RESOLVED_KLASS ")Z", FN_PTR(hasFinalizableSubclass)}, + {CC "getMaxCallTargetOffset", CC "(J)J", FN_PTR(getMaxCallTargetOffset)}, + {CC "getResolvedJavaMethodAtSlot", CC "(" CLASS "I)" HS_RESOLVED_METHOD, FN_PTR(getResolvedJavaMethodAtSlot)}, + {CC "getResolvedJavaMethod", CC "(Ljava/lang/Object;J)" HS_RESOLVED_METHOD, FN_PTR(getResolvedJavaMethod)}, + {CC "getConstantPool", CC "(Ljava/lang/Object;J)" HS_CONSTANT_POOL, FN_PTR(getConstantPool)}, + {CC "getResolvedJavaType", CC "(Ljava/lang/Object;JZ)" HS_RESOLVED_KLASS, FN_PTR(getResolvedJavaType)}, + {CC "initializeConfiguration", CC "(" HS_CONFIG ")J", FN_PTR(initializeConfiguration)}, + {CC "installCode", CC "(" TARGET_DESCRIPTION HS_COMPILED_CODE INSTALLED_CODE HS_SPECULATION_LOG ")I", FN_PTR(installCode)}, + {CC "getMetadata", CC "(" TARGET_DESCRIPTION HS_COMPILED_CODE HS_METADATA ")I", FN_PTR(getMetadata)}, + {CC "resetCompilationStatistics", CC "()V", FN_PTR(resetCompilationStatistics)}, + 
{CC "disassembleCodeBlob", CC "(" INSTALLED_CODE ")" STRING, FN_PTR(disassembleCodeBlob)}, + {CC "executeInstalledCode", CC "([" OBJECT INSTALLED_CODE ")" OBJECT, FN_PTR(executeInstalledCode)}, + {CC "getLineNumberTable", CC "(" HS_RESOLVED_METHOD ")[J", FN_PTR(getLineNumberTable)}, + {CC "getLocalVariableTableStart", CC "(" HS_RESOLVED_METHOD ")J", FN_PTR(getLocalVariableTableStart)}, + {CC "getLocalVariableTableLength", CC "(" HS_RESOLVED_METHOD ")I", FN_PTR(getLocalVariableTableLength)}, + {CC "reprofile", CC "(" HS_RESOLVED_METHOD ")V", FN_PTR(reprofile)}, + {CC "invalidateInstalledCode", CC "(" INSTALLED_CODE ")V", FN_PTR(invalidateInstalledCode)}, + {CC "readUncompressedOop", CC "(J)" OBJECT, FN_PTR(readUncompressedOop)}, + {CC "collectCounters", CC "()[J", FN_PTR(collectCounters)}, + {CC "allocateCompileId", CC "(" HS_RESOLVED_METHOD "I)I", FN_PTR(allocateCompileId)}, + {CC "isMature", CC "(" METASPACE_METHOD_DATA ")Z", FN_PTR(isMature)}, + {CC "hasCompiledCodeForOSR", CC "(" HS_RESOLVED_METHOD "II)Z", FN_PTR(hasCompiledCodeForOSR)}, + {CC "getSymbol", CC "(J)" STRING, FN_PTR(getSymbol)}, + {CC "lookupSymbol", CC "(" STRING ")J", FN_PTR(lookupSymbol)}, + {CC "getNextStackFrame", CC "(" HS_STACK_FRAME_REF "[" RESOLVED_METHOD "I)" HS_STACK_FRAME_REF, FN_PTR(getNextStackFrame)}, + {CC "materializeVirtualObjects", CC "(" HS_STACK_FRAME_REF "Z)V", FN_PTR(materializeVirtualObjects)}, + {CC "shouldDebugNonSafepoints", CC "()Z", FN_PTR(shouldDebugNonSafepoints)}, + {CC "writeDebugOutput", CC "([BII)V", FN_PTR(writeDebugOutput)}, + {CC "flushDebugOutput", CC "()V", FN_PTR(flushDebugOutput)}, + {CC "methodDataProfileDataSize", CC "(JI)I", FN_PTR(methodDataProfileDataSize)}, + {CC "interpreterFrameSize", CC "(" BYTECODE_FRAME ")I", FN_PTR(interpreterFrameSize)}, }; int CompilerToVM::methods_count() { diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/jvmci/jvmciEnv.cpp --- a/hotspot/src/share/vm/jvmci/jvmciEnv.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/jvmci/jvmciEnv.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -36,6 +36,7 @@ #include "interpreter/linkResolver.hpp" #include "memory/allocation.inline.hpp" #include "memory/oopFactory.hpp" +#include "memory/resourceArea.hpp" #include "memory/universe.inline.hpp" #include "oops/methodData.hpp" #include "oops/objArrayKlass.hpp" @@ -436,7 +437,7 @@ stringStream st(buffer, O_BUFLEN); deps.print_dependency(witness, true, &st); *failure_detail = st.as_string(); - if (env == NULL || counter_changed) { + if (env == NULL || counter_changed || deps.type() == Dependencies::evol_method) { return JVMCIEnv::dependencies_failed; } else { // The dependencies were invalid at the time of installation diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/jvmci/jvmciRuntime.cpp --- a/hotspot/src/share/vm/jvmci/jvmciRuntime.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/jvmci/jvmciRuntime.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -34,6 +34,7 @@ #include "jvmci/jvmciEnv.hpp" #include "logging/log.hpp" #include "memory/oopFactory.hpp" +#include "memory/resourceArea.hpp" #include "oops/oop.inline.hpp" #include "oops/objArrayOop.inline.hpp" #include "prims/jvm.h" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/logging/log.cpp --- a/hotspot/src/share/vm/logging/log.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/logging/log.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,71 +28,13 @@ #ifndef PRODUCT +#include "gc/shared/gcTraceTime.inline.hpp" #include "logging/log.hpp" #include "logging/logConfiguration.hpp" #include "logging/logOutput.hpp" +#include "logging/logStream.inline.hpp" #include "memory/resourceArea.hpp" -void Test_log_length() { - remove("loglengthoutput.txt"); - - // Write long message to output file - ResourceMark rm; - LogHandle(logging) log; - bool success = LogConfiguration::parse_log_arguments("loglengthoutput.txt", "logging=trace", - NULL, NULL, log.error_stream()); - assert(success, "test unable to configure logging"); - log.trace("01:1234567890-" - "02:1234567890-" - "03:1234567890-" - "04:1234567890-" - "05:1234567890-" - "06:1234567890-" - "07:1234567890-" - "08:1234567890-" - "09:1234567890-" - "10:1234567890-" - "11:1234567890-" - "12:1234567890-" - "13:1234567890-" - "14:1234567890-" - "15:1234567890-" - "16:1234567890-" - "17:1234567890-" - "18:1234567890-" - "19:1234567890-" - "20:1234567890-" - "21:1234567890-" - "22:1234567890-" - "23:1234567890-" - "24:1234567890-" - "25:1234567890-" - "26:1234567890-" - "27:1234567890-" - "28:1234567890-" - "29:1234567890-" - "30:1234567890-" - "31:1234567890-" - "32:1234567890-" - "33:1234567890-" - "34:1234567890-" - "35:1234567890-" - "36:1234567890-" - "37:1234567890-"); - LogConfiguration::parse_log_arguments("loglengthoutput.txt", "all=off", - NULL, NULL, log.error_stream()); - - // Look for end of message in output file - FILE* fp = fopen("loglengthoutput.txt", "r"); - assert(fp, "File read error"); - char output[600]; - if (fgets(output, 600, fp) != NULL) { - assert(strstr(output, "37:1234567890-"), "logging print size error"); - } - fclose(fp); - remove("loglengthoutput.txt"); -} - #define assert_str_eq(s1, s2) \ assert(strcmp(s1, s2) == 0, "Expected '%s' to equal '%s'", s1, s2) @@ 
-102,18 +44,72 @@ #define assert_char_not_in(c, s) \ assert(strchr(s, c) == NULL, "Expected '%s' to *not* contain character '%c'", s, c) +class TestLogFile { + private: + char file_name[256]; + + void set_name(const char* test_name) { + const char* tmpdir = os::get_temp_directory(); + int pos = jio_snprintf(file_name, sizeof(file_name), "%s%svmtest.%s.%d.log", tmpdir, os::file_separator(), test_name, os::current_process_id()); + assert(pos > 0, "too small log file name buffer"); + assert((size_t)pos < sizeof(file_name), "too small log file name buffer"); + } + + public: + TestLogFile(const char* test_name) { + set_name(test_name); + remove(name()); + } + + ~TestLogFile() { + remove(name()); + } + + const char* name() { + return file_name; + } +}; + +class TestLogSavedConfig { + private: + char* _saved_config; + char* _new_output; + Log(logging) _log; + public: + TestLogSavedConfig(const char* apply_output = NULL, const char* apply_setting = NULL) : _new_output(0) { + ResourceMark rm; + _saved_config = os::strdup_check_oom(LogOutput::Stdout->config_string()); + bool success = LogConfiguration::parse_log_arguments("stdout", "all=off", NULL, NULL, _log.error_stream()); + assert(success, "test unable to turn all off"); + + if (apply_output) { + _new_output = os::strdup_check_oom(apply_output); + bool success = LogConfiguration::parse_log_arguments(_new_output, apply_setting, NULL, NULL, _log.error_stream()); + assert(success, "test unable to apply test log configuration"); + } + } + + ~TestLogSavedConfig() { + ResourceMark rm; + if (_new_output) { + bool success = LogConfiguration::parse_log_arguments(_new_output, "all=off", NULL, NULL, _log.error_stream()); + assert(success, "test unable to turn all off"); + os::free(_new_output); + } + + bool success = LogConfiguration::parse_log_arguments("stdout", _saved_config, NULL, NULL, _log.error_stream()); + assert(success, "test unable to restore log configuration"); + os::free(_saved_config); + } +}; + void Test_configure_stdout() { - ResourceMark rm; - LogHandle(logging) log; LogOutput* stdoutput = LogOutput::Stdout; - - // Save current stdout config and clear it - char* saved_config = os::strdup_check_oom(stdoutput->config_string()); - LogConfiguration::parse_log_arguments("stdout", "all=off", NULL, NULL, log.error_stream()); + TestLogSavedConfig tlsc; // Enable 'logging=info', verifying it has been set LogConfiguration::configure_stdout(LogLevel::Info, true, LOG_TAGS(logging)); - assert_str_eq("logging=info,", stdoutput->config_string()); + assert_str_eq("logging=info", stdoutput->config_string()); assert(log_is_enabled(Info, logging), "logging was not properly enabled"); // Enable 'gc=debug' (no wildcard), verifying no other tags are enabled @@ -131,9 +127,588 @@ LogConfiguration::configure_stdout(LogLevel::Off, false, LOG_TAGS(gc)); LogConfiguration::configure_stdout(LogLevel::Off, true, LOG_TAGS(logging)); assert_str_eq("all=off", stdoutput->config_string()); +} - // Restore saved configuration - LogConfiguration::parse_log_arguments("stdout", saved_config, NULL, NULL, log.error_stream()); - os::free(saved_config); +static int Test_logconfiguration_subscribe_triggered = 0; + +static void Test_logconfiguration_subscribe_helper() { + Test_logconfiguration_subscribe_triggered++; +} + +void Test_logconfiguration_subscribe() { + ResourceMark rm; + Log(logging) log; + + TestLogSavedConfig log_cfg("stdout", "logging*=trace"); + + LogConfiguration::register_update_listener(&Test_logconfiguration_subscribe_helper); + + 
LogConfiguration::parse_log_arguments("stdout", "logging=trace", NULL, NULL, log.error_stream()); + assert(Test_logconfiguration_subscribe_triggered == 1, "subscription not triggered (1)"); + + LogConfiguration::configure_stdout(LogLevel::Debug, true, LOG_TAGS(gc)); + assert(Test_logconfiguration_subscribe_triggered == 2, "subscription not triggered (2)"); + + LogConfiguration::disable_logging(); + assert(Test_logconfiguration_subscribe_triggered == 3, "subscription not triggered (3)"); + + // We need to renable stderr error logging since "disable_logging" disable it all. + // TestLogSavedConfig log_cfg will only renable stdout for us. + LogConfiguration::parse_log_arguments("stderr", "all=warning", NULL, NULL, log.error_stream()); + assert(Test_logconfiguration_subscribe_triggered == 4, "subscription not triggered (3)"); +} + +#define LOG_PREFIX_STR "THE_PREFIX " +#define LOG_LINE_STR "a log line" + +size_t Test_log_prefix_prefixer(char* buf, size_t len) { + int ret = jio_snprintf(buf, len, LOG_PREFIX_STR); + assert(ret > 0, "Failed to print prefix. Log buffer too small?"); + return (size_t) ret; +} + +void Test_log_prefix() { + TestLogFile log_file("log_prefix"); + TestLogSavedConfig log_cfg(log_file.name(), "logging+test=trace"); + + log_trace(logging, test)(LOG_LINE_STR); + + FILE* fp = fopen(log_file.name(), "r"); + assert(fp, "File read error"); + char output[1024]; + if (fgets(output, 1024, fp) != NULL) { + assert(strstr(output, LOG_PREFIX_STR LOG_LINE_STR), "logging prefix error"); + } + fclose(fp); +} + +void Test_log_big() { + char big_msg[4096] = {0}; + char Xchar = '~'; + + TestLogFile log_file("log_big"); + TestLogSavedConfig log_cfg(log_file.name(), "logging+test=trace"); + + memset(big_msg, Xchar, sizeof(big_msg) - 1); + + log_trace(logging, test)("%s", big_msg); + + FILE* fp = fopen(log_file.name(), "r"); + assert(fp, "File read error"); + char output[sizeof(big_msg)+128 /*decorators*/ ]; + if (fgets(output, sizeof(output), fp) != NULL) { + assert(strstr(output, LOG_PREFIX_STR), "logging prefix error"); + size_t count = 0; + for (size_t ps = 0 ; output[ps + count] != '\0'; output[ps + count] == Xchar ? count++ : ps++); + assert(count == (sizeof(big_msg) - 1) , "logging msg error"); + } + fclose(fp); +} + +void Test_logtagset_duplicates() { + for (LogTagSet* ts = LogTagSet::first(); ts != NULL; ts = ts->next()) { + char ts_name[512]; + ts->label(ts_name, sizeof(ts_name), ","); + + // verify that NO_TAG is never followed by a real tag + for (size_t i = 0; i < LogTag::MaxTags; i++) { + if (ts->tag(i) == LogTag::__NO_TAG) { + for (i++; i < LogTag::MaxTags; i++) { + assert(ts->tag(i) == LogTag::__NO_TAG, + "NO_TAG was followed by a real tag (%s) in tagset %s", + LogTag::name(ts->tag(i)), ts_name); + } + } + } + + // verify that there are no duplicate tagsets (same tags in different order) + for (LogTagSet* other = ts->next(); other != NULL; other = other->next()) { + if (ts->ntags() != other->ntags()) { + continue; + } + bool equal = true; + for (size_t i = 0; i < ts->ntags(); i++) { + LogTagType tag = ts->tag(i); + if (!other->contains(tag)) { + equal = false; + break; + } + } + // Since tagsets are implemented using template arguments, using both of + // the (logically equivalent) tagsets (t1, t2) and (t2, t1) somewhere will + // instantiate two different LogTagSetMappings. This causes multiple + // tagset instances to be created for the same logical set. We want to + // avoid this to save time, memory and prevent any confusion around it. 
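// [Editorial illustration, not part of the original changeset: the duplicate-tagset
//  check above matters because LogTagSetMapping is keyed on template-argument order,
//  so hypothetical uses such as LogTagSetMapping<LogTag::_gc, LogTag::_start> and
//  LogTagSetMapping<LogTag::_start, LogTag::_gc> would produce two distinct
//  instantiations, each with its own static LogTagSet, for the same logical
//  tag combination.]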
+ if (equal) { + char other_name[512]; + other->label(other_name, sizeof(other_name), ","); + assert(false, "duplicate LogTagSets found: '%s' vs '%s' " + "(tags must always be specified in the same order for each tagset)", + ts_name, other_name); + } + } + } +} + +#define Test_logtarget_string_literal "First line" + + +static void Test_logtarget_on() { + TestLogFile log_file("log_target"); + TestLogSavedConfig tlsc(log_file.name(), "gc=debug"); + + LogTarget(Debug, gc) log; + + assert(log.is_enabled(), "assert"); + + // Log the line and expect it to be available in the output file. + log.print(Test_logtarget_string_literal); + + FILE* fp = fopen(log_file.name(), "r"); + assert(fp != NULL, "File read error"); + + char output[256 /* Large enough buffer */]; + char* res = fgets(output, sizeof(output), fp); + assert(res != NULL, "assert"); + + assert(strstr(output, Test_logtarget_string_literal) != NULL, "log line missing"); + + fclose(fp); +} + +static void Test_logtarget_off() { + TestLogFile log_file("log_target"); + TestLogSavedConfig tlsc(log_file.name(), "gc=info"); + + LogTarget(Debug, gc) log; + + if (log.is_enabled()) { + // The log config could have been redirected gc=debug to a file. If gc=debug + // is enabled, we can only test that the LogTarget returns the same value + // as the log_is_enabled function. The rest of the test will be ignored. + assert(log.is_enabled() == log_is_enabled(Debug, gc), "assert"); + log_warning(logging)("This test doesn't support runs with -Xlog"); + return; + } + + // Try to log, but expect this to be filtered out. + log.print(Test_logtarget_string_literal); + + // Log a dummy line so that fgets doesn't return NULL because the file is empty. + log_info(gc)("Dummy line"); + + FILE* fp = fopen(log_file.name(), "r"); + assert(fp != NULL, "File read error"); + + char output[256 /* Large enough buffer */]; + char* res = fgets(output, sizeof(output), fp); + assert(res != NULL, "assert"); + + assert(strstr(output, Test_logtarget_string_literal) == NULL, "log line not missing"); + + fclose(fp); +} + +void Test_logtarget() { + Test_logtarget_on(); + Test_logtarget_off(); +} + + +static void Test_logstream_helper(outputStream* stream) { + TestLogFile log_file("log_stream"); + TestLogSavedConfig tlsc(log_file.name(), "gc=debug"); + + // Try to log, but expect this to be filtered out. 
+ stream->print("%d ", 3); stream->print("workers"); stream->cr(); + + FILE* fp = fopen(log_file.name(), "r"); + assert(fp != NULL, "File read error"); + + char output[256 /* Large enough buffer */]; + char* res = fgets(output, sizeof(output), fp); + assert(res != NULL, "assert"); + + assert(strstr(output, "3 workers") != NULL, "log line missing"); + + fclose(fp); +} + +static void Test_logstream_log() { + Log(gc) log; + LogStream stream(log.debug()); + + Test_logstream_helper(&stream); +} + +static void Test_logstream_logtarget() { + LogTarget(Debug, gc) log; + LogStream stream(log); + + Test_logstream_helper(&stream); +} + +static void Test_logstream_logstreamhandle() { + LogStreamHandle(Debug, gc) stream; + + Test_logstream_helper(&stream); +} + +static void Test_logstream_no_rm() { + ResourceMark rm; + outputStream* stream = LogTarget(Debug, gc)::stream(); + + Test_logstream_helper(stream); +} + +void Test_logstream() { + Test_logstream_log(); + Test_logstream_logtarget(); + Test_logstream_logstreamhandle(); + Test_logstream_no_rm(); +} + +void Test_loghandle_on() { + TestLogFile log_file("log_handle"); + TestLogSavedConfig tlsc(log_file.name(), "gc=debug"); + + Log(gc) log; + LogHandle log_handle(log); + + assert(log_handle.is_debug(), "assert"); + + // Try to log trough a LogHandle. + log_handle.debug("%d workers", 3); + + FILE* fp = fopen(log_file.name(), "r"); + assert(fp, "File read error"); + + char output[256 /* Large enough buffer */]; + char* res = fgets(output, sizeof(output), fp); + assert(res != NULL, "assert"); + + assert(strstr(output, "3 workers") != NULL, "log line missing"); + + fclose(fp); } + +void Test_loghandle_off() { + TestLogFile log_file("log_handle"); + TestLogSavedConfig tlsc(log_file.name(), "gc=info"); + + Log(gc) log; + LogHandle log_handle(log); + + if (log_handle.is_debug()) { + // The log config could have been redirected gc=debug to a file. If gc=debug + // is enabled, we can only test that the LogTarget returns the same value + // as the log_is_enabled function. The rest of the test will be ignored. + assert(log_handle.is_debug() == log_is_enabled(Debug, gc), "assert"); + log_warning(logging)("This test doesn't support runs with -Xlog"); + return; + } + + // Try to log trough a LogHandle. Should fail, since only info is turned on. + log_handle.debug("%d workers", 3); + + // Log a dummy line so that fgets doesn't return NULL because the file is empty. + log_info(gc)("Dummy line"); + + FILE* fp = fopen(log_file.name(), "r"); + assert(fp, "File read error"); + + char output[256 /* Large enough buffer */]; + char* res = fgets(output, sizeof(output), fp); + assert(res != NULL, "assert"); + + assert(strstr(output, "3 workers") == NULL, "log line missing"); + + fclose(fp); +} + +void Test_loghandle() { + Test_loghandle_on(); + Test_loghandle_off(); +} + +static void Test_logtargethandle_on() { + TestLogFile log_file("log_handle"); + TestLogSavedConfig tlsc(log_file.name(), "gc=debug"); + + LogTarget(Debug, gc) log; + LogTargetHandle log_handle(log); + + assert(log_handle.is_enabled(), "assert"); + + // Try to log trough a LogHandle. 
+ log_handle.print("%d workers", 3); + + FILE* fp = fopen(log_file.name(), "r"); + assert(fp, "File read error"); + + char output[256 /* Large enough buffer */]; + char* res = fgets(output, sizeof(output), fp); + assert(res != NULL, "assert"); + + assert(strstr(output, "3 workers") != NULL, "log line missing"); + + fclose(fp); +} + +static void Test_logtargethandle_off() { + TestLogFile log_file("log_handle"); + TestLogSavedConfig tlsc(log_file.name(), "gc=info"); + + LogTarget(Debug, gc) log; + LogTargetHandle log_handle(log); + + if (log_handle.is_enabled()) { + // The log config could have been redirected gc=debug to a file. If gc=debug + // is enabled, we can only test that the LogTarget returns the same value + // as the log_is_enabled function. The rest of the test will be ignored. + assert(log_handle.is_enabled() == log_is_enabled(Debug, gc), "assert"); + log_warning(logging)("This test doesn't support runs with -Xlog"); + return; + } + + // Try to log trough a LogHandle. Should fail, since only info is turned on. + log_handle.print("%d workers", 3); + + // Log a dummy line so that fgets doesn't return NULL because the file is empty. + log_info(gc)("Dummy line"); + + FILE* fp = fopen(log_file.name(), "r"); + assert(fp, "File read error"); + + char output[256 /* Large enough buffer */]; + char* res = fgets(output, sizeof(output), fp); + assert(res != NULL, "assert"); + + assert(strstr(output, "3 workers") == NULL, "log line missing"); + + fclose(fp); +} + +void Test_logtargethandle() { + Test_logtargethandle_on(); + Test_logtargethandle_off(); +} + +static void Test_log_gctracetime_full() { + TestLogFile log_file("log_gctracetime"); + TestLogSavedConfig tlsc(log_file.name(), "gc=debug,gc+start=debug"); + + LogTarget(Debug, gc) gc_debug; + LogTarget(Debug, gc, start) gc_start_debug; + + assert(gc_debug.is_enabled(), "assert"); + assert(gc_start_debug.is_enabled(), "assert"); + + { + MutexLocker lock(Heap_lock); // Needed to read heap usage + GCTraceTime(Debug, gc) timer("Test GC", NULL, GCCause::_allocation_failure, true); + } + + FILE* fp = fopen(log_file.name(), "r"); + assert(fp, "File read error"); + + char output[256 /* Large enough buffer */]; + + char* res = fgets(output, sizeof(output), fp); + assert(res != NULL, "assert"); + + // [2.975s][debug][gc,start] Test GC (Allocation Failure) (2.975s) + assert(strstr(output, "[gc,start") != NULL, "Incorrect tag set"); + assert(strstr(output, "] Test GC (Allocation Failure) (") != NULL, "Incorrect log line"); + assert(strstr(output, "s)") != NULL, "Incorrect log line"); + + res = fgets(output, sizeof(output), fp); + assert(res != NULL, "assert"); + + // [2.975s][debug][gc ] Test GC (Allocation Failure) 59M->59M(502M) (2.975s, 2.975s) 0.026ms + assert(strstr(output, "[gc ") != NULL, "Incorrect tag set"); + assert(strstr(output, "] Test GC (Allocation Failure) ") != NULL, "Incorrect log line"); + assert(strstr(output, "M) (") != NULL, "Incorrect log line"); + assert(strstr(output, "s, ") != NULL, "Incorrect log line"); + assert(strstr(output, "s) ") != NULL, "Incorrect log line"); + assert(strstr(output, "ms") != NULL, "Incorrect log line"); + + fclose(fp); +} + +static void Test_log_gctracetime_full_multitag() { + TestLogFile log_file("log_gctracetime"); + TestLogSavedConfig tlsc(log_file.name(), "gc+ref=debug,gc+ref+start=debug"); + + LogTarget(Debug, gc, ref) gc_debug; + LogTarget(Debug, gc, ref, start) gc_start_debug; + + assert(gc_debug.is_enabled(), "assert"); + assert(gc_start_debug.is_enabled(), "assert"); + + { + MutexLocker 
lock(Heap_lock); // Needed to read heap usage + GCTraceTime(Debug, gc, ref) timer("Test GC", NULL, GCCause::_allocation_failure, true); + } + + FILE* fp = fopen(log_file.name(), "r"); + assert(fp, "File read error"); + + char output[256 /* Large enough buffer */]; + + char* res = fgets(output, sizeof(output), fp); + assert(res != NULL, "assert"); + + // [2.975s][debug][gc,start] Test GC (Allocation Failure) (2.975s) + assert(strstr(output, "[gc,ref,start") != NULL, "Incorrect tag set"); + assert(strstr(output, "] Test GC (Allocation Failure) (") != NULL, "Incorrect log line"); + assert(strstr(output, "s)") != NULL, "Incorrect log line"); + + res = fgets(output, sizeof(output), fp); + assert(res != NULL, "assert"); + + // [2.975s][debug][gc ] Test GC (Allocation Failure) 59M->59M(502M) (2.975s, 2.975s) 0.026ms + assert(strstr(output, "[gc,ref ") != NULL, "Incorrect tag set"); + assert(strstr(output, "] Test GC (Allocation Failure) ") != NULL, "Incorrect log line"); + assert(strstr(output, "M) (") != NULL, "Incorrect log line"); + assert(strstr(output, "s, ") != NULL, "Incorrect log line"); + assert(strstr(output, "s) ") != NULL, "Incorrect log line"); + assert(strstr(output, "ms") != NULL, "Incorrect log line"); + + fclose(fp); +} + +static void Test_log_gctracetime_no_heap() { + TestLogFile log_file("log_gctracetime"); + TestLogSavedConfig tlsc(log_file.name(), "gc=debug,gc+start=debug"); + + LogTarget(Debug, gc) gc_debug; + LogTarget(Debug, gc, start) gc_start_debug; + + assert(gc_debug.is_enabled(), "assert"); + assert(gc_start_debug.is_enabled(), "assert"); + + { + GCTraceTime(Debug, gc) timer("Test GC", NULL, GCCause::_allocation_failure, false); + } + + FILE* fp = fopen(log_file.name(), "r"); + assert(fp, "File read error"); + + char output[256 /* Large enough buffer */]; + + char* res = fgets(output, sizeof(output), fp); + assert(res != NULL, "assert"); + + // [2.975s][debug][gc,start] Test GC (Allocation Failure) (2.975s) + assert(strstr(output, "[gc,start") != NULL, "Incorrect tag set"); + assert(strstr(output, "] Test GC (Allocation Failure) (") != NULL, "Incorrect log line"); + assert(strstr(output, "s)") != NULL, "Incorrect log line"); + + res = fgets(output, sizeof(output), fp); + assert(res != NULL, "assert"); + + // [2.975s][debug][gc ] Test GC (Allocation Failure) (2.975s, 2.975s) 0.026ms + assert(strstr(output, "[gc ") != NULL, "Incorrect tag set"); + assert(strstr(output, "] Test GC (Allocation Failure) (") != NULL, "Incorrect log line"); + assert(strstr(output, "M) (") == NULL, "Incorrect log line"); + assert(strstr(output, "s, ") != NULL, "Incorrect log line"); + assert(strstr(output, "s) ") != NULL, "Incorrect log line"); + assert(strstr(output, "ms") != NULL, "Incorrect log line"); + + fclose(fp); +} + +static void Test_log_gctracetime_no_cause() { + TestLogFile log_file("log_gctracetime"); + TestLogSavedConfig tlsc(log_file.name(), "gc=debug,gc+start=debug"); + + LogTarget(Debug, gc) gc_debug; + LogTarget(Debug, gc, start) gc_start_debug; + + assert(gc_debug.is_enabled(), "assert"); + assert(gc_start_debug.is_enabled(), "assert"); + + { + MutexLocker lock(Heap_lock); // Needed to read heap usage + GCTraceTime(Debug, gc) timer("Test GC", NULL, GCCause::_no_gc, true); + } + + FILE* fp = fopen(log_file.name(), "r"); + assert(fp, "File read error"); + + char output[256 /* Large enough buffer */]; + + char* res = fgets(output, sizeof(output), fp); + assert(res != NULL, "assert"); + + // [2.975s][debug][gc,start] Test GC (2.975s) + assert(strstr(output, "[gc,start") != 
NULL, "Incorrect tag set"); + assert(strstr(output, "] Test GC (") != NULL, "Incorrect log line"); + assert(strstr(output, "s)") != NULL, "Incorrect log line"); + + res = fgets(output, sizeof(output), fp); + assert(res != NULL, "assert"); + + // [2.975s][debug][gc ] Test GC 59M->59M(502M) (2.975s, 2.975s) 0.026ms + assert(strstr(output, "[gc ") != NULL, "Incorrect tag set"); + assert(strstr(output, "] Test GC ") != NULL, "Incorrect log line"); + assert(strstr(output, "M) (") != NULL, "Incorrect log line"); + assert(strstr(output, "s, ") != NULL, "Incorrect log line"); + assert(strstr(output, "s) ") != NULL, "Incorrect log line"); + assert(strstr(output, "ms") != NULL, "Incorrect log line"); + + fclose(fp); +} + +static void Test_log_gctracetime_no_heap_no_cause() { + TestLogFile log_file("log_gctracetime"); + TestLogSavedConfig tlsc(log_file.name(), "gc=debug,gc+start=debug"); + + LogTarget(Debug, gc) gc_debug; + LogTarget(Debug, gc, start) gc_start_debug; + + assert(gc_debug.is_enabled(), "assert"); + assert(gc_start_debug.is_enabled(), "assert"); + + { + MutexLocker lock(Heap_lock); // Needed to read heap usage + GCTraceTime(Debug, gc) timer("Test GC", NULL, GCCause::_no_gc, false); + } + + FILE* fp = fopen(log_file.name(), "r"); + assert(fp, "File read error"); + + char output[256 /* Large enough buffer */]; + + char* res = fgets(output, sizeof(output), fp); + assert(res != NULL, "assert"); + + // [2.975s][debug][gc,start] Test GC (2.975s) + assert(strstr(output, "[gc,start") != NULL, "Incorrect tag set"); + assert(strstr(output, "] Test GC (") != NULL, "Incorrect log line"); + assert(strstr(output, "s)") != NULL, "Incorrect log line"); + + res = fgets(output, sizeof(output), fp); + assert(res != NULL, "assert"); + + // [2.975s][debug][gc ] Test GC (2.975s, 2.975s) 0.026ms + assert(strstr(output, "[gc ") != NULL, "Incorrect tag set"); + assert(strstr(output, "] Test GC (") != NULL, "Incorrect log line"); + assert(strstr(output, "M) (") == NULL, "Incorrect log line"); + assert(strstr(output, "s, ") != NULL, "Incorrect log line"); + assert(strstr(output, "s) ") != NULL, "Incorrect log line"); + assert(strstr(output, "ms") != NULL, "Incorrect log line"); + + fclose(fp); +} + +void Test_log_gctracetime() { + Test_log_gctracetime_full(); + Test_log_gctracetime_full_multitag(); + Test_log_gctracetime_no_heap(); + Test_log_gctracetime_no_cause(); + Test_log_gctracetime_no_heap_no_cause(); +} + #endif // PRODUCT diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/logging/log.hpp --- a/hotspot/src/share/vm/logging/log.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/logging/log.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -29,10 +29,8 @@ #include "logging/logTagSet.hpp" #include "logging/logTag.hpp" #include "memory/allocation.hpp" -#include "memory/allocation.inline.hpp" #include "runtime/os.hpp" #include "utilities/debug.hpp" -#include "utilities/ostream.hpp" // // Logging macros @@ -44,19 +42,19 @@ // // Note that these macros will not evaluate the arguments unless the logging is enabled. // -#define log_error(...) (!log_is_enabled(Error, __VA_ARGS__)) ? (void)0 : Log::write -#define log_warning(...) (!log_is_enabled(Warning, __VA_ARGS__)) ? (void)0 : Log::write -#define log_info(...) (!log_is_enabled(Info, __VA_ARGS__)) ? (void)0 : Log::write -#define log_debug(...) (!log_is_enabled(Debug, __VA_ARGS__)) ? (void)0 : Log::write -#define log_trace(...) (!log_is_enabled(Trace, __VA_ARGS__)) ? (void)0 : Log::write +#define log_error(...) (!log_is_enabled(Error, __VA_ARGS__)) ? 
(void)0 : LogImpl::write +#define log_warning(...) (!log_is_enabled(Warning, __VA_ARGS__)) ? (void)0 : LogImpl::write +#define log_info(...) (!log_is_enabled(Info, __VA_ARGS__)) ? (void)0 : LogImpl::write +#define log_debug(...) (!log_is_enabled(Debug, __VA_ARGS__)) ? (void)0 : LogImpl::write +#define log_trace(...) (!log_is_enabled(Trace, __VA_ARGS__)) ? (void)0 : LogImpl::write // Macros for logging that should be excluded in product builds. // Available for levels Info, Debug and Trace. Includes test macro that // evaluates to false in product builds. #ifndef PRODUCT -#define log_develop_info(...) (!log_is_enabled(Info, __VA_ARGS__)) ? (void)0 : Log::write -#define log_develop_debug(...) (!log_is_enabled(Debug, __VA_ARGS__)) ? (void)0 : Log::write -#define log_develop_trace(...) (!log_is_enabled(Trace, __VA_ARGS__)) ? (void)0 : Log::write +#define log_develop_info(...) (!log_is_enabled(Info, __VA_ARGS__)) ? (void)0 : LogImpl::write +#define log_develop_debug(...) (!log_is_enabled(Debug, __VA_ARGS__)) ? (void)0 : LogImpl::write +#define log_develop_trace(...) (!log_is_enabled(Trace, __VA_ARGS__)) ? (void)0 : LogImpl::write #define log_develop_is_enabled(level, ...) log_is_enabled(level, __VA_ARGS__) #else #define DUMMY_ARGUMENT_CONSUMER(...) @@ -67,7 +65,7 @@ #endif // Convenience macro to test if the logging is enabled on the specified level for given tags. -#define log_is_enabled(level, ...) (Log::is_level(LogLevel::level)) +#define log_is_enabled(level, ...) (LogImpl::is_level(LogLevel::level)) // // Log class for more advanced logging scenarios. @@ -78,18 +76,43 @@ // calls to _stream() functions (trace_stream(), debug_stream(), etc). // // Example usage: -// LogHandle(logging) log; +// Log(logging) log; // if (log.is_debug()) { // ... // log.debug("result = %d", result).trace(" tracing info"); // obj->print_on(log.debug_stream()); // } // -#define LogHandle(...) Log +#define Log(...) LogImpl + +// +// Log class that embeds both log tags and a log level. +// +// The class provides a way to write the tags and log level once, +// so that redundant specification of tags or levels can be avoided. +// +// Example usage: +// LogTarget(Debug, gc) out; +// if (out.is_enabled()) { +// ... +// out.print("Worker: %u", i); +// out.print(" data: %d", x); +// ... +// print_stats(out.stream()); +// } +// +#define LogTarget(level, ...) LogTargetImpl + +// Forward declaration to decouple this file from the outputStream API. +class outputStream; +outputStream* create_log_stream(LogLevelType level, LogTagSet* tagset); + +template +class LogTargetImpl; template -class Log VALUE_OBJ_CLASS_SPEC { +class LogImpl VALUE_OBJ_CLASS_SPEC { private: static const size_t LogBufferSize = 512; public: @@ -100,7 +123,7 @@ // Empty constructor to avoid warnings on MSVC about unused variables // when the log instance is only used for static functions. - Log() { + LogImpl() { } static bool is_level(LogLevelType level) { @@ -113,7 +136,7 @@ va_start(args, fmt); vwrite(level, fmt, args); va_end(args); - }; + } template ATTRIBUTE_PRINTF(1, 2) @@ -122,40 +145,19 @@ va_start(args, fmt); vwrite(Level, fmt, args); va_end(args); - }; + } ATTRIBUTE_PRINTF(2, 0) static void vwrite(LogLevelType level, const char* fmt, va_list args) { - char buf[LogBufferSize]; - va_list saved_args; // For re-format on buf overflow. 
- va_copy(saved_args, args); - size_t prefix_len = LogPrefix::prefix(buf, sizeof(buf)); - // Check that string fits in buffer; resize buffer if necessary - int ret = os::log_vsnprintf(buf + prefix_len, sizeof(buf) - prefix_len, fmt, args); - assert(ret >= 0, "Log message buffer issue"); - if ((size_t)ret >= sizeof(buf)) { - size_t newbuf_len = prefix_len + ret + 1; - char* newbuf = NEW_C_HEAP_ARRAY(char, newbuf_len, mtLogging); - prefix_len = LogPrefix::prefix(newbuf, newbuf_len); - ret = os::log_vsnprintf(newbuf + prefix_len, newbuf_len - prefix_len, fmt, saved_args); - assert(ret >= 0, "Log message buffer issue"); - puts(level, newbuf); - FREE_C_HEAP_ARRAY(char, newbuf); - } else { - puts(level, buf); - } - } - - static void puts(LogLevelType level, const char* string) { - LogTagSetMapping::tagset().log(level, string); + LogTagSetMapping::tagset().vwrite(level, fmt, args); } #define LOG_LEVEL(level, name) ATTRIBUTE_PRINTF(2, 0) \ - Log& v##name(const char* fmt, va_list args) { \ + LogImpl& v##name(const char* fmt, va_list args) { \ vwrite(LogLevel::level, fmt, args); \ return *this; \ } \ - Log& name(const char* fmt, ...) ATTRIBUTE_PRINTF(2, 3) { \ + LogImpl& name(const char* fmt, ...) ATTRIBUTE_PRINTF(2, 3) { \ va_list args; \ va_start(args, fmt); \ vwrite(LogLevel::level, fmt, args); \ @@ -166,10 +168,39 @@ return is_level(LogLevel::level); \ } \ static outputStream* name##_stream() { \ - return new logStream(write); \ + return create_log_stream(LogLevel::level, &LogTagSetMapping::tagset()); \ + } \ + static LogTargetImpl* name() { \ + return (LogTargetImpl*)NULL; \ } LOG_LEVEL_LIST #undef LOG_LEVEL }; +// Combines logging tags and a logging level. +template +class LogTargetImpl { +public: + // Empty constructor to avoid warnings on MSVC about unused variables + // when the log instance is only used for static functions. + LogTargetImpl() { + } + + static bool is_enabled() { + return LogImpl::is_level(level); + } + + static void print(const char* fmt, ...) ATTRIBUTE_PRINTF(1, 2) { + va_list args; + va_start(args, fmt); + LogImpl::vwrite(level, fmt, args); + va_end(args); + } + + static outputStream* stream() { + return create_log_stream(level, &LogTagSetMapping::tagset()); + } +}; + #endif // SHARE_VM_LOGGING_LOG_HPP diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/logging/logConfiguration.cpp --- a/hotspot/src/share/vm/logging/logConfiguration.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/logging/logConfiguration.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -40,6 +40,9 @@ LogOutput** LogConfiguration::_outputs = NULL; size_t LogConfiguration::_n_outputs = 0; +LogConfiguration::UpdateListenerFunction* LogConfiguration::_listener_callbacks = NULL; +size_t LogConfiguration::_n_listener_callbacks = 0; + // Stack object to take the lock for configuring the logging. // Should only be held during the critical parts of the configuration // (when calling configure_output or reading/modifying the outputs array). @@ -71,7 +74,7 @@ void LogConfiguration::post_initialize() { LogDiagnosticCommand::registerCommand(); - LogHandle(logging) log; + Log(logging) log; log.info("Log configuration fully initialized."); log_develop_info(logging)("Develop logging is available."); if (log.is_trace()) { @@ -254,6 +257,7 @@ for (size_t i = 0; i < _n_outputs; i++) { disable_output(i); } + notify_update_listeners(); } void LogConfiguration::configure_stdout(LogLevelType level, bool exact_match, ...) 
{ @@ -282,6 +286,7 @@ // Apply configuration to stdout (output #0), with the same decorators as before. ConfigurationLock cl; configure_output(0, expr, LogOutput::Stdout->decorators()); + notify_update_listeners(); } bool LogConfiguration::parse_command_line_arguments(const char* opts) { @@ -373,6 +378,7 @@ } } configure_output(idx, expr, decorators); + notify_update_listeners(); return true; } @@ -471,3 +477,20 @@ } } +void LogConfiguration::register_update_listener(UpdateListenerFunction cb) { + assert(cb != NULL, "Should not register NULL as listener"); + ConfigurationLock cl; + size_t idx = _n_listener_callbacks++; + _listener_callbacks = REALLOC_C_HEAP_ARRAY(UpdateListenerFunction, + _listener_callbacks, + _n_listener_callbacks, + mtLogging); + _listener_callbacks[idx] = cb; +} + +void LogConfiguration::notify_update_listeners() { + assert(ConfigurationLock::current_thread_has_lock(), "notify_update_listeners must be called in ConfigurationLock scope (lock held)"); + for (size_t i = 0; i < _n_listener_callbacks; i++) { + _listener_callbacks[i](); + } +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/logging/logConfiguration.hpp --- a/hotspot/src/share/vm/logging/logConfiguration.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/logging/logConfiguration.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -37,10 +37,26 @@ // kept implicitly in the LogTagSets and their LogOutputLists. During configuration the tagsets // are iterated over and updated accordingly. class LogConfiguration : public AllStatic { + public: + // Function for listeners + typedef void (*UpdateListenerFunction)(void); + + // Register callback for config change. + // The callback is always called with ConfigurationLock held, + // hence doing log reconfiguration from the callback will deadlock. + // The main Java thread may call this callback if there is an early registration + // else the attach listener JavaThread, started via diagnostic command, will be executing thread. + // The main purpose of this callback is to see if a loglevel have been changed. + // There is no way to unregister. + static void register_update_listener(UpdateListenerFunction cb); + private: static LogOutput** _outputs; static size_t _n_outputs; + static UpdateListenerFunction* _listener_callbacks; + static size_t _n_listener_callbacks; + // Create a new output. Returns NULL if failed. static LogOutput* new_output(char* name, const char* options, outputStream* errstream); @@ -60,6 +76,9 @@ // Configure output (add or update existing configuration) to log on tag-level combination using specified decorators. static void configure_output(size_t idx, const LogTagLevelExpression& tag_level_expression, const LogDecorators& decorators); + // This should be called after any configuration change while still holding ConfigurationLock + static void notify_update_listeners(); + public: // Initialization and finalization of log configuration, to be run at vm startup and shutdown respectively. 
static void initialize(jlong vm_start_time); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/logging/logFileOutput.cpp --- a/hotspot/src/share/vm/logging/logFileOutput.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/logging/logFileOutput.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -64,7 +64,7 @@ } if (fclose(_stream) != 0) { jio_fprintf(defaultStream::error_stream(), "Could not close log file '%s' (%s).\n", - _file_name, strerror(errno)); + _file_name, os::strerror(errno)); } } os::free(_archive_name); @@ -139,7 +139,7 @@ } _stream = fopen(_file_name, FileOpenMode); if (_stream == NULL) { - log_error(logging)("Could not open log file '%s' (%s).\n", _file_name, strerror(errno)); + log_error(logging)("Could not open log file '%s' (%s).\n", _file_name, os::strerror(errno)); return false; } return true; @@ -176,7 +176,7 @@ // Rename the file from ex hotspot.log to hotspot.log.2 if (rename(_file_name, _archive_name) == -1) { jio_fprintf(defaultStream::error_stream(), "Could not rename log file '%s' to '%s' (%s).\n", - _file_name, _archive_name, strerror(errno)); + _file_name, _archive_name, os::strerror(errno)); } } @@ -194,7 +194,7 @@ if (fclose(_stream)) { jio_fprintf(defaultStream::error_stream(), "Error closing file '%s' during log rotation (%s).\n", - _file_name, strerror(errno)); + _file_name, os::strerror(errno)); } // Archive the current log file @@ -204,7 +204,7 @@ _stream = fopen(_file_name, FileOpenMode); if (_stream == NULL) { jio_fprintf(defaultStream::error_stream(), "Could not reopen file '%s' during log rotation (%s).\n", - _file_name, strerror(errno)); + _file_name, os::strerror(errno)); return; } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/logging/logHandle.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/src/share/vm/logging/logHandle.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,105 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ +#ifndef SHARE_VM_LOGGING_LOGHANDLE_HPP +#define SHARE_VM_LOGGING_LOGHANDLE_HPP + +#include "logging/log.hpp" + +// Wraps a Log instance and throws away the template information. 
+// +// This can be used to pass a Log instance as a parameter without +// polluting the surrounding API with template functions. +class LogHandle { +private: + LogTagSet* _tagset; + +public: + template + LogHandle(const LogImpl& type_carrier) : + _tagset(&LogTagSetMapping::tagset()) {} + + bool is_level(LogLevelType level) { + return _tagset->is_level(level); + } + +#define LOG_LEVEL(level, name) ATTRIBUTE_PRINTF(2, 0) \ + LogHandle& v##name(const char* fmt, va_list args) { \ + _tagset->vwrite(LogLevel::level, fmt, args); \ + return *this; \ + } \ + LogHandle& name(const char* fmt, ...) ATTRIBUTE_PRINTF(2, 3) { \ + va_list args; \ + va_start(args, fmt); \ + _tagset->vwrite(LogLevel::level, fmt, args); \ + va_end(args); \ + return *this; \ + } \ + bool is_##name() { \ + return _tagset->is_level(LogLevel::level); \ + } + LOG_LEVEL_LIST +#undef LOG_LEVEL +}; + +// Wraps a LogTarget instance and throws away the template information. +// +// This can be used to pass a Log instance as a parameter without +// polluting the surrounding API with template functions. +class LogTargetHandle { + friend class LogStream; + +private: + const LogLevelType _level; + LogTagSet* _tagset; + +public: + template + LogTargetHandle(const LogTargetImpl& type_carrier) : + _level(level), + _tagset(&LogTagSetMapping::tagset()) {} + + template + static LogTargetHandle create() { + return LogTargetHandle(LogTargetImpl()); + } + + void print(const char* fmt, ...) ATTRIBUTE_PRINTF(2, 3) { + va_list args; + va_start(args, fmt); + _tagset->vwrite(_level, fmt, args); + va_end(args); + } + + bool is_enabled() const { + return _tagset->is_level(_level); + } + + // Creates a log stream from the information stored in this instance. + // Callers need a ResourceMark on the stack. + outputStream* stream() { + return create_log_stream(_level, _tagset);; + } +}; + +#endif // SHARE_VM_LOGGING_LOGHANDLE_HPP diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/logging/logOutput.cpp --- a/hotspot/src/share/vm/logging/logOutput.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/logging/logOutput.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -56,6 +56,11 @@ } size_t offset = strlen(_config_string); + if (offset > 0) { + // Add commas in-between tag and level combinations in the config string + _config_string[offset++] = ','; + } + for (;;) { int ret = ts->label(_config_string + offset, _config_string_buffer_size - offset, "+"); if (ret == -1) { @@ -69,7 +74,7 @@ offset = strlen(_config_string); for (;;) { - int ret = jio_snprintf(_config_string + offset, _config_string_buffer_size - offset, "=%s,", LogLevel::name(level)); + int ret = jio_snprintf(_config_string + offset, _config_string_buffer_size - offset, "=%s", LogLevel::name(level)); if (ret == -1) { _config_string_buffer_size *= 2; _config_string = REALLOC_C_HEAP_ARRAY(char, _config_string, _config_string_buffer_size, mtLogging); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/logging/logPrefix.hpp --- a/hotspot/src/share/vm/logging/logPrefix.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/logging/logPrefix.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -38,23 +38,30 @@ // List of prefixes for specific tags and/or tagsets. 
// Syntax: LOG_PREFIX(, LOG_TAGS()) // Where the prefixer function matches the following signature: size_t (*)(char*, size_t) + +// Prefix function for internal vm test +DEBUG_ONLY(size_t Test_log_prefix_prefixer(char* buf, size_t len);) + #define LOG_PREFIX_LIST \ LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc)) \ LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, age)) \ LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, alloc)) \ + LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, alloc, region)) \ LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, barrier)) \ LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, classhisto)) \ LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, compaction)) \ - LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, compaction, phases)) \ LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, cpu)) \ LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, ergo)) \ LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, ergo, cset)) \ LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, ergo, heap)) \ LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, ergo, ihop)) \ LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, heap)) \ + LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, heap, region)) \ LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, freelist)) \ + LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, humongous)) \ LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, ihop)) \ LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, liveness)) \ + LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, marking)) \ LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, metaspace)) \ LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, phases)) \ LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, phases, start)) \ @@ -70,7 +77,9 @@ LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, task, start)) \ LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, task, stats)) \ LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, task, time)) \ - LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, tlab)) + DEBUG_ONLY(LOG_PREFIX(Test_log_prefix_prefixer, LOG_TAGS(logging, test))) \ + LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, tlab)) \ + LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, workgang)) // The empty prefix, used when there's no prefix defined. diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/logging/logStream.cpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/src/share/vm/logging/logStream.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "logging/log.hpp" +#include "logging/logStream.hpp" + +// Create a log stream without an embedded ResourceMark. 
+// The function is placed here to be called out-of-line in log.hpp. +outputStream* create_log_stream(LogLevelType level, LogTagSet* tagset) { + return new LogStreamNoResourceMark(level, tagset); +} + diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/logging/logStream.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/src/share/vm/logging/logStream.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_LOGGING_LOGSTREAM_HPP +#define SHARE_VM_LOGGING_LOGSTREAM_HPP + +#include "logging/log.hpp" +#include "utilities/ostream.hpp" + +// An output stream that logs to the logging framework. +// Requires a ResourceMark on the stack. +class LogStreamNoResourceMark : public outputStream { +private: + stringStream _current_line; + LogLevelType _level; + LogTagSet* _tagset; + +public: + LogStreamNoResourceMark(LogLevelType level, LogTagSet* tagset) : _level(level), _tagset(tagset) {} + ~LogStreamNoResourceMark() { + guarantee(_current_line.size() == 0, "Buffer not flushed. Missing call to print_cr()?"); + } + + void write(const char* s, size_t len); +}; + +#endif // SHARE_VM_LOGGING_LOGSTREAM_HPP diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/logging/logStream.inline.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/src/share/vm/logging/logStream.inline.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ +#ifndef SHARE_VM_LOGGING_LOGSTREAM_INLINE_HPP +#define SHARE_VM_LOGGING_LOGSTREAM_INLINE_HPP + +#include "logging/log.hpp" +#include "logging/logHandle.hpp" +#include "logging/logStream.hpp" +#include "memory/resourceArea.hpp" +#include "utilities/ostream.hpp" + +inline void LogStreamNoResourceMark::write(const char* s, size_t len) { + if (len > 0 && s[len - 1] == '\n') { + _current_line.write(s, len - 1); + _tagset->write(_level, "%s", _current_line.as_string()); + _current_line.reset(); + } else { + _current_line.write(s, len); + } + update_position(s, len); +} + +// An output stream that logs to the logging framework, and embeds a ResourceMark. +// +// The class is intended to be stack allocated. +// Care needs to be taken when nested ResourceMarks are used. +class LogStream : public outputStream { +private: + ResourceMark _embedded_resource_mark; + LogStreamNoResourceMark _stream; + +public: + // Constructor to support creation from a LogTarget instance. + // + // LogTarget(Debug, gc) log; + // LogStream(log) stream; + template + LogStream(const LogTargetImpl& type_carrier) : + _embedded_resource_mark(), + _stream(level, &LogTagSetMapping::tagset()) {} + + // Constructor to support creation from typed (likely NULL) pointer. Mostly used by the logging framework. + // + // LogStream stream(log.debug()); + // LogStream stream((LogTargetImpl*)NULL); + template + LogStream(const LogTargetImpl* type_carrier) : + _embedded_resource_mark(), + _stream(level, &LogTagSetMapping::tagset()) {} + + // Constructor to support creation from a LogTargetHandle. + // + // LogTarget(Debug, gc) log; + // LogTargetHandle(log) handle; + // LogStream stream(handle); + LogStream(LogTargetHandle handle) : + _embedded_resource_mark(), + _stream(handle._level, handle._tagset) {} + + // Override of outputStream::write. + void write(const char* s, size_t len) { _stream.write(s, len); } +}; + +// Support creation of a LogStream without having to provide a LogTarget pointer. +#define LogStreamHandle(level, ...) 
LogStreamTemplate + +template +class LogStreamTemplate : public LogStream { +public: + LogStreamTemplate() : LogStream((LogTargetImpl*)NULL) {} +}; + +#endif // SHARE_VM_LOGGING_LOGSTREAM_INLINE_HPP diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/logging/logTag.hpp --- a/hotspot/src/share/vm/logging/logTag.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/logging/logTag.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -24,6 +24,7 @@ #ifndef SHARE_VM_LOGGING_LOGTAG_HPP #define SHARE_VM_LOGGING_LOGTAG_HPP +#include "logging/logTag_ext.hpp" #include "memory/allocation.hpp" #include "utilities/globalDefinitions.hpp" @@ -33,6 +34,7 @@ #define LOG_TAG_LIST \ LOG_TAG(alloc) \ LOG_TAG(age) \ + LOG_TAG(arguments) \ LOG_TAG(barrier) \ LOG_TAG(biasedlocking) \ LOG_TAG(bot) \ @@ -45,6 +47,7 @@ LOG_TAG(classunload) /* Trace unloading of classes */ \ LOG_TAG(classpath) \ LOG_TAG(compaction) \ + LOG_TAG(constraints) \ LOG_TAG(cpu) \ LOG_TAG(cset) \ LOG_TAG(defaultmethods) \ @@ -64,32 +67,41 @@ LOG_TAG(metaspace) \ LOG_TAG(modules) \ LOG_TAG(monitorinflation) \ + LOG_TAG(monitormismatch) \ LOG_TAG(os) \ LOG_TAG(phases) \ LOG_TAG(plab) \ LOG_TAG(promotion) \ + LOG_TAG(preorder) /* Trace all classes loaded in order referenced (not loaded) */ \ LOG_TAG(protectiondomain) /* "Trace protection domain verification" */ \ LOG_TAG(ref) \ LOG_TAG(refine) \ LOG_TAG(region) \ LOG_TAG(remset) \ LOG_TAG(safepoint) \ + LOG_TAG(safepointcleanup) \ LOG_TAG(scavenge) \ LOG_TAG(scrub) \ + LOG_TAG(stacktrace) \ LOG_TAG(start) \ LOG_TAG(startuptime) \ LOG_TAG(state) \ LOG_TAG(stats) \ LOG_TAG(stringdedup) \ + LOG_TAG(stringtable) \ LOG_TAG(survivor) \ LOG_TAG(sweep) \ LOG_TAG(task) \ + DEBUG_ONLY(LOG_TAG(test)) \ LOG_TAG(thread) \ LOG_TAG(tlab) \ LOG_TAG(time) \ + LOG_TAG(verification) \ LOG_TAG(verify) \ LOG_TAG(vmoperation) \ - LOG_TAG(vtables) + LOG_TAG(vtables) \ + LOG_TAG(workgang) \ + LOG_TAG_LIST_EXT #define PREFIX_LOG_TAG(T) (LogTag::_##T) diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/logging/logTagSet.cpp --- a/hotspot/src/share/vm/logging/logTagSet.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/logging/logTagSet.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -27,14 +27,15 @@ #include "logging/logOutput.hpp" #include "logging/logTag.hpp" #include "logging/logTagSet.hpp" +#include "memory/allocation.inline.hpp" LogTagSet* LogTagSet::_list = NULL; size_t LogTagSet::_ntagsets = 0; // This constructor is called only during static initialization. // See the declaration in logTagSet.hpp for more information. -LogTagSet::LogTagSet(LogTagType t0, LogTagType t1, LogTagType t2, LogTagType t3, LogTagType t4) - : _next(_list) { +LogTagSet::LogTagSet(PrefixWriter prefix_writer, LogTagType t0, LogTagType t1, LogTagType t2, LogTagType t3, LogTagType t4) + : _next(_list), _write_prefix(prefix_writer) { _tag[0] = t0; _tag[1] = t1; _tag[2] = t2; @@ -49,10 +50,6 @@ _output_list.set_output_level(LogOutput::Stderr, LogLevel::Default); } -bool LogTagSet::is_level(LogLevelType level) const { - return _output_list.is_level(level); -} - void LogTagSet::update_decorators(const LogDecorators& decorator) { LogDecorators new_decorators = decorator; for (LogOutputList::Iterator it = _output_list.iterator(); it != _output_list.end(); it++) { @@ -90,3 +87,34 @@ } return tot_written; } + +void LogTagSet::write(LogLevelType level, const char* fmt, ...) 
{ + va_list args; + va_start(args, fmt); + vwrite(level, fmt, args); + va_end(args); +} + +const size_t vwrite_buffer_size = 512; + +void LogTagSet::vwrite(LogLevelType level, const char* fmt, va_list args) { + char buf[vwrite_buffer_size]; + va_list saved_args; // For re-format on buf overflow. + va_copy(saved_args, args); + size_t prefix_len = _write_prefix(buf, sizeof(buf)); + // Check that string fits in buffer; resize buffer if necessary + int ret = os::log_vsnprintf(buf + prefix_len, sizeof(buf) - prefix_len, fmt, args); + assert(ret >= 0, "Log message buffer issue"); + if ((size_t)ret >= sizeof(buf)) { + size_t newbuf_len = prefix_len + ret + 1; + char* newbuf = NEW_C_HEAP_ARRAY(char, newbuf_len, mtLogging); + memcpy(newbuf, buf, prefix_len); + ret = os::log_vsnprintf(newbuf + prefix_len, newbuf_len - prefix_len, fmt, saved_args); + assert(ret >= 0, "Log message buffer issue"); + log(level, newbuf); + FREE_C_HEAP_ARRAY(char, newbuf); + } else { + log(level, buf); + } + va_end(saved_args); +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/logging/logTagSet.hpp --- a/hotspot/src/share/vm/logging/logTagSet.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/logging/logTagSet.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2016 Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,6 +27,7 @@ #include "logging/logDecorators.hpp" #include "logging/logLevel.hpp" #include "logging/logOutputList.hpp" +#include "logging/logPrefix.hpp" #include "logging/logTag.hpp" #include "utilities/globalDefinitions.hpp" @@ -45,14 +46,17 @@ LogOutputList _output_list; LogDecorators _decorators; + typedef size_t (*PrefixWriter)(char* buf, size_t size); + PrefixWriter _write_prefix; + // Keep constructor private to prevent incorrect instantiations of this class. // Only LogTagSetMappings can create/contain instances of this class. // The constructor links all tagsets together in a global list of tagsets. // This list is used during configuration to be able to update all tagsets // and their configurations to reflect the new global log configuration. - LogTagSet(LogTagType t0, LogTagType t1, LogTagType t2, LogTagType t3, LogTagType t4); + LogTagSet(PrefixWriter prefix_writer, LogTagType t0, LogTagType t1, LogTagType t2, LogTagType t3, LogTagType t4); - template + template friend class LogTagSetMapping; public: @@ -68,6 +72,10 @@ return _ntags; } + LogTagType tag(size_t idx) const { + return _tag[idx]; + } + bool contains(LogTagType tag) const { for (size_t i = 0; _tag[i] != LogTag::__NO_TAG; i++) { if (tag == _tag[i]) { @@ -91,14 +99,37 @@ int label(char *buf, size_t len, const char* separator = ",") const; bool has_output(const LogOutput* output); - bool is_level(LogLevelType level) const; + + // The implementation of this function is put here to ensure + // that it is inline:able by the log_is_enabled(level, ...) macro. + bool is_level(LogLevelType level) const { + return _output_list.is_level(level); + } void log(LogLevelType level, const char* msg); + + ATTRIBUTE_PRINTF(3, 4) + void write(LogLevelType level, const char* fmt, ...); + + template + ATTRIBUTE_PRINTF(2, 3) + void write(const char* fmt, ...) 
{ + va_list args; + va_start(args, fmt); + vwrite(Level, fmt, args); + va_end(args); + } + + ATTRIBUTE_PRINTF(3, 0) + void vwrite(LogLevelType level, const char* fmt, va_list args); }; template + LogTagType T3 = LogTag::__NO_TAG, LogTagType T4 = LogTag::__NO_TAG, + LogTagType GuardTag = LogTag::__NO_TAG> class LogTagSetMapping : public AllStatic { private: + // Verify number of logging tags does not exceed maximum supported. + STATIC_ASSERT(GuardTag == LogTag::__NO_TAG); static LogTagSet _tagset; public: @@ -112,7 +143,7 @@ // Each combination of tags used as template arguments to the Log class somewhere (via macro or not) // will instantiate the LogTagSetMapping template, which in turn creates the static field for that // tagset. This _tagset contains the configuration for those tags. -template -LogTagSet LogTagSetMapping::_tagset(T0, T1, T2, T3, T4); +template +LogTagSet LogTagSetMapping::_tagset(&LogPrefix::prefix, T0, T1, T2, T3, T4); #endif // SHARE_VM_LOGGING_LOGTAGSET_HPP diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/logging/logTag_ext.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/src/share/vm/logging/logTag_ext.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ +#ifndef SHARE_VM_LOGGING_LOGTAG_EXT_HPP +#define SHARE_VM_LOGGING_LOGTAG_EXT_HPP + +#define LOG_TAG_LIST_EXT + +#endif // SHARE_VM_LOGGING_LOGTAG_EXT_HPP diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/memory/allocation.cpp --- a/hotspot/src/share/vm/memory/allocation.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/memory/allocation.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -242,7 +242,7 @@ ChunkPool(size_t size) : _size(size) { _first = NULL; _num_chunks = _num_used = 0; } // Allocate a new chunk from the pool (might expand the pool) - _NOINLINE_ void* allocate(size_t bytes, AllocFailType alloc_failmode) { + NOINLINE void* allocate(size_t bytes, AllocFailType alloc_failmode) { assert(bytes == _size, "bad size"); void* p = NULL; // No VM lock can be taken inside ThreadCritical lock, so os::malloc diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/memory/allocation.hpp --- a/hotspot/src/share/vm/memory/allocation.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/memory/allocation.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -41,18 +41,6 @@ #define ARENA_ALIGN_MASK (~((size_t)ARENA_ALIGN_M1)) #define ARENA_ALIGN(x) ((((size_t)(x)) + ARENA_ALIGN_M1) & ARENA_ALIGN_MASK) - -// noinline attribute -#ifdef _WINDOWS - #define _NOINLINE_ __declspec(noinline) -#else - #if __GNUC__ < 3 // gcc 2.x does not support noinline attribute - #define _NOINLINE_ - #else - #define _NOINLINE_ __attribute__ ((noinline)) - #endif -#endif - class AllocFailStrategy { public: enum AllocFailEnum { EXIT_OOM, RETURN_NULL }; @@ -178,17 +166,17 @@ template class CHeapObj ALLOCATION_SUPER_CLASS_SPEC { public: - _NOINLINE_ void* operator new(size_t size, const NativeCallStack& stack) throw(); - _NOINLINE_ void* operator new(size_t size) throw(); - _NOINLINE_ void* operator new (size_t size, const std::nothrow_t& nothrow_constant, + NOINLINE void* operator new(size_t size, const NativeCallStack& stack) throw(); + NOINLINE void* operator new(size_t size) throw(); + NOINLINE void* operator new (size_t size, const std::nothrow_t& nothrow_constant, const NativeCallStack& stack) throw(); - _NOINLINE_ void* operator new (size_t size, const std::nothrow_t& nothrow_constant) + NOINLINE void* operator new (size_t size, const std::nothrow_t& nothrow_constant) throw(); - _NOINLINE_ void* operator new [](size_t size, const NativeCallStack& stack) throw(); - _NOINLINE_ void* operator new [](size_t size) throw(); - _NOINLINE_ void* operator new [](size_t size, const std::nothrow_t& nothrow_constant, + NOINLINE void* operator new [](size_t size, const NativeCallStack& stack) throw(); + NOINLINE void* operator new [](size_t size) throw(); + NOINLINE void* operator new [](size_t size, const std::nothrow_t& nothrow_constant, const NativeCallStack& stack) throw(); - _NOINLINE_ void* operator new [](size_t size, const std::nothrow_t& nothrow_constant) + NOINLINE void* operator new [](size_t size, const std::nothrow_t& nothrow_constant) throw(); void operator delete(void* p); void operator delete [] (void* p); @@ -724,30 +712,42 @@ // is set so that we always use malloc except for Solaris where we set the // limit to get mapped memory. 
template -class ArrayAllocator VALUE_OBJ_CLASS_SPEC { - char* _addr; - bool _use_malloc; - size_t _size; - bool _free_in_destructor; +class ArrayAllocator : public AllStatic { + private: + static bool should_use_malloc(size_t length); - static bool should_use_malloc(size_t size) { - return size < ArrayAllocatorMallocLimit; - } + static E* allocate_malloc(size_t length); + static E* allocate_mmap(size_t length); - static char* allocate_inner(size_t& size, bool& use_malloc); + static void free_malloc(E* addr, size_t length); + static void free_mmap(E* addr, size_t length); + public: - ArrayAllocator(bool free_in_destructor = true) : - _addr(NULL), _use_malloc(false), _size(0), _free_in_destructor(free_in_destructor) { } + static E* allocate(size_t length); + static E* reallocate(E* old_addr, size_t old_length, size_t new_length); + static void free(E* addr, size_t length); +}; + +// Uses mmaped memory for all allocations. All allocations are initially +// zero-filled. No pre-touching. +template +class MmapArrayAllocator : public AllStatic { + private: + static size_t size_for(size_t length); - ~ArrayAllocator() { - if (_free_in_destructor) { - free(); - } - } + public: + static E* allocate(size_t length); + static void free(E* addr, size_t length); +}; - E* allocate(size_t length); - E* reallocate(size_t new_length); - void free(); +// Uses malloc:ed memory for all allocations. +template +class MallocArrayAllocator : public AllStatic { + public: + static size_t size_for(size_t length); + + static E* allocate(size_t length); + static void free(E* addr, size_t length); }; #endif // SHARE_VM_MEMORY_ALLOCATION_HPP diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/memory/allocation.inline.hpp --- a/hotspot/src/share/vm/memory/allocation.inline.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/memory/allocation.inline.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -151,66 +151,107 @@ } template -char* ArrayAllocator::allocate_inner(size_t &size, bool &use_malloc) { - char* addr = NULL; +size_t MmapArrayAllocator::size_for(size_t length) { + size_t size = length * sizeof(E); + int alignment = os::vm_allocation_granularity(); + return align_size_up(size, alignment); +} - if (use_malloc) { - addr = AllocateHeap(size, F); - if (addr == NULL && size >= (size_t)os::vm_allocation_granularity()) { - // malloc failed let's try with mmap instead - use_malloc = false; - } else { - return addr; - } - } +template +E* MmapArrayAllocator::allocate(size_t length) { + size_t size = size_for(length); + int alignment = os::vm_allocation_granularity(); - int alignment = os::vm_allocation_granularity(); - size = align_size_up(size, alignment); - - addr = os::reserve_memory(size, NULL, alignment, F); + char* addr = os::reserve_memory(size, NULL, alignment, F); if (addr == NULL) { vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "Allocator (reserve)"); } os::commit_memory_or_exit(addr, size, !ExecMem, "Allocator (commit)"); - return addr; + + return (E*)addr; +} + +template +void MmapArrayAllocator::free(E* addr, size_t length) { + bool result = os::release_memory((char*)addr, size_for(length)); + assert(result, "Failed to release memory"); +} + +template +size_t MallocArrayAllocator::size_for(size_t length) { + return length * sizeof(E); +} + +template +E* MallocArrayAllocator::allocate(size_t length) { + return (E*)AllocateHeap(size_for(length), F); +} + +template +void MallocArrayAllocator::free(E* addr, size_t /*length*/) { + FreeHeap(addr); +} + +template +bool ArrayAllocator::should_use_malloc(size_t length) 
{ + return MallocArrayAllocator::size_for(length) < ArrayAllocatorMallocLimit; +} + +template +E* ArrayAllocator::allocate_malloc(size_t length) { + return MallocArrayAllocator::allocate(length); +} + +template +E* ArrayAllocator::allocate_mmap(size_t length) { + return MmapArrayAllocator::allocate(length); } template E* ArrayAllocator::allocate(size_t length) { - assert(_addr == NULL, "Already in use"); + if (should_use_malloc(length)) { + return allocate_malloc(length); + } - _size = sizeof(E) * length; - _use_malloc = should_use_malloc(_size); - _addr = allocate_inner(_size, _use_malloc); - - return (E*)_addr; + return allocate_mmap(length); } template -E* ArrayAllocator::reallocate(size_t new_length) { - size_t new_size = sizeof(E) * new_length; - bool use_malloc = should_use_malloc(new_size); - char* new_addr = allocate_inner(new_size, use_malloc); +E* ArrayAllocator::reallocate(E* old_addr, size_t old_length, size_t new_length) { + E* new_addr = (new_length > 0) + ? allocate(new_length) + : NULL; - memcpy(new_addr, _addr, MIN2(new_size, _size)); + if (new_addr != NULL && old_addr != NULL) { + memcpy(new_addr, old_addr, MIN2(old_length, new_length) * sizeof(E)); + } - free(); - _size = new_size; - _use_malloc = use_malloc; - _addr = new_addr; - return (E*)new_addr; + if (old_addr != NULL) { + free(old_addr, old_length); + } + + return new_addr; } template -void ArrayAllocator::free() { - if (_addr != NULL) { - if (_use_malloc) { - FreeHeap(_addr); +void ArrayAllocator::free_malloc(E* addr, size_t length) { + MallocArrayAllocator::free(addr, length); +} + +template +void ArrayAllocator::free_mmap(E* addr, size_t length) { + MmapArrayAllocator::free(addr, length); +} + +template +void ArrayAllocator::free(E* addr, size_t length) { + if (addr != NULL) { + if (should_use_malloc(length)) { + free_malloc(addr, length); } else { - os::release_memory(_addr, _size); + free_mmap(addr, length); } - _addr = NULL; } } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/memory/binaryTreeDictionary.cpp --- a/hotspot/src/share/vm/memory/binaryTreeDictionary.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/memory/binaryTreeDictionary.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -25,6 +25,7 @@ #include "precompiled.hpp" #include "gc/cms/allocationStats.hpp" #include "gc/shared/spaceDecorator.hpp" +#include "logging/logStream.inline.hpp" #include "memory/binaryTreeDictionary.hpp" #include "memory/freeBlockDictionary.hpp" #include "memory/freeList.hpp" @@ -1190,10 +1191,10 @@ // Does walking the tree 3 times hurt? 
set_tree_surplus(splitSurplusPercent); set_tree_hints(); - LogHandle(gc, freelist, stats) log; - if (log.is_trace()) { - ResourceMark rm; - report_statistics(log.trace_stream()); + LogTarget(Trace, gc, freelist, stats) log; + if (log.is_enabled()) { + LogStream out(log); + report_statistics(&out); } clear_tree_census(); } @@ -1232,27 +1233,26 @@ FreeList_t* total() { return &_total; } size_t total_free() { return _total_free; } void do_list(FreeList* fl) { - LogHandle(gc, freelist, census) log; - outputStream* out = log.debug_stream(); + LogStreamHandle(Debug, gc, freelist, census) out; + if (++_print_line >= 40) { - ResourceMark rm; - FreeList_t::print_labels_on(out, "size"); + FreeList_t::print_labels_on(&out, "size"); _print_line = 0; } - fl->print_on(out); + fl->print_on(&out); _total_free += fl->count() * fl->size(); total()->set_count(total()->count() + fl->count()); } #if INCLUDE_ALL_GCS void do_list(AdaptiveFreeList* fl) { - LogHandle(gc, freelist, census) log; - outputStream* out = log.debug_stream(); + LogStreamHandle(Debug, gc, freelist, census) out; + if (++_print_line >= 40) { - FreeList_t::print_labels_on(out, "size"); + FreeList_t::print_labels_on(&out, "size"); _print_line = 0; } - fl->print_on(out); + fl->print_on(&out); _total_free += fl->count() * fl->size() ; total()->set_count( total()->count() + fl->count() ); total()->set_bfr_surp( total()->bfr_surp() + fl->bfr_surp() ); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/memory/filemap.cpp --- a/hotspot/src/share/vm/memory/filemap.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/memory/filemap.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -372,7 +372,7 @@ fail_continue("Specified shared archive not found."); } else { fail_continue("Failed to open shared archive file (%s).", - strerror(errno)); + os::strerror(errno)); } return false; } @@ -402,7 +402,7 @@ int fd = open(_full_path, O_RDWR | O_CREAT | O_TRUNC | O_BINARY, 0444); if (fd < 0) { fail_stop("Unable to create shared archive file %s: (%s).", _full_path, - strerror(errno)); + os::strerror(errno)); } _fd = fd; _file_offset = 0; diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/memory/iterator.hpp --- a/hotspot/src/share/vm/memory/iterator.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/memory/iterator.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -213,6 +213,16 @@ virtual bool do_object_b(oop obj) = 0; }; +class AlwaysTrueClosure: public BoolObjectClosure { + public: + bool do_object_b(oop p) { return true; } +}; + +class AlwaysFalseClosure : public BoolObjectClosure { + public: + bool do_object_b(oop p) { return false; } +}; + // Applies an oop closure to all ref fields in objects iterated over in an // object iteration. 
class ObjectToOopClosure: public ObjectClosure { diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/memory/metaspace.cpp --- a/hotspot/src/share/vm/memory/metaspace.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/memory/metaspace.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -759,7 +759,6 @@ void verify(); void verify_chunk_size(Metachunk* chunk); - NOT_PRODUCT(void mangle_freed_chunks();) #ifdef ASSERT void verify_allocated_blocks_words(); #endif @@ -889,7 +888,7 @@ "The committed memory doesn't match the expanded memory."); if (!is_available(chunk_word_size)) { - LogHandle(gc, metaspace, freelist) log; + Log(gc, metaspace, freelist) log; log.debug("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size); // Dump some information about the virtual space that is nearly full ResourceMark rm; @@ -1230,7 +1229,7 @@ new_entry->mangle(); #endif if (log_is_enabled(Trace, gc, metaspace)) { - LogHandle(gc, metaspace) log; + Log(gc, metaspace) log; VirtualSpaceNode* vsl = current_virtual_space(); ResourceMark rm; vsl->print_on(log.trace_stream()); @@ -1569,7 +1568,7 @@ } else { _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100); } - log_trace(gc, metaspace)(" shrinking: initSize: %.1fK maximum_desired_capacity: %.1fK", + log_trace(gc, metaspace)(" shrinking: initThreshold: %.1fK maximum_desired_capacity: %.1fK", MetaspaceSize / (double) K, maximum_desired_capacity / (double) K); log_trace(gc, metaspace)(" shrink_bytes: %.1fK current_shrink_factor: %d new shrink factor: %d MinMetaspaceExpansion: %.1fK", shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, MinMetaspaceExpansion / (double) K); @@ -1792,7 +1791,7 @@ assert((word_size <= chunk->word_size()) || list_index(chunk->word_size() == HumongousIndex), "Non-humongous variable sized chunk"); - LogHandle(gc, metaspace, freelist) log; + Log(gc, metaspace, freelist) log; if (log.is_debug()) { size_t list_count; if (list_index(word_size) < HumongousIndex) { @@ -1991,7 +1990,7 @@ "Size calculation is wrong, word_size " SIZE_FORMAT " chunk_word_size " SIZE_FORMAT, word_size, chunk_word_size); - LogHandle(gc, metaspace, alloc) log; + Log(gc, metaspace, alloc) log; if (log.is_debug() && SpaceManager::is_humongous(word_size)) { log.debug("Metadata humongous allocation:"); log.debug(" word_size " PTR_FORMAT, word_size); @@ -2160,7 +2159,7 @@ dec_total_from_size_metrics(); - LogHandle(gc, metaspace, freelist) log; + Log(gc, metaspace, freelist) log; if (log.is_trace()) { log.trace("~SpaceManager(): " PTR_FORMAT, p2i(this)); ResourceMark rm; @@ -2300,7 +2299,7 @@ inc_size_metrics(new_chunk->word_size()); assert(new_chunk->is_empty(), "Not ready for reuse"); - LogHandle(gc, metaspace, freelist) log; + Log(gc, metaspace, freelist) log; if (log.is_trace()) { log.trace("SpaceManager::add_chunk: " SIZE_FORMAT ") ", sum_count_in_chunks_in_use()); ResourceMark rm; @@ -2331,7 +2330,7 @@ medium_chunk_bunch()); } - LogHandle(gc, metaspace, alloc) log; + Log(gc, metaspace, alloc) log; if (log.is_debug() && next != NULL && SpaceManager::is_humongous(next->word_size())) { log.debug(" new humongous chunk word size " PTR_FORMAT, next->word_size()); @@ -2512,20 +2511,6 @@ " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste); } -#ifndef PRODUCT -void SpaceManager::mangle_freed_chunks() { - for (ChunkIndex index = ZeroIndex; - index < NumberOfInUseLists; - index = next_chunk_index(index)) { - for (Metachunk* curr = chunks_in_use(index); - curr != NULL; - curr = curr->next()) { - 
curr->mangle(uninitMetaWordVal); - } - } -} -#endif // PRODUCT - // MetaspaceAux @@ -3045,7 +3030,7 @@ initialize_class_space(metaspace_rs); if (log_is_enabled(Trace, gc, metaspace)) { - LogHandle(gc, metaspace) log; + Log(gc, metaspace) log; ResourceMark rm; print_compressed_class_space(log.trace_stream(), requested_addr); } @@ -3520,7 +3505,7 @@ tracer()->report_metadata_oom(loader_data, word_size, type, mdtype); // If result is still null, we are out of memory. - LogHandle(gc, metaspace, freelist) log; + Log(gc, metaspace, freelist) log; if (log.is_trace()) { log.trace("Metaspace allocation failed for size " SIZE_FORMAT, word_size); ResourceMark rm; diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/memory/metaspaceShared.cpp --- a/hotspot/src/share/vm/memory/metaspaceShared.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/memory/metaspaceShared.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -38,9 +38,10 @@ #include "memory/filemap.hpp" #include "memory/metaspace.hpp" #include "memory/metaspaceShared.hpp" +#include "memory/resourceArea.hpp" #include "oops/objArrayOop.hpp" #include "oops/oop.inline.hpp" -#include "runtime/logTimer.hpp" +#include "runtime/timerTrace.hpp" #include "runtime/os.hpp" #include "runtime/signature.hpp" #include "runtime/vmThread.hpp" @@ -775,7 +776,7 @@ // Preload classes from a list, populate the shared spaces and dump to a // file. void MetaspaceShared::preload_and_dump(TRAPS) { - { TraceStartupTime timer("Dump Shared Spaces"); + { TraceTime timer("Dump Shared Spaces", TRACETIME_LOG(Info, startuptime)); ResourceMark rm; char class_list_path_str[JVM_MAXPATHLEN]; @@ -882,7 +883,7 @@ InstanceKlass* ik = InstanceKlass::cast(klass); - // Should be class load order as per -XX:+TraceClassLoadingPreorder + // Should be class load order as per -Xlog:classload+preorder class_promote_order->append(ik); // Link the class to cause the bytecodes to be rewritten and the diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/memory/universe.cpp --- a/hotspot/src/share/vm/memory/universe.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/memory/universe.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -44,6 +44,7 @@ #include "memory/metadataFactory.hpp" #include "memory/metaspaceShared.hpp" #include "memory/oopFactory.hpp" +#include "memory/resourceArea.hpp" #include "memory/universe.hpp" #include "memory/universe.inline.hpp" #include "oops/constantPool.hpp" @@ -64,11 +65,10 @@ #include "runtime/init.hpp" #include "runtime/java.hpp" #include "runtime/javaCalls.hpp" -#include "runtime/logTimer.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/synchronizer.hpp" #include "runtime/thread.inline.hpp" -#include "runtime/timer.hpp" +#include "runtime/timerTrace.hpp" #include "runtime/vm_operations.hpp" #include "services/memoryService.hpp" #include "utilities/copy.hpp" @@ -377,8 +377,7 @@ // We can allocate directly in the permanent generation, so we do. 
int size; if (UseConcMarkSweepGC) { - warning("Using +FullGCALot with concurrent mark sweep gc " - "will not force all objects to relocate"); + log_warning(gc)("Using +FullGCALot with concurrent mark sweep gc will not force all objects to relocate"); size = FullGCALotDummies; } else { size = FullGCALotDummies * 2; @@ -629,7 +628,7 @@ guarantee(sizeof(oop) % sizeof(HeapWord) == 0, "oop size is not not a multiple of HeapWord size"); - TraceStartupTime timer("Genesis"); + TraceTime timer("Genesis", TRACETIME_LOG(Info, startuptime)); JavaClasses::compute_hard_coded_offsets(); @@ -880,6 +879,57 @@ return UnscaledNarrowOop; } +void initialize_known_method(LatestMethodCache* method_cache, + InstanceKlass* ik, + const char* method, + Symbol* signature, + bool is_static, TRAPS) +{ + TempNewSymbol name = SymbolTable::new_symbol(method, CHECK); + Method* m = NULL; + // The klass must be linked before looking up the method. + if (!ik->link_class_or_fail(THREAD) || + ((m = ik->find_method(name, signature)) == NULL) || + is_static != m->is_static()) { + ResourceMark rm(THREAD); + // NoSuchMethodException doesn't actually work because it tries to run the + // function before java_lang_Class is linked. Print error and exit. + vm_exit_during_initialization(err_msg("Unable to link/verify %s.%s method", + ik->name()->as_C_string(), method)); + } + method_cache->init(ik, m); +} + +void Universe::initialize_known_methods(TRAPS) { + // Set up static method for registering finalizers + initialize_known_method(_finalizer_register_cache, + SystemDictionary::Finalizer_klass(), + "register", + vmSymbols::object_void_signature(), true, CHECK); + + initialize_known_method(_throw_illegal_access_error_cache, + SystemDictionary::internal_Unsafe_klass(), + "throwIllegalAccessError", + vmSymbols::void_method_signature(), true, CHECK); + + // Set up method for registering loaded classes in class loader vector + initialize_known_method(_loader_addClass_cache, + SystemDictionary::ClassLoader_klass(), + "addClass", + vmSymbols::class_void_signature(), false, CHECK); + + // Set up method for checking protection domain + initialize_known_method(_pd_implies_cache, + SystemDictionary::ProtectionDomain_klass(), + "impliesCreateAccessControlContext", + vmSymbols::void_boolean_signature(), false, CHECK); + + // Set up method for stack walking + initialize_known_method(_do_stack_walk_cache, + SystemDictionary::AbstractStackWalker_klass(), + "doStackWalk", + vmSymbols::doStackWalk_signature(), false, CHECK); +} void universe2_init() { EXCEPTION_MARK; @@ -908,46 +958,46 @@ HandleMark hm(THREAD); Klass* k; instanceKlassHandle k_h; - // Setup preallocated empty java.lang.Class array - Universe::_the_empty_class_klass_array = oopFactory::new_objArray(SystemDictionary::Class_klass(), 0, CHECK_false); + // Setup preallocated empty java.lang.Class array + Universe::_the_empty_class_klass_array = oopFactory::new_objArray(SystemDictionary::Class_klass(), 0, CHECK_false); - // Setup preallocated OutOfMemoryError errors - k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_OutOfMemoryError(), true, CHECK_false); - k_h = instanceKlassHandle(THREAD, k); - Universe::_out_of_memory_error_java_heap = k_h->allocate_instance(CHECK_false); - Universe::_out_of_memory_error_metaspace = k_h->allocate_instance(CHECK_false); - Universe::_out_of_memory_error_class_metaspace = k_h->allocate_instance(CHECK_false); - Universe::_out_of_memory_error_array_size = k_h->allocate_instance(CHECK_false); - Universe::_out_of_memory_error_gc_overhead_limit = - 
k_h->allocate_instance(CHECK_false); - Universe::_out_of_memory_error_realloc_objects = k_h->allocate_instance(CHECK_false); + // Setup preallocated OutOfMemoryError errors + k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_OutOfMemoryError(), true, CHECK_false); + k_h = instanceKlassHandle(THREAD, k); + Universe::_out_of_memory_error_java_heap = k_h->allocate_instance(CHECK_false); + Universe::_out_of_memory_error_metaspace = k_h->allocate_instance(CHECK_false); + Universe::_out_of_memory_error_class_metaspace = k_h->allocate_instance(CHECK_false); + Universe::_out_of_memory_error_array_size = k_h->allocate_instance(CHECK_false); + Universe::_out_of_memory_error_gc_overhead_limit = + k_h->allocate_instance(CHECK_false); + Universe::_out_of_memory_error_realloc_objects = k_h->allocate_instance(CHECK_false); - // Setup preallocated cause message for delayed StackOverflowError - if (StackReservedPages > 0) { - Universe::_delayed_stack_overflow_error_message = - java_lang_String::create_oop_from_str("Delayed StackOverflowError due to ReservedStackAccess annotated method", CHECK_false); - } + // Setup preallocated cause message for delayed StackOverflowError + if (StackReservedPages > 0) { + Universe::_delayed_stack_overflow_error_message = + java_lang_String::create_oop_from_str("Delayed StackOverflowError due to ReservedStackAccess annotated method", CHECK_false); + } - // Setup preallocated NullPointerException - // (this is currently used for a cheap & dirty solution in compiler exception handling) - k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_NullPointerException(), true, CHECK_false); - Universe::_null_ptr_exception_instance = InstanceKlass::cast(k)->allocate_instance(CHECK_false); - // Setup preallocated ArithmeticException - // (this is currently used for a cheap & dirty solution in compiler exception handling) - k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_ArithmeticException(), true, CHECK_false); - Universe::_arithmetic_exception_instance = InstanceKlass::cast(k)->allocate_instance(CHECK_false); - // Virtual Machine Error for when we get into a situation we can't resolve - k = SystemDictionary::resolve_or_fail( - vmSymbols::java_lang_VirtualMachineError(), true, CHECK_false); - bool linked = InstanceKlass::cast(k)->link_class_or_fail(CHECK_false); - if (!linked) { - tty->print_cr("Unable to link/verify VirtualMachineError class"); - return false; // initialization failed - } - Universe::_virtual_machine_error_instance = - InstanceKlass::cast(k)->allocate_instance(CHECK_false); + // Setup preallocated NullPointerException + // (this is currently used for a cheap & dirty solution in compiler exception handling) + k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_NullPointerException(), true, CHECK_false); + Universe::_null_ptr_exception_instance = InstanceKlass::cast(k)->allocate_instance(CHECK_false); + // Setup preallocated ArithmeticException + // (this is currently used for a cheap & dirty solution in compiler exception handling) + k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_ArithmeticException(), true, CHECK_false); + Universe::_arithmetic_exception_instance = InstanceKlass::cast(k)->allocate_instance(CHECK_false); + // Virtual Machine Error for when we get into a situation we can't resolve + k = SystemDictionary::resolve_or_fail( + vmSymbols::java_lang_VirtualMachineError(), true, CHECK_false); + bool linked = InstanceKlass::cast(k)->link_class_or_fail(CHECK_false); + if (!linked) { + tty->print_cr("Unable to 
link/verify VirtualMachineError class"); + return false; // initialization failed + } + Universe::_virtual_machine_error_instance = + InstanceKlass::cast(k)->allocate_instance(CHECK_false); - Universe::_vm_exception = InstanceKlass::cast(k)->allocate_instance(CHECK_false); + Universe::_vm_exception = InstanceKlass::cast(k)->allocate_instance(CHECK_false); if (!DumpSharedSpaces) { // These are the only Java fields that are currently set during shared space dumping. @@ -988,71 +1038,7 @@ Universe::_preallocated_out_of_memory_error_avail_count = (jint)len; } - - // Setup static method for registering finalizers - // The finalizer klass must be linked before looking up the method, in - // case it needs to get rewritten. - SystemDictionary::Finalizer_klass()->link_class(CHECK_false); - Method* m = SystemDictionary::Finalizer_klass()->find_method( - vmSymbols::register_method_name(), - vmSymbols::register_method_signature()); - if (m == NULL || !m->is_static()) { - tty->print_cr("Unable to link/verify Finalizer.register method"); - return false; // initialization failed (cannot throw exception yet) - } - Universe::_finalizer_register_cache->init( - SystemDictionary::Finalizer_klass(), m); - - SystemDictionary::internal_Unsafe_klass()->link_class(CHECK_false); - m = SystemDictionary::internal_Unsafe_klass()->find_method( - vmSymbols::throwIllegalAccessError_name(), - vmSymbols::void_method_signature()); - if (m != NULL && !m->is_static()) { - // Note null is okay; this method is used in itables, and if it is null, - // then AbstractMethodError is thrown instead. - tty->print_cr("Unable to link/verify Unsafe.throwIllegalAccessError method"); - return false; // initialization failed (cannot throw exception yet) - } - Universe::_throw_illegal_access_error_cache->init( - SystemDictionary::internal_Unsafe_klass(), m); - - // Setup method for registering loaded classes in class loader vector - SystemDictionary::ClassLoader_klass()->link_class(CHECK_false); - m = SystemDictionary::ClassLoader_klass()->find_method(vmSymbols::addClass_name(), vmSymbols::class_void_signature()); - if (m == NULL || m->is_static()) { - tty->print_cr("Unable to link/verify ClassLoader.addClass method"); - return false; // initialization failed (cannot throw exception yet) - } - Universe::_loader_addClass_cache->init( - SystemDictionary::ClassLoader_klass(), m); - - // Setup method for checking protection domain - SystemDictionary::ProtectionDomain_klass()->link_class(CHECK_false); - m = SystemDictionary::ProtectionDomain_klass()-> - find_method(vmSymbols::impliesCreateAccessControlContext_name(), - vmSymbols::void_boolean_signature()); - // Allow NULL which should only happen with bootstrapping. - if (m != NULL) { - if (m->is_static()) { - // NoSuchMethodException doesn't actually work because it tries to run the - // function before java_lang_Class is linked. Print error and exit. - tty->print_cr("ProtectionDomain.impliesCreateAccessControlContext() has the wrong linkage"); - return false; // initialization failed - } - Universe::_pd_implies_cache->init( - SystemDictionary::ProtectionDomain_klass(), m); - } - - // Setup method for stack walking - InstanceKlass::cast(SystemDictionary::AbstractStackWalker_klass())->link_class(CHECK_false); - m = InstanceKlass::cast(SystemDictionary::AbstractStackWalker_klass())-> - find_method(vmSymbols::doStackWalk_name(), - vmSymbols::doStackWalk_signature()); - // Allow NULL which should only happen with bootstrapping. 
- if (m != NULL) { - Universe::_do_stack_walk_cache->init( - SystemDictionary::AbstractStackWalker_klass(), m); - } + Universe::initialize_known_methods(CHECK_false); // This needs to be done before the first scavenge/gc, since // it's an input to soft ref clearing policy. @@ -1097,20 +1083,20 @@ } void Universe::print_heap_before_gc() { - LogHandle(gc, heap) log; - if (log.is_trace()) { - log.trace("Heap before GC invocations=%u (full %u):", heap()->total_collections(), heap()->total_full_collections()); + Log(gc, heap) log; + if (log.is_debug()) { + log.debug("Heap before GC invocations=%u (full %u):", heap()->total_collections(), heap()->total_full_collections()); ResourceMark rm; - heap()->print_on(log.trace_stream()); + heap()->print_on(log.debug_stream()); } } void Universe::print_heap_after_gc() { - LogHandle(gc, heap) log; - if (log.is_trace()) { - log.trace("Heap after GC invocations=%u (full %u):", heap()->total_collections(), heap()->total_full_collections()); + Log(gc, heap) log; + if (log.is_debug()) { + log.debug("Heap after GC invocations=%u (full %u):", heap()->total_collections(), heap()->total_full_collections()); ResourceMark rm; - heap()->print_on(log.trace_stream()); + heap()->print_on(log.debug_stream()); } } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/memory/universe.hpp --- a/hotspot/src/share/vm/memory/universe.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/memory/universe.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -323,6 +323,9 @@ static Method* do_stack_walk_method() { return _do_stack_walk_cache->get_method(); } + // Function to initialize these + static void initialize_known_methods(TRAPS); + static oop null_ptr_exception_instance() { return _null_ptr_exception_instance; } static oop arithmetic_exception_instance() { return _arithmetic_exception_instance; } static oop virtual_machine_error_instance() { return _virtual_machine_error_instance; } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/oops/arrayKlass.cpp --- a/hotspot/src/share/vm/oops/arrayKlass.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/oops/arrayKlass.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -29,6 +29,7 @@ #include "gc/shared/collectedHeap.inline.hpp" #include "gc/shared/gcLocker.hpp" #include "jvmtifiles/jvmti.h" +#include "memory/resourceArea.hpp" #include "memory/universe.inline.hpp" #include "oops/arrayKlass.hpp" #include "oops/arrayOop.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/oops/constMethod.cpp --- a/hotspot/src/share/vm/oops/constMethod.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/oops/constMethod.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -27,6 +27,7 @@ #include "interpreter/interpreter.hpp" #include "memory/heapInspection.hpp" #include "memory/metadataFactory.hpp" +#include "memory/resourceArea.hpp" #include "oops/constMethod.hpp" #include "oops/method.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/oops/constantPool.cpp --- a/hotspot/src/share/vm/oops/constantPool.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/oops/constantPool.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -33,6 +33,7 @@ #include "memory/heapInspection.hpp" #include "memory/metadataFactory.hpp" #include "memory/oopFactory.hpp" +#include "memory/resourceArea.hpp" #include "oops/constantPool.hpp" #include "oops/instanceKlass.hpp" #include "oops/objArrayKlass.hpp" @@ -208,11 +209,11 @@ if (k() != this_cp->pool_holder()) { // only print something if the classes are different if (source_file != NULL) { - 
log_info(classresolve)("%s %s %s:%d", + log_debug(classresolve)("%s %s %s:%d", this_cp->pool_holder()->external_name(), k->external_name(), source_file, line_number); } else { - log_info(classresolve)("%s %s", + log_debug(classresolve)("%s %s", this_cp->pool_holder()->external_name(), k->external_name()); } @@ -281,15 +282,10 @@ ClassLoaderData* this_key = this_cp->pool_holder()->class_loader_data(); this_key->record_dependency(k(), CHECK_NULL); // Can throw OOM - if (log_is_enabled(Info, classresolve) && !k->is_array_klass()) { - // skip resolving the constant pool so that this code gets - // called the next time some bytecodes refer to this class. - trace_class_resolution(this_cp, k); - return k(); - } else { - this_cp->klass_at_put(which, k()); - } + // logging for classresolve tag. + trace_class_resolution(this_cp, k); + this_cp->klass_at_put(which, k()); entry = this_cp->resolved_klass_at(which); assert(entry.is_resolved() && entry.get_klass()->is_klass(), "must be resolved at this point"); return entry.get_klass(); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/oops/cpCache.cpp --- a/hotspot/src/share/vm/oops/cpCache.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/oops/cpCache.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -25,6 +25,7 @@ #include "precompiled.hpp" #include "interpreter/interpreter.hpp" #include "interpreter/rewriter.hpp" +#include "memory/resourceArea.hpp" #include "memory/universe.inline.hpp" #include "oops/cpCache.hpp" #include "oops/objArrayOop.inline.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/oops/generateOopMap.cpp --- a/hotspot/src/share/vm/oops/generateOopMap.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/oops/generateOopMap.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,13 +24,16 @@ #include "precompiled.hpp" #include "interpreter/bytecodeStream.hpp" +#include "logging/log.hpp" #include "oops/generateOopMap.hpp" #include "oops/oop.inline.hpp" #include "oops/symbol.hpp" #include "runtime/handles.inline.hpp" #include "runtime/java.hpp" #include "runtime/relocator.hpp" +#include "runtime/timerTrace.hpp" #include "utilities/bitMap.inline.hpp" +#include "utilities/ostream.hpp" #include "prims/methodHandles.hpp" // @@ -786,7 +789,7 @@ bb->set_changed(true); } } else { - if (TraceMonitorMismatch) { + if (log_is_enabled(Info, monitormismatch)) { report_monitor_mismatch("monitor stack height merge conflict"); } // When the monitor stacks are not matched, we set _monitor_top to @@ -855,7 +858,7 @@ _monitor_safe = false; _monitor_top = bad_monitors; - if (TraceMonitorMismatch) { + if (log_is_enabled(Info, monitormismatch)) { report_monitor_mismatch("monitor stack underflow"); } return CellTypeState::ref; // just to keep the analysis going. @@ -871,7 +874,7 @@ _monitor_safe = false; _monitor_top = bad_monitors; - if (TraceMonitorMismatch) { + if (log_is_enabled(Info, monitormismatch)) { report_monitor_mismatch("monitor stack overflow"); } return; @@ -1244,7 +1247,7 @@ // We don't set _monitor_top to bad_monitors because there are no successors // to this exceptional exit. 
- if (TraceMonitorMismatch && _monitor_safe) { + if (log_is_enabled(Info, monitormismatch) && _monitor_safe) { // We check _monitor_safe so that we only report the first mismatched // exceptional exit. report_monitor_mismatch("non-empty monitor stack at exceptional exit"); @@ -1254,11 +1257,11 @@ } void GenerateOopMap::report_monitor_mismatch(const char *msg) { -#ifndef PRODUCT - tty->print(" Monitor mismatch in method "); - method()->print_short_name(tty); - tty->print_cr(": %s", msg); -#endif + ResourceMark rm; + outputStream* out = Log(monitormismatch)::info_stream(); + out->print("Monitor mismatch in method "); + method()->print_short_name(out); + out->print_cr(": %s", msg); } void GenerateOopMap::print_states(outputStream *os, @@ -1781,7 +1784,7 @@ _monitor_top = bad_monitors; _monitor_safe = false; - if (TraceMonitorMismatch) { + if (log_is_enabled(Info, monitormismatch)) { report_monitor_mismatch("nested redundant lock -- bailout..."); } return; @@ -1819,7 +1822,7 @@ bb->set_changed(true); bb->_monitor_top = bad_monitors; - if (TraceMonitorMismatch) { + if (log_is_enabled(Info, monitormismatch)) { report_monitor_mismatch("improper monitor pair"); } } else { @@ -1845,7 +1848,7 @@ // Since there are no successors to the *return bytecode, it // isn't necessary to set _monitor_top to bad_monitors. - if (TraceMonitorMismatch) { + if (log_is_enabled(Info, monitormismatch)) { report_monitor_mismatch("non-empty monitor stack at return"); } } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/oops/instanceKlass.cpp --- a/hotspot/src/share/vm/oops/instanceKlass.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/oops/instanceKlass.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -41,6 +41,7 @@ #include "memory/iterator.inline.hpp" #include "memory/metadataFactory.hpp" #include "memory/oopFactory.hpp" +#include "memory/resourceArea.hpp" #include "oops/fieldStreams.hpp" #include "oops/instanceClassLoaderKlass.hpp" #include "oops/instanceKlass.inline.hpp" @@ -1089,7 +1090,7 @@ assert(!this_k->is_initialized(), "we cannot initialize twice"); if (log_is_enabled(Info, classinit)) { ResourceMark rm; - outputStream* log = LogHandle(classinit)::info_stream(); + outputStream* log = Log(classinit)::info_stream(); log->print("%d Initializing ", call_class_initializer_impl_counter++); this_k->name()->print_value_on(log); log->print_cr("%s (" INTPTR_FORMAT ")", h_method() == NULL ? "(no method)" : "", p2i(this_k())); @@ -3010,11 +3011,11 @@ assert(type == LogLevel::Info || type == LogLevel::Debug, "sanity"); if (type == LogLevel::Info) { - log = LogHandle(classload)::info_stream(); + log = Log(classload)::info_stream(); } else { assert(type == LogLevel::Debug, "print_loading_log supports only Debug and Info levels"); - log = LogHandle(classload)::debug_stream(); + log = Log(classload)::debug_stream(); } // Name and class hierarchy info diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/oops/instanceKlass.inline.hpp --- a/hotspot/src/share/vm/oops/instanceKlass.inline.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/oops/instanceKlass.inline.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -36,16 +36,9 @@ // The iteration over the oops in objects is a hot path in the GC code. 
// By force inlining the following functions, we get similar GC performance // as the previous macro based implementation. -#ifdef TARGET_COMPILER_visCPP -#define INLINE __forceinline -#elif defined(TARGET_COMPILER_sparcWorks) -#define INLINE __attribute__((always_inline)) -#else -#define INLINE inline -#endif template -INLINE void InstanceKlass::oop_oop_iterate_oop_map(OopMapBlock* map, oop obj, OopClosureType* closure) { +ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_map(OopMapBlock* map, oop obj, OopClosureType* closure) { T* p = (T*)obj->obj_field_addr(map->offset()); T* const end = p + map->count(); @@ -56,7 +49,7 @@ #if INCLUDE_ALL_GCS template -INLINE void InstanceKlass::oop_oop_iterate_oop_map_reverse(OopMapBlock* map, oop obj, OopClosureType* closure) { +ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_map_reverse(OopMapBlock* map, oop obj, OopClosureType* closure) { T* const start = (T*)obj->obj_field_addr(map->offset()); T* p = start + map->count(); @@ -68,7 +61,7 @@ #endif template -INLINE void InstanceKlass::oop_oop_iterate_oop_map_bounded(OopMapBlock* map, oop obj, OopClosureType* closure, MemRegion mr) { +ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_map_bounded(OopMapBlock* map, oop obj, OopClosureType* closure, MemRegion mr) { T* p = (T*)obj->obj_field_addr(map->offset()); T* end = p + map->count(); @@ -91,7 +84,7 @@ } template -INLINE void InstanceKlass::oop_oop_iterate_oop_maps_specialized(oop obj, OopClosureType* closure) { +ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_maps_specialized(oop obj, OopClosureType* closure) { OopMapBlock* map = start_of_nonstatic_oop_maps(); OopMapBlock* const end_map = map + nonstatic_oop_map_count(); @@ -102,7 +95,7 @@ #if INCLUDE_ALL_GCS template -INLINE void InstanceKlass::oop_oop_iterate_oop_maps_specialized_reverse(oop obj, OopClosureType* closure) { +ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_maps_specialized_reverse(oop obj, OopClosureType* closure) { OopMapBlock* const start_map = start_of_nonstatic_oop_maps(); OopMapBlock* map = start_map + nonstatic_oop_map_count(); @@ -114,7 +107,7 @@ #endif template -INLINE void InstanceKlass::oop_oop_iterate_oop_maps_specialized_bounded(oop obj, OopClosureType* closure, MemRegion mr) { +ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_maps_specialized_bounded(oop obj, OopClosureType* closure, MemRegion mr) { OopMapBlock* map = start_of_nonstatic_oop_maps(); OopMapBlock* const end_map = map + nonstatic_oop_map_count(); @@ -124,7 +117,7 @@ } template -INLINE void InstanceKlass::oop_oop_iterate_oop_maps(oop obj, OopClosureType* closure) { +ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_maps(oop obj, OopClosureType* closure) { if (UseCompressedOops) { oop_oop_iterate_oop_maps_specialized(obj, closure); } else { @@ -134,7 +127,7 @@ #if INCLUDE_ALL_GCS template -INLINE void InstanceKlass::oop_oop_iterate_oop_maps_reverse(oop obj, OopClosureType* closure) { +ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_maps_reverse(oop obj, OopClosureType* closure) { if (UseCompressedOops) { oop_oop_iterate_oop_maps_specialized_reverse(obj, closure); } else { @@ -144,7 +137,7 @@ #endif template -INLINE void InstanceKlass::oop_oop_iterate_oop_maps_bounded(oop obj, OopClosureType* closure, MemRegion mr) { +ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_maps_bounded(oop obj, OopClosureType* closure, MemRegion mr) { if (UseCompressedOops) { oop_oop_iterate_oop_maps_specialized_bounded(obj, closure, mr); } else { @@ -153,7 +146,7 @@ } template -INLINE int 
InstanceKlass::oop_oop_iterate(oop obj, OopClosureType* closure) { +ALWAYSINLINE int InstanceKlass::oop_oop_iterate(oop obj, OopClosureType* closure) { if (Devirtualizer::do_metadata(closure)) { Devirtualizer::do_klass(closure, this); } @@ -165,7 +158,7 @@ #if INCLUDE_ALL_GCS template -INLINE int InstanceKlass::oop_oop_iterate_reverse(oop obj, OopClosureType* closure) { +ALWAYSINLINE int InstanceKlass::oop_oop_iterate_reverse(oop obj, OopClosureType* closure) { assert(!Devirtualizer::do_metadata(closure), "Code to handle metadata is not implemented"); @@ -176,7 +169,7 @@ #endif template -INLINE int InstanceKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) { +ALWAYSINLINE int InstanceKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) { if (Devirtualizer::do_metadata(closure)) { if (mr.contains(obj)) { Devirtualizer::do_klass(closure, this); @@ -188,8 +181,6 @@ return size_helper(); } -#undef INLINE - #define ALL_INSTANCE_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \ OOP_OOP_ITERATE_DEFN( InstanceKlass, OopClosureType, nv_suffix) \ OOP_OOP_ITERATE_DEFN_BOUNDED( InstanceKlass, OopClosureType, nv_suffix) \ diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/oops/instanceRefKlass.cpp --- a/hotspot/src/share/vm/oops/instanceRefKlass.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/oops/instanceRefKlass.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,13 +25,8 @@ #include "precompiled.hpp" #include "classfile/javaClasses.hpp" #include "classfile/systemDictionary.hpp" -#include "gc/shared/collectedHeap.inline.hpp" -#include "gc/shared/genCollectedHeap.hpp" -#include "gc/shared/specialized_oop_closures.hpp" #include "oops/instanceRefKlass.inline.hpp" #include "oops/oop.inline.hpp" -#include "utilities/macros.hpp" -#include "utilities/preserveException.hpp" void InstanceRefKlass::update_nonstatic_oop_maps(Klass* k) { // Clear the nonstatic oop-map entries corresponding to referent @@ -87,48 +82,3 @@ guarantee(InstanceKlass::cast(next->klass())->is_reference_instance_klass(), "next field verify failed"); } } - -bool InstanceRefKlass::owns_pending_list_lock(JavaThread* thread) { - if (java_lang_ref_Reference::pending_list_lock() == NULL) return false; - Handle h_lock(thread, java_lang_ref_Reference::pending_list_lock()); - return ObjectSynchronizer::current_thread_holds_lock(thread, h_lock); -} - -void InstanceRefKlass::acquire_pending_list_lock(BasicLock *pending_list_basic_lock) { - // we may enter this with pending exception set - PRESERVE_EXCEPTION_MARK; // exceptions are never thrown, needed for TRAPS argument - - // Create a HandleMark in case we retry a GC multiple times. - // Each time we attempt the GC, we allocate the handle below - // to hold the pending list lock. We want to free this handle. 
- HandleMark hm; - - Handle h_lock(THREAD, java_lang_ref_Reference::pending_list_lock()); - ObjectSynchronizer::fast_enter(h_lock, pending_list_basic_lock, false, THREAD); - assert(ObjectSynchronizer::current_thread_holds_lock( - JavaThread::current(), h_lock), - "Locking should have succeeded"); - if (HAS_PENDING_EXCEPTION) CLEAR_PENDING_EXCEPTION; -} - -void InstanceRefKlass::release_and_notify_pending_list_lock( - BasicLock *pending_list_basic_lock) { - // we may enter this with pending exception set - PRESERVE_EXCEPTION_MARK; // exceptions are never thrown, needed for TRAPS argument - - // Create a HandleMark in case we retry a GC multiple times. - // Each time we attempt the GC, we allocate the handle below - // to hold the pending list lock. We want to free this handle. - HandleMark hm; - - Handle h_lock(THREAD, java_lang_ref_Reference::pending_list_lock()); - assert(ObjectSynchronizer::current_thread_holds_lock( - JavaThread::current(), h_lock), - "Lock should be held"); - // Notify waiters on pending lists lock if there is any reference. - if (java_lang_ref_Reference::pending_list() != NULL) { - ObjectSynchronizer::notifyall(h_lock, THREAD); - } - ObjectSynchronizer::fast_exit(h_lock(), pending_list_basic_lock, THREAD); - if (HAS_PENDING_EXCEPTION) CLEAR_PENDING_EXCEPTION; -} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/oops/instanceRefKlass.hpp --- a/hotspot/src/share/vm/oops/instanceRefKlass.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/oops/instanceRefKlass.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -118,10 +118,6 @@ ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_OOP_ITERATE_DECL_BACKWARDS) #endif // INCLUDE_ALL_GCS - static void release_and_notify_pending_list_lock(BasicLock *pending_list_basic_lock); - static void acquire_pending_list_lock(BasicLock *pending_list_basic_lock); - static bool owns_pending_list_lock(JavaThread* thread); - // Update non-static oop maps so 'referent', 'nextPending' and // 'discovered' will look like non-oops static void update_nonstatic_oop_maps(Klass* k); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/oops/klassVtable.cpp --- a/hotspot/src/share/vm/oops/klassVtable.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/oops/klassVtable.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -274,7 +274,7 @@ if (supersuperklass->is_override(super_method, target_loader, target_classname, THREAD)) { if (log_develop_is_enabled(Trace, vtables)) { ResourceMark rm(THREAD); - outputStream* logst = LogHandle(vtables)::trace_stream(); + outputStream* logst = Log(vtables)::trace_stream(); char* sig = target_method()->name_and_sig_as_C_string(); logst->print("transitive overriding superclass %s with %s::%s index %d, original flags: ", supersuperklass->internal_name(), @@ -305,7 +305,7 @@ #ifndef PRODUCT if (log_develop_is_enabled(Trace, vtables)) { ResourceMark rm(thread); - outputStream* logst = LogHandle(vtables)::trace_stream(); + outputStream* logst = Log(vtables)::trace_stream(); char* sig = target_method()->name_and_sig_as_C_string(); if (overrides) { logst->print("overriding with %s::%s index %d, original flags: ", @@ -493,7 +493,7 @@ void klassVtable::put_method_at(Method* m, int index) { if (log_develop_is_enabled(Trace, vtables)) { ResourceMark rm; - outputStream* logst = LogHandle(vtables)::trace_stream(); + outputStream* logst = Log(vtables)::trace_stream(); const char* sig = (m != NULL) ? 
m->name_and_sig_as_C_string() : ""; logst->print("adding %s at index %d, flags: ", sig, index); if (m != NULL) { @@ -821,7 +821,7 @@ if (log_develop_is_enabled(Trace, vtables)) { Method* meth = mirandas.at(i); ResourceMark rm(Thread::current()); - outputStream* logst = LogHandle(vtables)::trace_stream(); + outputStream* logst = Log(vtables)::trace_stream(); if (meth != NULL) { char* sig = meth->name_and_sig_as_C_string(); logst->print("fill in mirandas with %s index %d, flags: ", @@ -1045,7 +1045,7 @@ // If m is already assigned a vtable index, do not disturb it. if (log_develop_is_enabled(Trace, itables)) { ResourceMark rm; - outputStream* logst = LogHandle(itables)::trace_stream(); + outputStream* logst = Log(itables)::trace_stream(); assert(m != NULL, "methods can never be null"); const char* sig = m->name_and_sig_as_C_string(); if (m->has_vtable_index()) { @@ -1161,7 +1161,7 @@ if (log_develop_is_enabled(Trace, itables)) { ResourceMark rm(THREAD); if (target() != NULL) { - outputStream* logst = LogHandle(itables)::trace_stream(); + outputStream* logst = Log(itables)::trace_stream(); char* sig = target()->name_and_sig_as_C_string(); logst->print("interface: %s, ime_num: %d, target: %s, method_holder: %s ", interf_h()->internal_name(), ime_num, sig, diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/oops/method.cpp --- a/hotspot/src/share/vm/oops/method.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/oops/method.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -30,6 +30,7 @@ #include "gc/shared/collectedHeap.inline.hpp" #include "gc/shared/gcLocker.hpp" #include "gc/shared/generation.hpp" +#include "gc/shared/referencePendingListLocker.hpp" #include "interpreter/bytecodeStream.hpp" #include "interpreter/bytecodeTracer.hpp" #include "interpreter/bytecodes.hpp" @@ -38,6 +39,7 @@ #include "memory/heapInspection.hpp" #include "memory/metadataFactory.hpp" #include "memory/oopFactory.hpp" +#include "memory/resourceArea.hpp" #include "oops/constMethod.hpp" #include "oops/method.hpp" #include "oops/methodData.hpp" @@ -374,7 +376,7 @@ // Do not profile method if current thread holds the pending list lock, // which avoids deadlock for acquiring the MethodData_lock. - if (InstanceRefKlass::owns_pending_list_lock((JavaThread*)THREAD)) { + if (ReferencePendingListLocker::is_locked_by_self()) { return; } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/oops/method.hpp --- a/hotspot/src/share/vm/oops/method.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/oops/method.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -264,6 +264,7 @@ int highest_osr_comp_level() const; void set_highest_osr_comp_level(int level); +#if defined(COMPILER2) || INCLUDE_JVMCI // Count of times method was exited via exception while interpreting void interpreter_throwout_increment(TRAPS) { MethodCounters* mcs = get_method_counters(CHECK); @@ -271,6 +272,7 @@ mcs->interpreter_throwout_increment(); } } +#endif int interpreter_throwout_count() const { MethodCounters* mcs = method_counters(); @@ -407,11 +409,13 @@ return (mcs == NULL) ? 0 : mcs->interpreter_invocation_count(); } } +#if defined(COMPILER2) || INCLUDE_JVMCI int increment_interpreter_invocation_count(TRAPS) { if (TieredCompilation) ShouldNotReachHere(); MethodCounters* mcs = get_method_counters(CHECK_0); return (mcs == NULL) ? 
0 : mcs->increment_interpreter_invocation_count(); } +#endif #ifndef PRODUCT int compiled_invocation_count() const { return _compiled_invocation_count; } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/oops/methodCounters.hpp --- a/hotspot/src/share/vm/oops/methodCounters.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/oops/methodCounters.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2016 Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -34,8 +34,10 @@ friend class VMStructs; friend class JVMCIVMStructs; private: +#if defined(COMPILER2) || INCLUDE_JVMCI int _interpreter_invocation_count; // Count of times invoked (reused as prev_event_count in tiered) u2 _interpreter_throwout_count; // Count of times method was exited via exception while interpreting +#endif u2 _number_of_breakpoints; // fullspeed debugging support InvocationCounter _invocation_counter; // Incremented before each activation of the method - used to trigger frequency-based optimizations InvocationCounter _backedge_counter; // Incremented before each backedge taken - used to trigger frequencey-based optimizations @@ -60,9 +62,7 @@ u1 _highest_osr_comp_level; // Same for OSR level #endif - MethodCounters(methodHandle mh) : _interpreter_invocation_count(0), - _interpreter_throwout_count(0), - _number_of_breakpoints(0), + MethodCounters(methodHandle mh) : _number_of_breakpoints(0), _nmethod_age(INT_MAX) #ifdef TIERED , _rate(0), @@ -71,6 +71,8 @@ _highest_osr_comp_level(0) #endif { + set_interpreter_invocation_count(0); + set_interpreter_throwout_count(0); invocation_counter()->init(); backedge_counter()->init(); @@ -109,6 +111,8 @@ void clear_counters(); +#if defined(COMPILER2) || INCLUDE_JVMCI + int interpreter_invocation_count() { return _interpreter_invocation_count; } @@ -131,6 +135,24 @@ _interpreter_throwout_count = count; } +#else // defined(COMPILER2) || INCLUDE_JVMCI + + int interpreter_invocation_count() { + return 0; + } + void set_interpreter_invocation_count(int count) { + assert(count == 0, "count must be 0"); + } + + int interpreter_throwout_count() const { + return 0; + } + void set_interpreter_throwout_count(int count) { + assert(count == 0, "count must be 0"); + } + +#endif // defined(COMPILER2) || INCLUDE_JVMCI + u2 number_of_breakpoints() const { return _number_of_breakpoints; } void incr_number_of_breakpoints() { ++_number_of_breakpoints; } void decr_number_of_breakpoints() { --_number_of_breakpoints; } @@ -170,10 +192,25 @@ return byte_offset_of(MethodCounters, _nmethod_age); } +#if defined(COMPILER2) || INCLUDE_JVMCI + static ByteSize interpreter_invocation_counter_offset() { return byte_offset_of(MethodCounters, _interpreter_invocation_count); } + static int interpreter_invocation_counter_offset_in_bytes() { + return offset_of(MethodCounters, _interpreter_invocation_count); + } + +#else // defined(COMPILER2) || INCLUDE_JVMCI + + static ByteSize interpreter_invocation_counter_offset() { + ShouldNotReachHere(); + return in_ByteSize(0); + } + +#endif // defined(COMPILER2) || INCLUDE_JVMCI + static ByteSize invocation_counter_offset() { return byte_offset_of(MethodCounters, _invocation_counter); } @@ -182,10 +219,6 @@ return byte_offset_of(MethodCounters, _backedge_counter); } - static int interpreter_invocation_counter_offset_in_bytes() { 
- return offset_of(MethodCounters, _interpreter_invocation_count); - } - static ByteSize interpreter_invocation_limit_offset() { return byte_offset_of(MethodCounters, _interpreter_invocation_limit); } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/oops/methodData.cpp --- a/hotspot/src/share/vm/oops/methodData.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/oops/methodData.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -29,6 +29,7 @@ #include "interpreter/bytecodeStream.hpp" #include "interpreter/linkResolver.hpp" #include "memory/heapInspection.hpp" +#include "memory/resourceArea.hpp" #include "oops/methodData.hpp" #include "prims/jvmtiRedefineClasses.hpp" #include "runtime/arguments.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/oops/objArrayKlass.cpp --- a/hotspot/src/share/vm/oops/objArrayKlass.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/oops/objArrayKlass.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -461,8 +461,6 @@ #endif //PRODUCT -static int max_objArray_print_length = 4; - void ObjArrayKlass::oop_print_value_on(oop obj, outputStream* st) { assert(obj->is_objArray(), "must be objArray"); st->print("a "); @@ -470,16 +468,6 @@ int len = objArrayOop(obj)->length(); st->print("[%d] ", len); obj->print_address_on(st); - if (NOT_PRODUCT(PrintOopAddress ||) PrintMiscellaneous && (WizardMode || Verbose)) { - st->print("{"); - for (int i = 0; i < len; i++) { - if (i > max_objArray_print_length) { - st->print("..."); break; - } - st->print(" " INTPTR_FORMAT, (intptr_t)(void*)objArrayOop(obj)->obj_at(i)); - } - st->print(" }"); - } } const char* ObjArrayKlass::internal_name() const { diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/oops/oop.cpp --- a/hotspot/src/share/vm/oops/oop.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/oops/oop.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,7 @@ #include "precompiled.hpp" #include "classfile/altHashing.hpp" #include "classfile/javaClasses.inline.hpp" +#include "memory/resourceArea.hpp" #include "oops/oop.inline.hpp" #include "oops/verifyOopClosure.hpp" #include "runtime/handles.inline.hpp" @@ -44,9 +45,8 @@ } void oopDesc::print_address_on(outputStream* st) const { - if (PrintOopAddress) { - st->print("{" INTPTR_FORMAT "}", p2i(this)); - } + st->print("{" INTPTR_FORMAT "}", p2i(this)); + } void oopDesc::print() { print_on(tty); } @@ -76,7 +76,7 @@ st->print("NULL"); } else if (java_lang_String::is_instance(obj)) { java_lang_String::print(obj, st); - if (PrintOopAddress) print_address_on(st); + print_address_on(st); } else { klass()->oop_print_value_on(obj, st); } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/opto/block.cpp --- a/hotspot/src/share/vm/opto/block.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/opto/block.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2014, Oracle and/or its affiliates. 
All rights reserved. + * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,7 @@ #include "precompiled.hpp" #include "libadt/vectset.hpp" #include "memory/allocation.inline.hpp" +#include "memory/resourceArea.hpp" #include "compiler/compilerDirectives.hpp" #include "opto/block.hpp" #include "opto/cfgnode.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/opto/buildOopMap.cpp --- a/hotspot/src/share/vm/opto/buildOopMap.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/opto/buildOopMap.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,7 @@ #include "precompiled.hpp" #include "code/vmreg.inline.hpp" #include "compiler/oopMap.hpp" +#include "memory/resourceArea.hpp" #include "opto/addnode.hpp" #include "opto/callnode.hpp" #include "opto/compile.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/opto/c2_globals.hpp --- a/hotspot/src/share/vm/opto/c2_globals.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/opto/c2_globals.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -194,6 +194,9 @@ "Map number of unrolls for main loop via " \ "Superword Level Parallelism analysis") \ \ + product_pd(bool, PostLoopMultiversioning, \ + "Multi versioned post loops to eliminate range checks") \ + \ notproduct(bool, TraceSuperWordLoopUnrollAnalysis, false, \ "Trace what Superword Level Parallelism analysis applies") \ \ @@ -229,21 +232,12 @@ develop(bool, TraceLoopOpts, false, \ "Trace executed loop optimizations") \ \ - diagnostic(bool, LoopLimitCheck, true, \ - "Generate a loop limits check for overflow") \ - \ develop(bool, TraceLoopLimitCheck, false, \ "Trace generation of loop limits checks") \ \ - diagnostic(bool, RangeLimitCheck, true, \ - "Additional overflow checks during range check elimination") \ - \ develop(bool, TraceRangeLimitCheck, false, \ "Trace additional overflow checks in RCE") \ \ - diagnostic(bool, UnrollLimitCheck, true, \ - "Additional overflow checks during loop unroll") \ - \ /* OptimizeFill not yet supported on PowerPC. 
*/ \ product(bool, OptimizeFill, true PPC64_ONLY(&& false), \ "convert fill/copy loops into intrinsic") \ @@ -595,26 +589,26 @@ product(bool, BlockLayoutRotateLoops, true, \ "Allow back branches to be fall throughs in the block layour") \ \ - diagnostic(bool, InlineReflectionGetCallerClass, true, \ + develop(bool, InlineReflectionGetCallerClass, true, \ "inline sun.reflect.Reflection.getCallerClass(), known to be " \ "part of base library DLL") \ \ - diagnostic(bool, InlineObjectCopy, true, \ + develop(bool, InlineObjectCopy, true, \ "inline Object.clone and Arrays.copyOf[Range] intrinsics") \ \ - diagnostic(bool, SpecialStringCompareTo, true, \ + develop(bool, SpecialStringCompareTo, true, \ "special version of string compareTo") \ \ - diagnostic(bool, SpecialStringIndexOf, true, \ + develop(bool, SpecialStringIndexOf, true, \ "special version of string indexOf") \ \ - diagnostic(bool, SpecialStringEquals, true, \ + develop(bool, SpecialStringEquals, true, \ "special version of string equals") \ \ - diagnostic(bool, SpecialArraysEquals, true, \ + develop(bool, SpecialArraysEquals, true, \ "special version of Arrays.equals(char[],char[])") \ \ - diagnostic(bool, SpecialEncodeISOArray, true, \ + product(bool, SpecialEncodeISOArray, true, \ "special version of ISO_8859_1$Encoder.encodeISOArray") \ \ develop(bool, BailoutToInterpreterForThrows, false, \ @@ -716,22 +710,22 @@ diagnostic(bool, OptimizeExpensiveOps, true, \ "Find best control for expensive operations") \ \ - diagnostic(bool, UseMathExactIntrinsics, true, \ + product(bool, UseMathExactIntrinsics, true, \ "Enables intrinsification of various java.lang.Math functions") \ \ - diagnostic(bool, UseMultiplyToLenIntrinsic, false, \ + product(bool, UseMultiplyToLenIntrinsic, false, \ "Enables intrinsification of BigInteger.multiplyToLen()") \ \ - diagnostic(bool, UseSquareToLenIntrinsic, false, \ + product(bool, UseSquareToLenIntrinsic, false, \ "Enables intrinsification of BigInteger.squareToLen()") \ \ - diagnostic(bool, UseMulAddIntrinsic, false, \ + product(bool, UseMulAddIntrinsic, false, \ "Enables intrinsification of BigInteger.mulAdd()") \ \ - diagnostic(bool, UseMontgomeryMultiplyIntrinsic, false, \ + product(bool, UseMontgomeryMultiplyIntrinsic, false, \ "Enables intrinsification of BigInteger.montgomeryMultiply()") \ \ - diagnostic(bool, UseMontgomerySquareIntrinsic, false, \ + product(bool, UseMontgomerySquareIntrinsic, false, \ "Enables intrinsification of BigInteger.montgomerySquare()") \ \ product(bool, UseTypeSpeculation, true, \ diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/opto/cfgnode.cpp --- a/hotspot/src/share/vm/opto/cfgnode.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/opto/cfgnode.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,7 @@ #include "precompiled.hpp" #include "classfile/systemDictionary.hpp" #include "memory/allocation.inline.hpp" +#include "memory/resourceArea.hpp" #include "oops/objArrayKlass.hpp" #include "opto/addnode.hpp" #include "opto/castnode.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/opto/chaitin.cpp --- a/hotspot/src/share/vm/opto/chaitin.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/opto/chaitin.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,6 +26,7 @@ #include "compiler/compileLog.hpp" #include "compiler/oopMap.hpp" #include "memory/allocation.inline.hpp" +#include "memory/resourceArea.hpp" #include "opto/addnode.hpp" #include "opto/block.hpp" #include "opto/callnode.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/opto/compile.cpp --- a/hotspot/src/share/vm/opto/compile.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/opto/compile.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -33,6 +33,7 @@ #include "compiler/compileLog.hpp" #include "compiler/disassembler.hpp" #include "compiler/oopMap.hpp" +#include "memory/resourceArea.hpp" #include "opto/addnode.hpp" #include "opto/block.hpp" #include "opto/c2compiler.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/opto/compile.hpp --- a/hotspot/src/share/vm/opto/compile.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/opto/compile.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -39,6 +39,7 @@ #include "opto/phase.hpp" #include "opto/regmask.hpp" #include "runtime/deoptimization.hpp" +#include "runtime/timerTrace.hpp" #include "runtime/vmThread.hpp" #include "trace/tracing.hpp" #include "utilities/ticks.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/opto/domgraph.cpp --- a/hotspot/src/share/vm/opto/domgraph.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/opto/domgraph.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,7 @@ #include "precompiled.hpp" #include "libadt/vectset.hpp" #include "memory/allocation.hpp" +#include "memory/resourceArea.hpp" #include "opto/block.hpp" #include "opto/machnode.hpp" #include "opto/phaseX.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/opto/escape.cpp --- a/hotspot/src/share/vm/opto/escape.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/opto/escape.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,6 +27,7 @@ #include "compiler/compileLog.hpp" #include "libadt/vectset.hpp" #include "memory/allocation.hpp" +#include "memory/resourceArea.hpp" #include "opto/c2compiler.hpp" #include "opto/arraycopynode.hpp" #include "opto/callnode.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/opto/gcm.cpp --- a/hotspot/src/share/vm/opto/gcm.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/opto/gcm.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,7 @@ #include "precompiled.hpp" #include "libadt/vectset.hpp" #include "memory/allocation.inline.hpp" +#include "memory/resourceArea.hpp" #include "opto/block.hpp" #include "opto/c2compiler.hpp" #include "opto/callnode.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/opto/generateOptoStub.cpp --- a/hotspot/src/share/vm/opto/generateOptoStub.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/opto/generateOptoStub.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -23,6 +23,7 @@ */ #include "precompiled.hpp" +#include "memory/resourceArea.hpp" #include "opto/addnode.hpp" #include "opto/callnode.hpp" #include "opto/cfgnode.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/opto/graphKit.cpp --- a/hotspot/src/share/vm/opto/graphKit.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/opto/graphKit.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,6 +29,7 @@ #include "gc/shared/barrierSet.hpp" #include "gc/shared/cardTableModRefBS.hpp" #include "gc/shared/collectedHeap.hpp" +#include "memory/resourceArea.hpp" #include "opto/addnode.hpp" #include "opto/castnode.hpp" #include "opto/convertnode.hpp" @@ -1190,11 +1191,6 @@ bool speculative) { assert(!assert_null || null_control == NULL, "not both at once"); if (stopped()) return top(); - if (!GenerateCompilerNullChecks && !assert_null && null_control == NULL) { - // For some performance testing, we may wish to suppress null checking. - value = cast_not_null(value); // Make it appear to be non-null (4962416). 
- return value; - } NOT_PRODUCT(explicit_null_checks_inserted++); // Construct NULL check @@ -1686,6 +1682,9 @@ const Type* elemtype = arytype->elem(); BasicType elembt = elemtype->array_element_basic_type(); Node* adr = array_element_address(ary, idx, elembt, arytype->size()); + if (elembt == T_NARROWOOP) { + elembt = T_OBJECT; // To satisfy switch in LoadNode::make() + } Node* ld = make_load(ctl, adr, elemtype, elembt, arytype, MemNode::unordered); return ld; } @@ -3770,9 +3769,7 @@ add_predicate_impl(Deoptimization::Reason_predicate, nargs); } // loop's limit check predicate should be near the loop. - if (LoopLimitCheck) { - add_predicate_impl(Deoptimization::Reason_loop_limit_check, nargs); - } + add_predicate_impl(Deoptimization::Reason_loop_limit_check, nargs); } //----------------------------- store barriers ---------------------------- diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/opto/idealGraphPrinter.cpp --- a/hotspot/src/share/vm/opto/idealGraphPrinter.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/opto/idealGraphPrinter.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2007, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -23,6 +23,7 @@ */ #include "precompiled.hpp" +#include "memory/resourceArea.hpp" #include "opto/chaitin.hpp" #include "opto/idealGraphPrinter.hpp" #include "opto/machnode.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/opto/ifg.cpp --- a/hotspot/src/share/vm/opto/ifg.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/opto/ifg.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,7 @@ #include "precompiled.hpp" #include "compiler/oopMap.hpp" #include "memory/allocation.inline.hpp" +#include "memory/resourceArea.hpp" #include "opto/addnode.hpp" #include "opto/block.hpp" #include "opto/callnode.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/opto/ifnode.cpp --- a/hotspot/src/share/vm/opto/ifnode.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/opto/ifnode.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,7 @@ #include "precompiled.hpp" #include "ci/ciTypeFlow.hpp" #include "memory/allocation.inline.hpp" +#include "memory/resourceArea.hpp" #include "opto/addnode.hpp" #include "opto/castnode.hpp" #include "opto/cfgnode.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/opto/library_call.cpp --- a/hotspot/src/share/vm/opto/library_call.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/opto/library_call.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -28,6 +28,7 @@ #include "classfile/vmSymbols.hpp" #include "compiler/compileBroker.hpp" #include "compiler/compileLog.hpp" +#include "memory/resourceArea.hpp" #include "oops/objArrayKlass.hpp" #include "opto/addnode.hpp" #include "opto/arraycopynode.hpp" @@ -6272,7 +6273,20 @@ //------------------------------get_key_start_from_aescrypt_object----------------------- Node * LibraryCallKit::get_key_start_from_aescrypt_object(Node *aescrypt_object) { +#ifdef PPC64 + // MixColumns for decryption can be reduced by preprocessing MixColumns with round keys. + // Intel's extention is based on this optimization and AESCrypt generates round keys by preprocessing MixColumns. + // However, ppc64 vncipher processes MixColumns and requires the same round keys with encryption. + // The ppc64 stubs of encryption and decryption use the same round keys (sessionK[0]). + Node* objSessionK = load_field_from_object(aescrypt_object, "sessionK", "[[I", /*is_exact*/ false); + assert (objSessionK != NULL, "wrong version of com.sun.crypto.provider.AESCrypt"); + if (objSessionK == NULL) { + return (Node *) NULL; + } + Node* objAESCryptKey = load_array_element(control(), objSessionK, intcon(0), TypeAryPtr::OOPS); +#else Node* objAESCryptKey = load_field_from_object(aescrypt_object, "K", "[I", /*is_exact*/ false); +#endif // PPC64 assert (objAESCryptKey != NULL, "wrong version of com.sun.crypto.provider.AESCrypt"); if (objAESCryptKey == NULL) return (Node *) NULL; diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/opto/live.cpp --- a/hotspot/src/share/vm/opto/live.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/opto/live.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -24,6 +24,7 @@ #include "precompiled.hpp" #include "memory/allocation.inline.hpp" +#include "memory/resourceArea.hpp" #include "opto/callnode.hpp" #include "opto/chaitin.hpp" #include "opto/live.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/opto/loopPredicate.cpp --- a/hotspot/src/share/vm/opto/loopPredicate.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/opto/loopPredicate.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -313,11 +313,9 @@ // Search original predicates Node* entry = old_entry; ProjNode* limit_check_proj = NULL; - if (LoopLimitCheck) { - limit_check_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check); - if (limit_check_proj != NULL) { - entry = entry->in(0)->in(0); - } + limit_check_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check); + if (limit_check_proj != NULL) { + entry = entry->in(0)->in(0); } if (UseLoopPredicate) { ProjNode* predicate_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate); @@ -353,11 +351,9 @@ // Skip related predicates. Node* PhaseIdealLoop::skip_loop_predicates(Node* entry) { Node* predicate = NULL; - if (LoopLimitCheck) { - predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check); - if (predicate != NULL) { - entry = entry->in(0)->in(0); - } + predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check); + if (predicate != NULL) { + entry = entry->in(0)->in(0); } if (UseLoopPredicate) { predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate); @@ -393,11 +389,9 @@ // Find a predicate Node* PhaseIdealLoop::find_predicate(Node* entry) { Node* predicate = NULL; - if (LoopLimitCheck) { - predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check); - if (predicate != NULL) { // right pattern that can be used by loop predication - return entry; - } + predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check); + if (predicate != NULL) { // right pattern that can be used by loop predication + return entry; } if (UseLoopPredicate) { predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate); @@ -646,19 +640,13 @@ Node* max_idx_expr = init; int stride_con = stride->get_int(); if ((stride_con > 0) == (scale > 0) == upper) { - if (LoopLimitCheck) { - // With LoopLimitCheck limit is not exact. - // Calculate exact limit here. - // Note, counted loop's test is '<' or '>'. - limit = exact_limit(loop); - max_idx_expr = new SubINode(limit, stride); - register_new_node(max_idx_expr, ctrl); - if (TraceLoopPredicate) predString->print("(limit - stride) "); - } else { - max_idx_expr = new SubINode(limit, stride); - register_new_node(max_idx_expr, ctrl); - if (TraceLoopPredicate) predString->print("(limit - stride) "); - } + // Limit is not exact. + // Calculate exact limit here. + // Note, counted loop's test is '<' or '>'. + limit = exact_limit(loop); + max_idx_expr = new SubINode(limit, stride); + register_new_node(max_idx_expr, ctrl); + if (TraceLoopPredicate) predString->print("(limit - stride) "); } else { if (TraceLoopPredicate) predString->print("init "); } @@ -721,12 +709,9 @@ Node* entry = head->in(LoopNode::EntryControl); ProjNode *predicate_proj = NULL; // Loop limit check predicate should be near the loop. 
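For illustration, the simplified lookup that the '+' lines in these loopPredicate.cpp hunks settle on now that the LoopLimitCheck flag is gone; this is a sketch stitched together from the hunk itself, not new behavior:

    // The loop-limit-check predicate is located unconditionally; when present,
    // step past it so the regular predicate above it can be found as before.
    ProjNode* limit_check_proj =
        find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check);
    if (limit_check_proj != NULL) {
      entry = entry->in(0)->in(0);
    }
    if (UseLoopPredicate) {
      ProjNode* predicate_proj =
          find_predicate_insertion_point(entry, Deoptimization::Reason_predicate);
      // ... handled exactly as in the existing code ...
    }
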
- if (LoopLimitCheck) { - predicate_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check); - if (predicate_proj != NULL) - entry = predicate_proj->in(0)->in(0); - } - + predicate_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check); + if (predicate_proj != NULL) + entry = predicate_proj->in(0)->in(0); predicate_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate); if (!predicate_proj) { #ifndef PRODUCT diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/opto/loopTransform.cpp --- a/hotspot/src/share/vm/opto/loopTransform.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/opto/loopTransform.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1027,82 +1027,9 @@ _igvn.replace_input_of(bol, 1, cmp); } - //------------------------------ - // Step A: Create Post-Loop. - Node* main_exit = main_end->proj_out(false); - assert( main_exit->Opcode() == Op_IfFalse, "" ); - int dd_main_exit = dom_depth(main_exit); - - // Step A1: Clone the loop body. The clone becomes the post-loop. The main - // loop pre-header illegally has 2 control users (old & new loops). - clone_loop( loop, old_new, dd_main_exit ); - assert( old_new[main_end ->_idx]->Opcode() == Op_CountedLoopEnd, "" ); - CountedLoopNode *post_head = old_new[main_head->_idx]->as_CountedLoop(); - post_head->set_post_loop(main_head); - - // Reduce the post-loop trip count. - CountedLoopEndNode* post_end = old_new[main_end ->_idx]->as_CountedLoopEnd(); - post_end->_prob = PROB_FAIR; - - // Build the main-loop normal exit. - IfFalseNode *new_main_exit = new IfFalseNode(main_end); - _igvn.register_new_node_with_optimizer( new_main_exit ); - set_idom(new_main_exit, main_end, dd_main_exit ); - set_loop(new_main_exit, loop->_parent); - - // Step A2: Build a zero-trip guard for the post-loop. After leaving the - // main-loop, the post-loop may not execute at all. We 'opaque' the incr - // (the main-loop trip-counter exit value) because we will be changing - // the exit value (via unrolling) so we cannot constant-fold away the zero - // trip guard until all unrolling is done. - Node *zer_opaq = new Opaque1Node(C, incr); - Node *zer_cmp = new CmpINode( zer_opaq, limit ); - Node *zer_bol = new BoolNode( zer_cmp, b_test ); - register_new_node( zer_opaq, new_main_exit ); - register_new_node( zer_cmp , new_main_exit ); - register_new_node( zer_bol , new_main_exit ); - - // Build the IfNode - IfNode *zer_iff = new IfNode( new_main_exit, zer_bol, PROB_FAIR, COUNT_UNKNOWN ); - _igvn.register_new_node_with_optimizer( zer_iff ); - set_idom(zer_iff, new_main_exit, dd_main_exit); - set_loop(zer_iff, loop->_parent); - - // Plug in the false-path, taken if we need to skip post-loop - _igvn.replace_input_of(main_exit, 0, zer_iff); - set_idom(main_exit, zer_iff, dd_main_exit); - set_idom(main_exit->unique_out(), zer_iff, dd_main_exit); - // Make the true-path, must enter the post loop - Node *zer_taken = new IfTrueNode( zer_iff ); - _igvn.register_new_node_with_optimizer( zer_taken ); - set_idom(zer_taken, zer_iff, dd_main_exit); - set_loop(zer_taken, loop->_parent); - // Plug in the true path - _igvn.hash_delete( post_head ); - post_head->set_req(LoopNode::EntryControl, zer_taken); - set_idom(post_head, zer_taken, dd_main_exit); - - Arena *a = Thread::current()->resource_area(); - VectorSet visited(a); - Node_Stack clones(a, main_head->back_control()->outcnt()); - // Step A3: Make the fall-in values to the post-loop come from the - // fall-out values of the main-loop. 
- for (DUIterator_Fast imax, i = main_head->fast_outs(imax); i < imax; i++) { - Node* main_phi = main_head->fast_out(i); - if( main_phi->is_Phi() && main_phi->in(0) == main_head && main_phi->outcnt() >0 ) { - Node *post_phi = old_new[main_phi->_idx]; - Node *fallmain = clone_up_backedge_goo(main_head->back_control(), - post_head->init_control(), - main_phi->in(LoopNode::LoopBackControl), - visited, clones); - _igvn.hash_delete(post_phi); - post_phi->set_req( LoopNode::EntryControl, fallmain ); - } - } - - // Update local caches for next stanza - main_exit = new_main_exit; - + // Add the post loop + CountedLoopNode *post_head = NULL; + Node *main_exit = insert_post_loop(loop, old_new, main_head, main_end, incr, limit, post_head); //------------------------------ // Step B: Create Pre-Loop. @@ -1158,8 +1085,9 @@ main_head->set_req(LoopNode::EntryControl, min_taken); set_idom(main_head, min_taken, dd_main_head); - visited.Clear(); - clones.clear(); + Arena *a = Thread::current()->resource_area(); + VectorSet visited(a); + Node_Stack clones(a, main_head->back_control()->outcnt()); // Step B3: Make the fall-in values to the main-loop come from the // fall-out values of the pre-loop. for (DUIterator_Fast i2max, i2 = main_head->fast_outs(i2max); i2 < i2max; i2++) { @@ -1185,12 +1113,8 @@ // variable value and the induction variable Phi to preserve correct // dependencies. - // CastII for the post loop: - bool inserted = cast_incr_before_loop(zer_opaq->in(1), zer_taken, post_head); - assert(inserted, "no castII inserted"); - // CastII for the main loop: - inserted = cast_incr_before_loop(pre_incr, min_taken, main_head); + bool inserted = cast_incr_before_loop( pre_incr, min_taken, main_head ); assert(inserted, "no castII inserted"); // Step B4: Shorten the pre-loop to run only 1 iteration (for now). @@ -1298,19 +1222,82 @@ guarantee(main_end != NULL, "no loop exit node"); // diagnostic to show loop end is not properly formed assert(main_end->outcnt() == 2, "1 true, 1 false path only"); - uint dd_main_head = dom_depth(main_head); - uint max = main_head->outcnt(); // mark this loop as processed main_head->mark_has_atomic_post_loop(); - Node *pre_header = main_head->in(LoopNode::EntryControl); - Node *init = main_head->init_trip(); Node *incr = main_end->incr(); Node *limit = main_end->limit(); - Node *stride = main_end->stride(); - Node *cmp = main_end->cmp_node(); - BoolTest::mask b_test = main_end->test_trip(); + + // In this case we throw away the result as we are not using it to connect anything else. + CountedLoopNode *post_head = NULL; + insert_post_loop(loop, old_new, main_head, main_end, incr, limit, post_head); + + // It's difficult to be precise about the trip-counts + // for post loops. They are usually very short, + // so guess that unit vector trips is a reasonable value. + post_head->set_profile_trip_cnt(cur_unroll); + + // Now force out all loop-invariant dominating tests. The optimizer + // finds some, but we _know_ they are all useless. + peeled_dom_test_elim(loop, old_new); + loop->record_for_igvn(); +} + + +//-------------------------insert_scalar_rced_post_loop------------------------ +// Insert a copy of the rce'd main loop as a post loop, +// We have not unrolled the main loop, so this is the right time to inject this. +// Later we will examine the partner of this post loop pair which still has range checks +// to see inject code which tests at runtime if the range checks are applicable. 
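To make the structure that insert_post_loop() reproduces easier to picture, here is a standalone scalar analogue with hypothetical names; HotSpot builds this shape in the ideal graph rather than in source, and the real zero-trip guards use the Opaque1 nodes described in the Step A2 comments below:

    #include <cstdio>

    // Illustrative only: touch n elements the way a pre/main/post split does,
    // with 'stride' standing in for the unroll or vector width.
    static void pre_main_post(int* a, int n, int stride) {
      int i = 0;
      // pre-loop: a few scalar iterations peeled off ahead of the main loop
      for (; i < n && i < stride; i++) a[i] += 1;
      // zero-trip guard + main loop: entered only while a full stride remains
      for (; i + stride <= n; i += stride) {
        for (int j = 0; j < stride; j++) a[i + j] += 1;  // unrolled body
      }
      // zero-trip guard + post loop: the leftover iterations
      for (; i < n; i++) a[i] += 1;
    }

    int main() {
      int a[10] = {0};
      pre_main_post(a, 10, 4);
      for (int k = 0; k < 10; k++) printf("%d ", a[k]);  // every element hit once
      printf("\n");
      return 0;
    }
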
+void PhaseIdealLoop::insert_scalar_rced_post_loop(IdealLoopTree *loop, Node_List &old_new) { + if (!loop->_head->is_CountedLoop()) return; + + CountedLoopNode *cl = loop->_head->as_CountedLoop(); + + // only process RCE'd main loops + if (!cl->is_main_loop() || cl->range_checks_present()) return; + +#ifndef PRODUCT + if (TraceLoopOpts) { + tty->print("PostScalarRce "); + loop->dump_head(); + } +#endif + C->set_major_progress(); + + // Find common pieces of the loop being guarded with pre & post loops + CountedLoopNode *main_head = loop->_head->as_CountedLoop(); + CountedLoopEndNode *main_end = main_head->loopexit(); + guarantee(main_end != NULL, "no loop exit node"); + // diagnostic to show loop end is not properly formed + assert(main_end->outcnt() == 2, "1 true, 1 false path only"); + + Node *incr = main_end->incr(); + Node *limit = main_end->limit(); + + // In this case we throw away the result as we are not using it to connect anything else. + CountedLoopNode *post_head = NULL; + insert_post_loop(loop, old_new, main_head, main_end, incr, limit, post_head); + + // It's difficult to be precise about the trip-counts + // for post loops. They are usually very short, + // so guess that unit vector trips is a reasonable value. + post_head->set_profile_trip_cnt(4.0); + post_head->set_is_rce_post_loop(); + + // Now force out all loop-invariant dominating tests. The optimizer + // finds some, but we _know_ they are all useless. + peeled_dom_test_elim(loop, old_new); + loop->record_for_igvn(); +} + + +//------------------------------insert_post_loop------------------------------- +// Insert post loops. Add a post loop to the given loop passed. +Node *PhaseIdealLoop::insert_post_loop(IdealLoopTree *loop, Node_List &old_new, + CountedLoopNode *main_head, CountedLoopEndNode *main_end, + Node *incr, Node *limit, CountedLoopNode *&post_head) { //------------------------------ // Step A: Create a new post-Loop. @@ -1322,7 +1309,7 @@ // The main loop pre-header illegally has 2 control users (old & new loops). clone_loop(loop, old_new, dd_main_exit); assert(old_new[main_end->_idx]->Opcode() == Op_CountedLoopEnd, ""); - CountedLoopNode *post_head = old_new[main_head->_idx]->as_CountedLoop(); + post_head = old_new[main_head->_idx]->as_CountedLoop(); post_head->set_normal_loop(); post_head->set_post_loop(main_head); @@ -1336,14 +1323,14 @@ set_idom(new_main_exit, main_end, dd_main_exit); set_loop(new_main_exit, loop->_parent); - // Step A2: Build a zero-trip guard for the vector post-loop. After leaving the - // main-loop, the vector post-loop may not execute at all. We 'opaque' the incr - // (the vectorized main-loop trip-counter exit value) because we will be changing + // Step A2: Build a zero-trip guard for the post-loop. After leaving the + // main-loop, the post-loop may not execute at all. We 'opaque' the incr + // (the previous loop trip-counter exit value) because we will be changing // the exit value (via additional unrolling) so we cannot constant-fold away the zero // trip guard until all unrolling is done. 
Node *zer_opaq = new Opaque1Node(C, incr); Node *zer_cmp = new CmpINode(zer_opaq, limit); - Node *zer_bol = new BoolNode(zer_cmp, b_test); + Node *zer_bol = new BoolNode(zer_cmp, main_end->test_trip()); register_new_node(zer_opaq, new_main_exit); register_new_node(zer_cmp, new_main_exit); register_new_node(zer_bol, new_main_exit); @@ -1354,11 +1341,11 @@ set_idom(zer_iff, new_main_exit, dd_main_exit); set_loop(zer_iff, loop->_parent); - // Plug in the false-path, taken if we need to skip vector post-loop + // Plug in the false-path, taken if we need to skip this post-loop _igvn.replace_input_of(main_exit, 0, zer_iff); set_idom(main_exit, zer_iff, dd_main_exit); set_idom(main_exit->unique_out(), zer_iff, dd_main_exit); - // Make the true-path, must enter the vector post loop + // Make the true-path, must enter this post loop Node *zer_taken = new IfTrueNode(zer_iff); _igvn.register_new_node_with_optimizer(zer_taken); set_idom(zer_taken, zer_iff, dd_main_exit); @@ -1371,7 +1358,7 @@ Arena *a = Thread::current()->resource_area(); VectorSet visited(a); Node_Stack clones(a, main_head->back_control()->outcnt()); - // Step A3: Make the fall-in values to the vector post-loop come from the + // Step A3: Make the fall-in values to the post-loop come from the // fall-out values of the main-loop. for (DUIterator_Fast imax, i = main_head->fast_outs(imax); i < imax; i++) { Node* main_phi = main_head->fast_out(i); @@ -1390,15 +1377,7 @@ bool inserted = cast_incr_before_loop(zer_opaq->in(1), zer_taken, post_head); assert(inserted, "no castII inserted"); - // It's difficult to be precise about the trip-counts - // for post loops. They are usually very short, - // so guess that unit vector trips is a reasonable value. - post_head->set_profile_trip_cnt((float)slp_max_unroll_factor); - - // Now force out all loop-invariant dominating tests. The optimizer - // finds some, but we _know_ they are all useless. - peeled_dom_test_elim(loop, old_new); - loop->record_for_igvn(); + return new_main_exit; } //------------------------------is_invariant----------------------------- @@ -1457,7 +1436,7 @@ // Check the shape of the graph at the loop entry. If an inappropriate // graph shape is encountered, the compiler bails out loop unrolling; // compilation of the method will still succeed. - if (!is_canonical_main_loop_entry(loop_head)) { + if (!is_canonical_loop_entry(loop_head)) { return; } opaq = ctrl->in(0)->in(1)->in(1)->in(2); @@ -1468,209 +1447,156 @@ C->set_major_progress(); Node* new_limit = NULL; - if (UnrollLimitCheck) { - int stride_con = stride->get_int(); - int stride_p = (stride_con > 0) ? stride_con : -stride_con; - uint old_trip_count = loop_head->trip_count(); - // Verify that unroll policy result is still valid. - assert(old_trip_count > 1 && - (!adjust_min_trip || stride_p <= (1<<3)*loop_head->unrolled_count()), "sanity"); - - // Adjust loop limit to keep valid iterations number after unroll. - // Use (limit - stride) instead of (((limit - init)/stride) & (-2))*stride - // which may overflow. - if (!adjust_min_trip) { - assert(old_trip_count > 1 && (old_trip_count & 1) == 0, - "odd trip count for maximally unroll"); - // Don't need to adjust limit for maximally unroll since trip count is even. - } else if (loop_head->has_exact_trip_count() && init->is_Con()) { - // Loop's limit is constant. Loop's init could be constant when pre-loop - // become peeled iteration. 
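
do_unrolling, modified above and below, doubles the loop body and, when needed, pulls the main-loop limit in by one stride so the leftover iteration falls to the post loop. A source-level sketch of one 2x unroll step (illustrative only, invented names, positive stride assumed):

// Before: for (int i = init; i < limit; i += stride) a[i] = 0;
// After one 2x unroll (conceptually): the limit is reduced by one stride so
// the doubled body never overruns; the post loop handles the remainder.
void unrolled_by_2(int init, int limit, int stride, int* a) {
  int i = init;
  int new_limit = limit - stride;           // see the underflow handling below
  for (; i < new_limit; i += 2 * stride) {  // doubled body
    a[i] = 0;
    a[i + stride] = 0;
  }
  for (; i < limit; i += stride) {          // post loop takes what is left
    a[i] = 0;
  }
}
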
- jlong init_con = init->get_int(); - // We can keep old loop limit if iterations count stays the same: - // old_trip_count == new_trip_count * 2 - // Note: since old_trip_count >= 2 then new_trip_count >= 1 - // so we also don't need to adjust zero trip test. - jlong limit_con = limit->get_int(); - // (stride_con*2) not overflow since stride_con <= 8. - int new_stride_con = stride_con * 2; - int stride_m = new_stride_con - (stride_con > 0 ? 1 : -1); - jlong trip_count = (limit_con - init_con + stride_m)/new_stride_con; - // New trip count should satisfy next conditions. - assert(trip_count > 0 && (julong)trip_count < (julong)max_juint/2, "sanity"); - uint new_trip_count = (uint)trip_count; - adjust_min_trip = (old_trip_count != new_trip_count*2); - } - - if (adjust_min_trip) { - // Step 2: Adjust the trip limit if it is called for. - // The adjustment amount is -stride. Need to make sure if the - // adjustment underflows or overflows, then the main loop is skipped. - Node* cmp = loop_end->cmp_node(); - assert(cmp->in(2) == limit, "sanity"); - assert(opaq != NULL && opaq->in(1) == limit, "sanity"); - - // Verify that policy_unroll result is still valid. - const TypeInt* limit_type = _igvn.type(limit)->is_int(); - assert(stride_con > 0 && ((limit_type->_hi - stride_con) < limit_type->_hi) || - stride_con < 0 && ((limit_type->_lo - stride_con) > limit_type->_lo), "sanity"); - - if (limit->is_Con()) { - // The check in policy_unroll and the assert above guarantee - // no underflow if limit is constant. - new_limit = _igvn.intcon(limit->get_int() - stride_con); - set_ctrl(new_limit, C->root()); + int stride_con = stride->get_int(); + int stride_p = (stride_con > 0) ? stride_con : -stride_con; + uint old_trip_count = loop_head->trip_count(); + // Verify that unroll policy result is still valid. + assert(old_trip_count > 1 && + (!adjust_min_trip || stride_p <= (1<<3)*loop_head->unrolled_count()), "sanity"); + + // Adjust loop limit to keep valid iterations number after unroll. + // Use (limit - stride) instead of (((limit - init)/stride) & (-2))*stride + // which may overflow. + if (!adjust_min_trip) { + assert(old_trip_count > 1 && (old_trip_count & 1) == 0, + "odd trip count for maximally unroll"); + // Don't need to adjust limit for maximally unroll since trip count is even. + } else if (loop_head->has_exact_trip_count() && init->is_Con()) { + // Loop's limit is constant. Loop's init could be constant when pre-loop + // become peeled iteration. + jlong init_con = init->get_int(); + // We can keep old loop limit if iterations count stays the same: + // old_trip_count == new_trip_count * 2 + // Note: since old_trip_count >= 2 then new_trip_count >= 1 + // so we also don't need to adjust zero trip test. + jlong limit_con = limit->get_int(); + // (stride_con*2) not overflow since stride_con <= 8. + int new_stride_con = stride_con * 2; + int stride_m = new_stride_con - (stride_con > 0 ? 1 : -1); + jlong trip_count = (limit_con - init_con + stride_m)/new_stride_con; + // New trip count should satisfy next conditions. + assert(trip_count > 0 && (julong)trip_count < (julong)max_juint/2, "sanity"); + uint new_trip_count = (uint)trip_count; + adjust_min_trip = (old_trip_count != new_trip_count*2); + } + + if (adjust_min_trip) { + // Step 2: Adjust the trip limit if it is called for. + // The adjustment amount is -stride. Need to make sure if the + // adjustment underflows or overflows, then the main loop is skipped. 
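
The underflow/overflow concern just stated is handled in the lines that follow by a CMoveI that saturates the adjusted limit. Stated as plain arithmetic with a 64-bit intermediate (an illustrative restatement only; the real code builds SubI/CmpI/Bool/CMoveI nodes, and the names here are invented):

#include <cstdint>

// Saturating "limit - stride" for the min-trip guard: with a positive stride
// the subtraction can only underflow toward MININT, with a negative stride it
// can only overflow toward MAXINT.
int32_t adjusted_limit(int32_t limit, int32_t stride) {
  int64_t raw = (int64_t)limit - (int64_t)stride;
  if (stride > 0 && raw < INT32_MIN) return INT32_MIN;
  if (stride < 0 && raw > INT32_MAX) return INT32_MAX;
  return (int32_t)raw;
}

When the result saturates, the main loop's zero-trip guard simply never admits it, which is exactly the "main loop is skipped" case described above.
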
+ Node* cmp = loop_end->cmp_node(); + assert(cmp->in(2) == limit, "sanity"); + assert(opaq != NULL && opaq->in(1) == limit, "sanity"); + + // Verify that policy_unroll result is still valid. + const TypeInt* limit_type = _igvn.type(limit)->is_int(); + assert(stride_con > 0 && ((limit_type->_hi - stride_con) < limit_type->_hi) || + stride_con < 0 && ((limit_type->_lo - stride_con) > limit_type->_lo), "sanity"); + + if (limit->is_Con()) { + // The check in policy_unroll and the assert above guarantee + // no underflow if limit is constant. + new_limit = _igvn.intcon(limit->get_int() - stride_con); + set_ctrl(new_limit, C->root()); + } else { + // Limit is not constant. + if (loop_head->unrolled_count() == 1) { // only for first unroll + // Separate limit by Opaque node in case it is an incremented + // variable from previous loop to avoid using pre-incremented + // value which could increase register pressure. + // Otherwise reorg_offsets() optimization will create a separate + // Opaque node for each use of trip-counter and as result + // zero trip guard limit will be different from loop limit. + assert(has_ctrl(opaq), "should have it"); + Node* opaq_ctrl = get_ctrl(opaq); + limit = new Opaque2Node( C, limit ); + register_new_node( limit, opaq_ctrl ); + } + if (stride_con > 0 && (java_subtract(limit_type->_lo, stride_con) < limit_type->_lo) || + stride_con < 0 && (java_subtract(limit_type->_hi, stride_con) > limit_type->_hi)) { + // No underflow. + new_limit = new SubINode(limit, stride); } else { - // Limit is not constant. - if (loop_head->unrolled_count() == 1) { // only for first unroll - // Separate limit by Opaque node in case it is an incremented - // variable from previous loop to avoid using pre-incremented - // value which could increase register pressure. - // Otherwise reorg_offsets() optimization will create a separate - // Opaque node for each use of trip-counter and as result - // zero trip guard limit will be different from loop limit. - assert(has_ctrl(opaq), "should have it"); - Node* opaq_ctrl = get_ctrl(opaq); - limit = new Opaque2Node( C, limit ); - register_new_node( limit, opaq_ctrl ); - } - if (stride_con > 0 && (java_subtract(limit_type->_lo, stride_con) < limit_type->_lo) || - stride_con < 0 && (java_subtract(limit_type->_hi, stride_con) > limit_type->_hi)) { - // No underflow. - new_limit = new SubINode(limit, stride); + // (limit - stride) may underflow. + // Clamp the adjustment value with MININT or MAXINT: + // + // new_limit = limit-stride + // if (stride > 0) + // new_limit = (limit < new_limit) ? MININT : new_limit; + // else + // new_limit = (limit > new_limit) ? MAXINT : new_limit; + // + BoolTest::mask bt = loop_end->test_trip(); + assert(bt == BoolTest::lt || bt == BoolTest::gt, "canonical test is expected"); + Node* adj_max = _igvn.intcon((stride_con > 0) ? min_jint : max_jint); + set_ctrl(adj_max, C->root()); + Node* old_limit = NULL; + Node* adj_limit = NULL; + Node* bol = limit->is_CMove() ? limit->in(CMoveNode::Condition) : NULL; + if (loop_head->unrolled_count() > 1 && + limit->is_CMove() && limit->Opcode() == Op_CMoveI && + limit->in(CMoveNode::IfTrue) == adj_max && + bol->as_Bool()->_test._test == bt && + bol->in(1)->Opcode() == Op_CmpI && + bol->in(1)->in(2) == limit->in(CMoveNode::IfFalse)) { + // Loop was unrolled before. + // Optimize the limit to avoid nested CMove: + // use original limit as old limit. + old_limit = bol->in(1)->in(1); + // Adjust previous adjusted limit. 
+ adj_limit = limit->in(CMoveNode::IfFalse); + adj_limit = new SubINode(adj_limit, stride); } else { - // (limit - stride) may underflow. - // Clamp the adjustment value with MININT or MAXINT: - // - // new_limit = limit-stride - // if (stride > 0) - // new_limit = (limit < new_limit) ? MININT : new_limit; - // else - // new_limit = (limit > new_limit) ? MAXINT : new_limit; - // - BoolTest::mask bt = loop_end->test_trip(); - assert(bt == BoolTest::lt || bt == BoolTest::gt, "canonical test is expected"); - Node* adj_max = _igvn.intcon((stride_con > 0) ? min_jint : max_jint); - set_ctrl(adj_max, C->root()); - Node* old_limit = NULL; - Node* adj_limit = NULL; - Node* bol = limit->is_CMove() ? limit->in(CMoveNode::Condition) : NULL; - if (loop_head->unrolled_count() > 1 && - limit->is_CMove() && limit->Opcode() == Op_CMoveI && - limit->in(CMoveNode::IfTrue) == adj_max && - bol->as_Bool()->_test._test == bt && - bol->in(1)->Opcode() == Op_CmpI && - bol->in(1)->in(2) == limit->in(CMoveNode::IfFalse)) { - // Loop was unrolled before. - // Optimize the limit to avoid nested CMove: - // use original limit as old limit. - old_limit = bol->in(1)->in(1); - // Adjust previous adjusted limit. - adj_limit = limit->in(CMoveNode::IfFalse); - adj_limit = new SubINode(adj_limit, stride); - } else { - old_limit = limit; - adj_limit = new SubINode(limit, stride); - } - assert(old_limit != NULL && adj_limit != NULL, ""); - register_new_node( adj_limit, ctrl ); // adjust amount - Node* adj_cmp = new CmpINode(old_limit, adj_limit); - register_new_node( adj_cmp, ctrl ); - Node* adj_bool = new BoolNode(adj_cmp, bt); - register_new_node( adj_bool, ctrl ); - new_limit = new CMoveINode(adj_bool, adj_limit, adj_max, TypeInt::INT); + old_limit = limit; + adj_limit = new SubINode(limit, stride); } - register_new_node(new_limit, ctrl); + assert(old_limit != NULL && adj_limit != NULL, ""); + register_new_node( adj_limit, ctrl ); // adjust amount + Node* adj_cmp = new CmpINode(old_limit, adj_limit); + register_new_node( adj_cmp, ctrl ); + Node* adj_bool = new BoolNode(adj_cmp, bt); + register_new_node( adj_bool, ctrl ); + new_limit = new CMoveINode(adj_bool, adj_limit, adj_max, TypeInt::INT); } - assert(new_limit != NULL, ""); - // Replace in loop test. - assert(loop_end->in(1)->in(1) == cmp, "sanity"); - if (cmp->outcnt() == 1 && loop_end->in(1)->outcnt() == 1) { - // Don't need to create new test since only one user. - _igvn.hash_delete(cmp); - cmp->set_req(2, new_limit); - } else { - // Create new test since it is shared. - Node* ctrl2 = loop_end->in(0); - Node* cmp2 = cmp->clone(); - cmp2->set_req(2, new_limit); - register_new_node(cmp2, ctrl2); - Node* bol2 = loop_end->in(1)->clone(); - bol2->set_req(1, cmp2); - register_new_node(bol2, ctrl2); - _igvn.replace_input_of(loop_end, 1, bol2); - } - // Step 3: Find the min-trip test guaranteed before a 'main' loop. - // Make it a 1-trip test (means at least 2 trips). - - // Guard test uses an 'opaque' node which is not shared. Hence I - // can edit it's inputs directly. Hammer in the new limit for the - // minimum-trip guard. - assert(opaq->outcnt() == 1, ""); - _igvn.replace_input_of(opaq, 1, new_limit); + register_new_node(new_limit, ctrl); } - - // Adjust max trip count. The trip count is intentionally rounded - // down here (e.g. 15-> 7-> 3-> 1) because if we unwittingly over-unroll, - // the main, unrolled, part of the loop will never execute as it is protected - // by the min-trip test. 
See bug 4834191 for a case where we over-unrolled - // and later determined that part of the unrolled loop was dead. - loop_head->set_trip_count(old_trip_count / 2); - - // Double the count of original iterations in the unrolled loop body. - loop_head->double_unrolled_count(); - - } else { // LoopLimitCheck - - // Adjust max trip count. The trip count is intentionally rounded - // down here (e.g. 15-> 7-> 3-> 1) because if we unwittingly over-unroll, - // the main, unrolled, part of the loop will never execute as it is protected - // by the min-trip test. See bug 4834191 for a case where we over-unrolled - // and later determined that part of the unrolled loop was dead. - loop_head->set_trip_count(loop_head->trip_count() / 2); - - // Double the count of original iterations in the unrolled loop body. - loop_head->double_unrolled_count(); - - // ----------- - // Step 2: Cut back the trip counter for an unroll amount of 2. - // Loop will normally trip (limit - init)/stride_con. Since it's a - // CountedLoop this is exact (stride divides limit-init exactly). - // We are going to double the loop body, so we want to knock off any - // odd iteration: (trip_cnt & ~1). Then back compute a new limit. - Node *span = new SubINode( limit, init ); - register_new_node( span, ctrl ); - Node *trip = new DivINode( 0, span, stride ); - register_new_node( trip, ctrl ); - Node *mtwo = _igvn.intcon(-2); - set_ctrl(mtwo, C->root()); - Node *rond = new AndINode( trip, mtwo ); - register_new_node( rond, ctrl ); - Node *spn2 = new MulINode( rond, stride ); - register_new_node( spn2, ctrl ); - new_limit = new AddINode( spn2, init ); - register_new_node( new_limit, ctrl ); - - // Hammer in the new limit - Node *ctrl2 = loop_end->in(0); - Node *cmp2 = new CmpINode( loop_head->incr(), new_limit ); - register_new_node( cmp2, ctrl2 ); - Node *bol2 = new BoolNode( cmp2, loop_end->test_trip() ); - register_new_node( bol2, ctrl2 ); - _igvn.replace_input_of(loop_end, CountedLoopEndNode::TestValue, bol2); - + assert(new_limit != NULL, ""); + // Replace in loop test. + assert(loop_end->in(1)->in(1) == cmp, "sanity"); + if (cmp->outcnt() == 1 && loop_end->in(1)->outcnt() == 1) { + // Don't need to create new test since only one user. + _igvn.hash_delete(cmp); + cmp->set_req(2, new_limit); + } else { + // Create new test since it is shared. + Node* ctrl2 = loop_end->in(0); + Node* cmp2 = cmp->clone(); + cmp2->set_req(2, new_limit); + register_new_node(cmp2, ctrl2); + Node* bol2 = loop_end->in(1)->clone(); + bol2->set_req(1, cmp2); + register_new_node(bol2, ctrl2); + _igvn.replace_input_of(loop_end, 1, bol2); + } // Step 3: Find the min-trip test guaranteed before a 'main' loop. // Make it a 1-trip test (means at least 2 trips). - if( adjust_min_trip ) { - assert( new_limit != NULL, "" ); - // Guard test uses an 'opaque' node which is not shared. Hence I - // can edit it's inputs directly. Hammer in the new limit for the - // minimum-trip guard. - assert( opaq->outcnt() == 1, "" ); - _igvn.hash_delete(opaq); - opaq->set_req(1, new_limit); - } - } // LoopLimitCheck + + // Guard test uses an 'opaque' node which is not shared. Hence I + // can edit it's inputs directly. Hammer in the new limit for the + // minimum-trip guard. + assert(opaq->outcnt() == 1, ""); + _igvn.replace_input_of(opaq, 1, new_limit); + } + + // Adjust max trip count. The trip count is intentionally rounded + // down here (e.g. 
15-> 7-> 3-> 1) because if we unwittingly over-unroll, + // the main, unrolled, part of the loop will never execute as it is protected + // by the min-trip test. See bug 4834191 for a case where we over-unrolled + // and later determined that part of the unrolled loop was dead. + loop_head->set_trip_count(old_trip_count / 2); + + // Double the count of original iterations in the unrolled loop body. + loop_head->double_unrolled_count(); // --------- // Step 4: Clone the loop body. Move it inside the loop. This loop body @@ -1904,7 +1830,6 @@ // ) if (low_limit->get_int() == -max_jint) { - if (!RangeLimitCheck) return; // We need this guard when scale*pre_limit+offset >= limit // due to underflow. So we need execute pre-loop until // scale*I+offset >= min_int. But (min_int-offset) will @@ -1956,7 +1881,6 @@ *pre_limit = adjust_limit((-stride_con), scale, plus_one, upper_limit, *pre_limit, pre_ctrl); if (low_limit->get_int() == -max_jint) { - if (!RangeLimitCheck) return; // We need this guard when scale*main_limit+offset >= limit // due to underflow. So we need execute main-loop while // scale*I+offset+1 > min_int. But (min_int-offset-1) will @@ -2091,7 +2015,7 @@ //------------------------------do_range_check--------------------------------- // Eliminate range-checks and other trip-counter vs loop-invariant tests. -void PhaseIdealLoop::do_range_check( IdealLoopTree *loop, Node_List &old_new ) { +int PhaseIdealLoop::do_range_check( IdealLoopTree *loop, Node_List &old_new ) { #ifndef PRODUCT if (PrintOpto && VerifyLoopOptimizations) { tty->print("Range Check Elimination "); @@ -2103,10 +2027,12 @@ #endif assert(RangeCheckElimination, ""); CountedLoopNode *cl = loop->_head->as_CountedLoop(); + // If we fail before trying to eliminate range checks, set multiversion state + int closed_range_checks = 1; // protect against stride not being a constant if (!cl->stride_is_con()) - return; + return closed_range_checks; // Find the trip counter; we are iteration splitting based on it Node *trip_counter = cl->phi(); @@ -2117,8 +2043,8 @@ // Check graph shape. Cannot optimize a loop if zero-trip // Opaque1 node is optimized away and then another round // of loop opts attempted. - if (!is_canonical_main_loop_entry(cl)) { - return; + if (!is_canonical_loop_entry(cl)) { + return closed_range_checks; } // Need to find the main-loop zero-trip guard @@ -2132,7 +2058,7 @@ Node *p_f = iffm->in(0); // pre loop may have been optimized out if (p_f->Opcode() != Op_IfFalse) { - return; + return closed_range_checks; } CountedLoopEndNode *pre_end = p_f->in(0)->as_CountedLoopEnd(); assert(pre_end->loopnode()->is_pre_loop(), ""); @@ -2141,7 +2067,7 @@ // optimized away and then another round of loop opts attempted. // We can not optimize this particular loop in that case. if (pre_opaq1->Opcode() != Op_Opaque1) - return; + return closed_range_checks; Opaque1Node *pre_opaq = (Opaque1Node*)pre_opaq1; Node *pre_limit = pre_opaq->in(1); @@ -2152,7 +2078,7 @@ // pre-loop Opaque1 node. Node *orig_limit = pre_opaq->original_loop_limit(); if (orig_limit == NULL || _igvn.type(orig_limit) == Type::TOP) - return; + return closed_range_checks; // Must know if its a count-up or count-down loop @@ -2173,6 +2099,10 @@ // executed. bool conditional_rc = false; + // Count number of range checks and reduce by load range limits, if zero, + // the loop is in canonical form to multiversion. + closed_range_checks = 0; + // Check loop body for tests of trip-counter plus loop-invariant vs // loop-invariant. 
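
The checks counted in the scan that follows are tests of the form "scale*i + offset is within a.length" inside the loop body. Range check elimination narrows the main-loop limits so those tests can be dropped there, leaving the pre and post loops to run with checks. A toy source-level illustration of that effect (invented names, non-negative init and a scale of 1 assumed; not compiler code):

#include <algorithm>
#include <stdexcept>

// Before RCE: every iteration performs the bounds test.
void fill_checked(int* a, int a_len, int init, int limit) {
  for (int i = init; i < limit; ++i) {
    if (i < 0 || i >= a_len) throw std::out_of_range("range check");
    a[i] = i;
  }
}

// After RCE (conceptually): assuming 0 <= init, the main loop is limited to
// the index range proven in bounds, so its body carries no check; a post loop
// with the original check covers the rest.
void fill_rce(int* a, int a_len, int init, int limit) {
  int main_limit = std::min(limit, a_len);   // check folded into the loop limit
  int i = init;
  for (; i < main_limit; ++i) {
    a[i] = i;                                // no per-iteration range check
  }
  for (; i < limit; ++i) {
    if (i < 0 || i >= a_len) throw std::out_of_range("range check");
    a[i] = i;
  }
}
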
for( uint i = 0; i < loop->_body.size(); i++ ) { @@ -2181,6 +2111,7 @@ iff->Opcode() == Op_RangeCheck) { // Test? // Test is an IfNode, has 2 projections. If BOTH are in the loop // we need loop unswitching instead of iteration splitting. + closed_range_checks++; Node *exit = loop->is_loop_exit(iff); if( !exit ) continue; int flip = (exit->Opcode() == Op_IfTrue) ? 1 : 0; @@ -2258,7 +2189,7 @@ add_constraint( stride_con, scale_con, offset, zero, limit, pre_ctrl, &pre_limit, &main_limit ); if (!conditional_rc) { // (0-offset)/scale could be outside of loop iterations range. - conditional_rc = !loop->dominates_backedge(iff) || RangeLimitCheck; + conditional_rc = !loop->dominates_backedge(iff); } } else { if (PrintOpto) { @@ -2275,7 +2206,7 @@ scale_con = -scale_con; offset = new SubINode( zero, offset ); register_new_node( offset, pre_ctrl ); - limit = new SubINode( zero, limit ); + limit = new SubINode( zero, limit ); register_new_node( limit, pre_ctrl ); // Fall into LE case case BoolTest::le: @@ -2294,7 +2225,7 @@ // ((MIN_INT+1)-offset)/scale could be outside of loop iterations range. // Note: negative offset is replaced with 0 but (MIN_INT+1)/scale could // still be outside of loop range. - conditional_rc = !loop->dominates_backedge(iff) || RangeLimitCheck; + conditional_rc = !loop->dominates_backedge(iff); } break; default: @@ -2324,6 +2255,9 @@ --imax; } } + if (limit->Opcode() == Op_LoadRange) { + closed_range_checks--; + } } // End of is IF @@ -2340,26 +2274,6 @@ // Note:: we are making the main loop limit no longer precise; // need to round up based on stride. cl->set_nonexact_trip_count(); - if (!LoopLimitCheck && stride_con != 1 && stride_con != -1) { // Cutout for common case - // "Standard" round-up logic: ([main_limit-init+(y-1)]/y)*y+init - // Hopefully, compiler will optimize for powers of 2. - Node *ctrl = get_ctrl(main_limit); - Node *stride = cl->stride(); - Node *init = cl->init_trip()->uncast(); - Node *span = new SubINode(main_limit,init); - register_new_node(span,ctrl); - Node *rndup = _igvn.intcon(stride_con + ((stride_con>0)?-1:1)); - Node *add = new AddINode(span,rndup); - register_new_node(add,ctrl); - Node *div = new DivINode(0,add,stride); - register_new_node(div,ctrl); - Node *mul = new MulINode(div,stride); - register_new_node(mul,ctrl); - Node *newlim = new AddINode(mul,init); - register_new_node(newlim,ctrl); - main_limit = newlim; - } - Node *main_cle = cl->loopexit(); Node *main_bol = main_cle->in(1); // Hacking loop bounds; need private copies of exit test @@ -2379,6 +2293,169 @@ // The OpaqueNode is unshared by design assert( opqzm->outcnt() == 1, "cannot hack shared node" ); _igvn.replace_input_of(opqzm, 1, main_limit); + + return closed_range_checks; +} + +//------------------------------has_range_checks------------------------------- +// Check to see if RCE cleaned the current loop of range-checks. 
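
Both the counting added to do_range_check above and the has_range_checks pass introduced below are simple scans over the loop body for If/RangeCheck nodes, with the result cached in a loop flag so the walk happens only once. A stripped-down model of that scan (a toy node representation, not the C2 classes; all names invented):

#include <vector>

enum Opcode { Op_Nop, Op_If, Op_RangeCheck, Op_Other };

struct ToyLoop {
  std::vector<Opcode> body;
  bool checked = false;           // analogue of LoopRCEChecked
  bool has_range_checks = false;  // analogue of HasRangeChecks
};

// Scan the body once and remember whether any range-check-like test remains.
void mark_range_checks(ToyLoop& loop) {
  if (loop.checked) return;       // already examined on an earlier pass
  for (Opcode op : loop.body) {
    if (op == Op_If || op == Op_RangeCheck) {
      loop.has_range_checks = true;
      break;
    }
  }
  loop.checked = true;
}
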
+void PhaseIdealLoop::has_range_checks(IdealLoopTree *loop) { + assert(RangeCheckElimination, ""); + + // skip if not a counted loop + if (!loop->is_counted()) return; + + CountedLoopNode *cl = loop->_head->as_CountedLoop(); + + // skip this loop if it is already checked + if (cl->has_been_range_checked()) return; + + // Now check for existance of range checks + for (uint i = 0; i < loop->_body.size(); i++) { + Node *iff = loop->_body[i]; + int iff_opc = iff->Opcode(); + if (iff_opc == Op_If || iff_opc == Op_RangeCheck) { + cl->mark_has_range_checks(); + break; + } + } + cl->set_has_been_range_checked(); +} + +//-------------------------multi_version_post_loops---------------------------- +// Check the range checks that remain, if simple, use the bounds to guard +// which version to a post loop we execute, one with range checks or one without +bool PhaseIdealLoop::multi_version_post_loops(IdealLoopTree *rce_loop, IdealLoopTree *legacy_loop) { + bool multi_version_succeeded = false; + assert(RangeCheckElimination, ""); + CountedLoopNode *legacy_cl = legacy_loop->_head->as_CountedLoop(); + assert(legacy_cl->is_post_loop(), ""); + + // Check for existance of range checks using the unique instance to make a guard with + Unique_Node_List worklist; + for (uint i = 0; i < legacy_loop->_body.size(); i++) { + Node *iff = legacy_loop->_body[i]; + int iff_opc = iff->Opcode(); + if (iff_opc == Op_If || iff_opc == Op_RangeCheck) { + worklist.push(iff); + } + } + + // Find RCE'd post loop so that we can stage its guard. + if (!is_canonical_loop_entry(legacy_cl)) return multi_version_succeeded; + Node* ctrl = legacy_cl->in(LoopNode::EntryControl); + Node* iffm = ctrl->in(0); + + // Now we test that both the post loops are connected + Node* post_loop_region = iffm->in(0); + if (post_loop_region == NULL) return multi_version_succeeded; + if (!post_loop_region->is_Region()) return multi_version_succeeded; + Node* covering_region = post_loop_region->in(RegionNode::Control+1); + if (covering_region == NULL) return multi_version_succeeded; + if (!covering_region->is_Region()) return multi_version_succeeded; + Node* p_f = covering_region->in(RegionNode::Control); + if (p_f == NULL) return multi_version_succeeded; + if (!p_f->is_IfFalse()) return multi_version_succeeded; + if (!p_f->in(0)->is_CountedLoopEnd()) return multi_version_succeeded; + CountedLoopEndNode* rce_loop_end = p_f->in(0)->as_CountedLoopEnd(); + if (rce_loop_end == NULL) return multi_version_succeeded; + CountedLoopNode* rce_cl = rce_loop_end->loopnode(); + if (rce_cl == NULL || !rce_cl->is_post_loop()) return multi_version_succeeded; + CountedLoopNode *known_rce_cl = rce_loop->_head->as_CountedLoop(); + if (rce_cl != known_rce_cl) return multi_version_succeeded; + + // Then we fetch the cover entry test + ctrl = rce_cl->in(LoopNode::EntryControl); + if (!ctrl->is_IfTrue() && !ctrl->is_IfFalse()) return multi_version_succeeded; + +#ifndef PRODUCT + if (TraceLoopOpts) { + tty->print("PostMultiVersion\n"); + rce_loop->dump_head(); + legacy_loop->dump_head(); + } +#endif + + // Now fetch the limit we want to compare against + Node *limit = rce_cl->limit(); + bool first_time = true; + + // If we got this far, we identified the post loop which has been RCE'd and + // we have a work list. Now we will try to transform the if guard to cause + // the loop pair to be multi version executed with the determination left to runtime + // or the optimizer if full information is known about the given arrays at compile time. 
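
The transformation described above ends up limiting the check-free post loop by the MIN of the array lengths (LoadRange values) it depends on and the original limit, built in the code that follows. In source terms the multiversioned pair behaves roughly like this (a sketch with two invented arrays; the real decision is made on the C2 graph, not in source):

#include <algorithm>
#include <stdexcept>

void copy_tail(int* dst, int dst_len, const int* src, int src_len,
               int i, int limit) {
  // The RCE'd post loop's limit becomes the MIN of the involved lengths and
  // the original limit, so it can run without per-iteration checks as far as
  // that is provably safe...
  int safe_limit = std::min(std::min(dst_len, src_len), limit);
  for (; i < safe_limit; ++i) {
    dst[i] = src[i];                          // no range checks
  }
  // ...and the legacy post loop, which kept its checks, finishes the rest.
  for (; i < limit; ++i) {
    if (i >= src_len) throw std::out_of_range("src");
    if (i >= dst_len) throw std::out_of_range("dst");
    dst[i] = src[i];
  }
}

When the arrays are long enough, the checked loop sees an empty range at runtime, which is the payoff the guard is aiming for.
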
+ Node *last_min = NULL; + multi_version_succeeded = true; + while (worklist.size()) { + Node* rc_iffm = worklist.pop(); + if (rc_iffm->is_If()) { + Node *rc_bolzm = rc_iffm->in(1); + if (rc_bolzm->is_Bool()) { + Node *rc_cmpzm = rc_bolzm->in(1); + if (rc_cmpzm->is_Cmp()) { + Node *rc_left = rc_cmpzm->in(2); + if (rc_left->Opcode() != Op_LoadRange) { + multi_version_succeeded = false; + break; + } + if (first_time) { + last_min = rc_left; + first_time = false; + } else { + Node *cur_min = new MinINode(last_min, rc_left); + last_min = cur_min; + _igvn.register_new_node_with_optimizer(last_min); + } + } + } + } + } + + // All we have to do is update the limit of the rce loop + // with the min of our expression and the current limit. + // We will use this expression to replace the current limit. + if (last_min && multi_version_succeeded) { + Node *cur_min = new MinINode(last_min, limit); + _igvn.register_new_node_with_optimizer(cur_min); + Node *cmp_node = rce_loop_end->cmp_node(); + _igvn.replace_input_of(cmp_node, 2, cur_min); + set_idom(cmp_node, cur_min, dom_depth(ctrl)); + set_ctrl(cur_min, ctrl); + set_loop(cur_min, rce_loop->_parent); + + legacy_cl->mark_is_multiversioned(); + rce_cl->mark_is_multiversioned(); + multi_version_succeeded = true; + + C->set_major_progress(); + } + + return multi_version_succeeded; +} + +//-------------------------poison_rce_post_loop-------------------------------- +// Causes the rce'd post loop to be optimized away if multiverioning fails +void PhaseIdealLoop::poison_rce_post_loop(IdealLoopTree *rce_loop) { + CountedLoopNode *rce_cl = rce_loop->_head->as_CountedLoop(); + Node* ctrl = rce_cl->in(LoopNode::EntryControl); + if (ctrl->is_IfTrue() || ctrl->is_IfFalse()) { + Node* iffm = ctrl->in(0); + if (iffm->is_If()) { + Node* cur_bool = iffm->in(1); + if (cur_bool->is_Bool()) { + Node* cur_cmp = cur_bool->in(1); + if (cur_cmp->is_Cmp()) { + BoolTest::mask new_test = BoolTest::gt; + BoolNode *new_bool = new BoolNode(cur_cmp, new_test); + _igvn.replace_node(cur_bool, new_bool); + _igvn._worklist.push(new_bool); + Node* left_op = cur_cmp->in(1); + _igvn.replace_input_of(cur_cmp, 2, left_op); + C->set_major_progress(); + } + } + } + } } //------------------------------DCE_loop_body---------------------------------- @@ -2738,8 +2815,20 @@ // Adjust the pre- and main-loop limits to let the pre and post loops run // with full checks, but the main-loop with no checks. Remove said // checks from the main body. - if (should_rce) - phase->do_range_check(this,old_new); + if (should_rce) { + if (phase->do_range_check(this, old_new) != 0) { + cl->mark_has_range_checks(); + } + } else { + phase->has_range_checks(this); + } + + if (should_unroll && !should_peel && PostLoopMultiversioning) { + // Try to setup multiversioning on main loops before they are unrolled + if (cl->is_main_loop() && (cl->unrolled_count() == 1)) { + phase->insert_scalar_rced_post_loop(this, old_new); + } + } // Double loop body for unrolling. 
Adjust the minimum-trip test (will do // twice as many iterations as before) and the main body limit (only do diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/opto/loopUnswitch.cpp --- a/hotspot/src/share/vm/opto/loopUnswitch.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/opto/loopUnswitch.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -138,7 +138,7 @@ Node* uniqc = proj_true->unique_ctrl_out(); Node* entry = head->in(LoopNode::EntryControl); Node* predicate = find_predicate(entry); - if (predicate != NULL && LoopLimitCheck && UseLoopPredicate) { + if (predicate != NULL && UseLoopPredicate) { // We may have two predicates, find first. entry = find_predicate(entry->in(0)->in(0)); if (entry != NULL) predicate = entry; diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/opto/loopnode.cpp --- a/hotspot/src/share/vm/opto/loopnode.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/opto/loopnode.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,6 +27,7 @@ #include "compiler/compileLog.hpp" #include "libadt/vectset.hpp" #include "memory/allocation.inline.hpp" +#include "memory/resourceArea.hpp" #include "opto/addnode.hpp" #include "opto/callnode.hpp" #include "opto/connode.hpp" @@ -463,8 +464,6 @@ Node *hook = new Node(6); - if (LoopLimitCheck) { - // =================================================== // Generate loop limit check to avoid integer overflow // in cases like next (cyclic loops): @@ -593,103 +592,6 @@ } set_subtree_ctrl( limit ); - } else { // LoopLimitCheck - - // If compare points to incr, we are ok. Otherwise the compare - // can directly point to the phi; in this case adjust the compare so that - // it points to the incr by adjusting the limit. - if (cmp->in(1) == phi || cmp->in(2) == phi) - limit = gvn->transform(new AddINode(limit,stride)); - - // trip-count for +-tive stride should be: (limit - init_trip + stride - 1)/stride. - // Final value for iterator should be: trip_count * stride + init_trip. 
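
For the common case of a positive stride and an i < limit exit test, the trip-count and final-value formulas in the comment above reduce to the arithmetic below (illustrative only, invented names, 64-bit intermediates used to sidestep the overflow the legacy code could suffer):

#include <cstdint>

// trip_count = ceil((limit - init) / stride) for stride > 0 and an i < limit
// exit test; the iterator's exact final value is init + trip_count * stride.
struct TripInfo { int64_t trips; int64_t final_value; };

TripInfo counted_loop_trips(int32_t init, int32_t limit, int32_t stride) {
  // Assumes stride > 0 and init < limit (the loop is entered at least once).
  int64_t trips = ((int64_t)limit - init + stride - 1) / stride;
  int64_t final_value = (int64_t)init + trips * stride;
  return { trips, final_value };
}

For example, counted_loop_trips(0, 10, 3) yields 4 trips and a final value of 12.
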
- Node *one_p = gvn->intcon( 1); - Node *one_m = gvn->intcon(-1); - - Node *trip_count = NULL; - switch( bt ) { - case BoolTest::eq: - ShouldNotReachHere(); - case BoolTest::ne: // Ahh, the case we desire - if (stride_con == 1) - trip_count = gvn->transform(new SubINode(limit,init_trip)); - else if (stride_con == -1) - trip_count = gvn->transform(new SubINode(init_trip,limit)); - else - ShouldNotReachHere(); - set_subtree_ctrl(trip_count); - //_loop.map(trip_count->_idx,loop(limit)); - break; - case BoolTest::le: // Maybe convert to '<' case - limit = gvn->transform(new AddINode(limit,one_p)); - set_subtree_ctrl( limit ); - hook->init_req(4, limit); - - bt = BoolTest::lt; - // Make the new limit be in the same loop nest as the old limit - //_loop.map(limit->_idx,limit_loop); - // Fall into next case - case BoolTest::lt: { // Maybe convert to '!=' case - if (stride_con < 0) // Count down loop rolls through MAXINT - ShouldNotReachHere(); - Node *range = gvn->transform(new SubINode(limit,init_trip)); - set_subtree_ctrl( range ); - hook->init_req(0, range); - - Node *bias = gvn->transform(new AddINode(range,stride)); - set_subtree_ctrl( bias ); - hook->init_req(1, bias); - - Node *bias1 = gvn->transform(new AddINode(bias,one_m)); - set_subtree_ctrl( bias1 ); - hook->init_req(2, bias1); - - trip_count = gvn->transform(new DivINode(0,bias1,stride)); - set_subtree_ctrl( trip_count ); - hook->init_req(3, trip_count); - break; - } - - case BoolTest::ge: // Maybe convert to '>' case - limit = gvn->transform(new AddINode(limit,one_m)); - set_subtree_ctrl( limit ); - hook->init_req(4 ,limit); - - bt = BoolTest::gt; - // Make the new limit be in the same loop nest as the old limit - //_loop.map(limit->_idx,limit_loop); - // Fall into next case - case BoolTest::gt: { // Maybe convert to '!=' case - if (stride_con > 0) // count up loop rolls through MININT - ShouldNotReachHere(); - Node *range = gvn->transform(new SubINode(limit,init_trip)); - set_subtree_ctrl( range ); - hook->init_req(0, range); - - Node *bias = gvn->transform(new AddINode(range,stride)); - set_subtree_ctrl( bias ); - hook->init_req(1, bias); - - Node *bias1 = gvn->transform(new AddINode(bias,one_p)); - set_subtree_ctrl( bias1 ); - hook->init_req(2, bias1); - - trip_count = gvn->transform(new DivINode(0,bias1,stride)); - set_subtree_ctrl( trip_count ); - hook->init_req(3, trip_count); - break; - } - } // switch( bt ) - - Node *span = gvn->transform(new MulINode(trip_count,stride)); - set_subtree_ctrl( span ); - hook->init_req(5, span); - - limit = gvn->transform(new AddINode(span,init_trip)); - set_subtree_ctrl( limit ); - - } // LoopLimitCheck - if (!UseCountedLoopSafepoints) { // Check for SafePoint on backedge and remove Node *sfpt = x->in(LoopNode::LoopBackControl); @@ -829,7 +731,7 @@ CountedLoopNode *cl = loop->_head->as_CountedLoop(); assert(cl->is_valid_counted_loop(), ""); - if (!LoopLimitCheck || ABS(cl->stride_con()) == 1 || + if (ABS(cl->stride_con()) == 1 || cl->limit()->Opcode() == Op_LoopLimit) { // Old code has exact limit (it could be incorrect in case of int overflow). // Loop limit is exact with stride == 1. And loop may already have exact limit. 
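
The caveat above, that an "exact" limit computed as init + trip_count*stride can be wrong under int overflow, is easy to demonstrate with concrete numbers. This is illustrative arithmetic only, not compiler code:

#include <cstdint>
#include <cstdio>

int main() {
  // i starts at 0, steps by 5, exits on i < limit with limit = INT_MAX - 1.
  int64_t init = 0, stride = 5, limit = 2147483646;
  int64_t trips = (limit - init + stride - 1) / stride;   // 429496730 iterations
  int64_t exact = init + trips * stride;                  // 2147483650
  // The exact final value does not fit in a 32-bit int, so a 32-bit
  // computation of the same quantity would wrap around.
  std::printf("exact final value = %lld\n", (long long)exact);
  return 0;
}
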
@@ -1897,12 +1799,10 @@ tty->print("Loop: N%d/N%d ",_head->_idx,_tail->_idx); if (_irreducible) tty->print(" IRREDUCIBLE"); Node* entry = _head->in(LoopNode::EntryControl); - if (LoopLimitCheck) { - Node* predicate = PhaseIdealLoop::find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check); - if (predicate != NULL ) { - tty->print(" limit_check"); - entry = entry->in(0)->in(0); - } + Node* predicate = PhaseIdealLoop::find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check); + if (predicate != NULL ) { + tty->print(" limit_check"); + entry = entry->in(0)->in(0); } if (UseLoopPredicate) { entry = PhaseIdealLoop::find_predicate_insertion_point(entry, Deoptimization::Reason_predicate); @@ -1933,6 +1833,9 @@ if (cl->is_pre_loop ()) tty->print(" pre" ); if (cl->is_main_loop()) tty->print(" main"); if (cl->is_post_loop()) tty->print(" post"); + if (cl->is_vectorized_loop()) tty->print(" vector"); + if (cl->range_checks_present()) tty->print(" rc "); + if (cl->is_multiversioned()) tty->print(" multi "); } if (_has_call) tty->print(" has_call"); if (_has_sfpt) tty->print(" has_sfpt"); @@ -2322,7 +2225,7 @@ // Some parser-inserted loop predicates could never be used by loop // predication or they were moved away from loop during some optimizations. // For example, peeling. Eliminate them before next loop optimizations. - if (UseLoopPredicate || LoopLimitCheck) { + if (UseLoopPredicate) { eliminate_useless_predicates(); } @@ -2451,7 +2354,30 @@ for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) { IdealLoopTree* lpt = iter.current(); if (lpt->is_counted()) { - sw.transform_loop(lpt, true); + CountedLoopNode *cl = lpt->_head->as_CountedLoop(); + + if (PostLoopMultiversioning && cl->is_rce_post_loop() && !cl->is_vectorized_loop()) { + // Check that the rce'd post loop is encountered first, multiversion after all + // major main loop optimization are concluded + if (!C->major_progress()) { + IdealLoopTree *lpt_next = lpt->_next; + if (lpt_next && lpt_next->is_counted()) { + CountedLoopNode *cl = lpt_next->_head->as_CountedLoop(); + has_range_checks(lpt_next); + if (cl->is_post_loop() && cl->range_checks_present()) { + if (!cl->is_multiversioned()) { + if (multi_version_post_loops(lpt, lpt_next) == false) { + // Cause the rce loop to be optimized away if we fail + cl->mark_is_multiversioned(); + poison_rce_post_loop(lpt); + } + } + } + } + } + } else if (cl->is_main_loop()) { + sw.transform_loop(lpt, true); + } } } } @@ -3285,8 +3211,10 @@ // loop unswitching, and IGVN, or a combination of them) can freely change // the graph's shape. As a result, the graph shape outlined below cannot // be guaranteed anymore. 
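
The renamed is_canonical_loop_entry below now accepts post loops as well as main loops and searches both inputs of the zero-trip-guard compare for the Opaque1 node, since the compare's operands can end up flipped. A toy model of that input scan (simplified node type, invented names and opcode value; not the C2 Node class):

#include <cstddef>
#include <vector>

struct ToyNode {
  int opcode;                      // stand-in for the C2 opcode
  std::vector<const ToyNode*> in;  // inputs; in[0] unused to mimic C2 layout
};

const int Op_Opaque1 = 42;         // arbitrary illustrative opcode value

// True if any input of the zero-trip-guard compare is the Opaque1 node,
// regardless of which side of the compare it ended up on.
bool compare_has_opaque_input(const ToyNode& cmp) {
  for (std::size_t i = 1; i < cmp.in.size(); ++i) {
    if (cmp.in[i] != nullptr && cmp.in[i]->opcode == Op_Opaque1) {
      return true;
    }
  }
  return false;
}
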
-bool PhaseIdealLoop::is_canonical_main_loop_entry(CountedLoopNode* cl) { - assert(cl->is_main_loop(), "check should be applied to main loops"); +bool PhaseIdealLoop::is_canonical_loop_entry(CountedLoopNode* cl) { + if (!cl->is_main_loop() && !cl->is_post_loop()) { + return false; + } Node* ctrl = cl->in(LoopNode::EntryControl); if (ctrl == NULL || (!ctrl->is_IfTrue() && !ctrl->is_IfFalse())) { return false; @@ -3303,8 +3231,16 @@ if (cmpzm == NULL || !cmpzm->is_Cmp()) { return false; } - Node* opqzm = cmpzm->in(2); - if (opqzm == NULL || opqzm->Opcode() != Op_Opaque1) { + // compares can get conditionally flipped + bool found_opaque = false; + for (uint i = 1; i < cmpzm->req(); i++) { + Node* opnd = cmpzm->in(i); + if (opnd && opnd->Opcode() == Op_Opaque1) { + found_opaque = true; + break; + } + } + if (!found_opaque) { return false; } return true; diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/opto/loopnode.hpp --- a/hotspot/src/share/vm/opto/loopnode.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/opto/loopnode.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -69,9 +69,13 @@ PassedSlpAnalysis=512, DoUnrollOnly=1024, VectorizedLoop=2048, - HasAtomicPostLoop=4096 }; + HasAtomicPostLoop=4096, + HasRangeChecks=8192, + IsMultiversioned=16384}; char _unswitch_count; enum { _unswitch_max=3 }; + char _postloop_flags; + enum { LoopNotRCEChecked = 0, LoopRCEChecked = 1, RCEPostLoop = 2 }; public: // Names for edge indices @@ -80,9 +84,13 @@ int is_inner_loop() const { return _loop_flags & InnerLoop; } void set_inner_loop() { _loop_flags |= InnerLoop; } + int range_checks_present() const { return _loop_flags & HasRangeChecks; } + int is_multiversioned() const { return _loop_flags & IsMultiversioned; } + int is_vectorized_loop() const { return _loop_flags & VectorizedLoop; } int is_partial_peel_loop() const { return _loop_flags & PartialPeelLoop; } void set_partial_peel_loop() { _loop_flags |= PartialPeelLoop; } int partial_peel_has_failed() const { return _loop_flags & PartialPeelFailed; } + void mark_partial_peel_failed() { _loop_flags |= PartialPeelFailed; } void mark_has_reductions() { _loop_flags |= HasReductions; } void mark_was_slp() { _loop_flags |= WasSlpAnalyzed; } @@ -90,15 +98,23 @@ void mark_do_unroll_only() { _loop_flags |= DoUnrollOnly; } void mark_loop_vectorized() { _loop_flags |= VectorizedLoop; } void mark_has_atomic_post_loop() { _loop_flags |= HasAtomicPostLoop; } + void mark_has_range_checks() { _loop_flags |= HasRangeChecks; } + void mark_is_multiversioned() { _loop_flags |= IsMultiversioned; } int unswitch_max() { return _unswitch_max; } int unswitch_count() { return _unswitch_count; } + + int has_been_range_checked() const { return _postloop_flags & LoopRCEChecked; } + void set_has_been_range_checked() { _postloop_flags |= LoopRCEChecked; } + int is_rce_post_loop() const { return _postloop_flags & RCEPostLoop; } + void set_is_rce_post_loop() { _postloop_flags |= RCEPostLoop; } + void set_unswitch_count(int val) { assert (val <= unswitch_max(), "too many unswitches"); _unswitch_count = val; } - LoopNode( Node *entry, Node *backedge ) : RegionNode(3), _loop_flags(0), _unswitch_count(0) { + LoopNode(Node *entry, Node *backedge) : RegionNode(3), _loop_flags(0), _unswitch_count(0), _postloop_flags(0) { init_class_id(Class_Loop); init_req(EntryControl, entry); init_req(LoopBackControl, backedge); @@ -225,7 +241,6 @@ int has_passed_slp () const { return (_loop_flags&PassedSlpAnalysis) == PassedSlpAnalysis; } int do_unroll_only () const { return 
(_loop_flags&DoUnrollOnly) == DoUnrollOnly; } int is_main_no_pre_loop() const { return _loop_flags & MainHasNoPreLoop; } - int is_vectorized_loop () const { return (_loop_flags & VectorizedLoop) == VectorizedLoop; } int has_atomic_post_loop () const { return (_loop_flags & HasAtomicPostLoop) == HasAtomicPostLoop; } void set_main_no_pre_loop() { _loop_flags |= MainHasNoPreLoop; } @@ -657,7 +672,7 @@ public: - static bool is_canonical_main_loop_entry(CountedLoopNode* cl); + static bool is_canonical_loop_entry(CountedLoopNode* cl); bool has_node( Node* n ) const { guarantee(n != NULL, "No Node."); @@ -911,6 +926,15 @@ // Add pre and post loops around the given loop. These loops are used // during RCE, unrolling and aligning loops. void insert_pre_post_loops( IdealLoopTree *loop, Node_List &old_new, bool peel_only ); + + // Add post loop after the given loop. + Node *insert_post_loop(IdealLoopTree *loop, Node_List &old_new, + CountedLoopNode *main_head, CountedLoopEndNode *main_end, + Node *incr, Node *limit, CountedLoopNode *&post_head); + + // Add an RCE'd post loop which we will multi-version adapt for run time test path usage + void insert_scalar_rced_post_loop( IdealLoopTree *loop, Node_List &old_new ); + // Add a vector post loop between a vector main loop and the current post loop void insert_vector_post_loop(IdealLoopTree *loop, Node_List &old_new); // If Node n lives in the back_ctrl block, we clone a private version of n @@ -983,7 +1007,17 @@ } // Eliminate range-checks and other trip-counter vs loop-invariant tests. - void do_range_check( IdealLoopTree *loop, Node_List &old_new ); + int do_range_check( IdealLoopTree *loop, Node_List &old_new ); + + // Check to see if do_range_check(...) cleaned the main loop of range-checks + void has_range_checks(IdealLoopTree *loop); + + // Process post loops which have range checks and try to build a multi-version + // guard to safely determine if we can execute the post loop which was RCE'd. + bool multi_version_post_loops(IdealLoopTree *rce_loop, IdealLoopTree *legacy_loop); + + // Cause the rce'd post loop to optimized away, this happens if we cannot complete multiverioning + void poison_rce_post_loop(IdealLoopTree *rce_loop); // Create a slow version of the loop by cloning the loop // and inserting an if to select fast-slow versions. diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/opto/loopopts.cpp --- a/hotspot/src/share/vm/opto/loopopts.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/opto/loopopts.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,6 +24,7 @@ #include "precompiled.hpp" #include "memory/allocation.inline.hpp" +#include "memory/resourceArea.hpp" #include "opto/addnode.hpp" #include "opto/castnode.hpp" #include "opto/connode.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/opto/matcher.cpp --- a/hotspot/src/share/vm/opto/matcher.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/opto/matcher.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -24,6 +24,7 @@ #include "precompiled.hpp" #include "memory/allocation.inline.hpp" +#include "memory/resourceArea.hpp" #include "opto/ad.hpp" #include "opto/addnode.hpp" #include "opto/callnode.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/opto/memnode.cpp --- a/hotspot/src/share/vm/opto/memnode.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/opto/memnode.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,6 +26,7 @@ #include "classfile/systemDictionary.hpp" #include "compiler/compileLog.hpp" #include "memory/allocation.inline.hpp" +#include "memory/resourceArea.hpp" #include "oops/objArrayKlass.hpp" #include "opto/addnode.hpp" #include "opto/arraycopynode.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/opto/node.cpp --- a/hotspot/src/share/vm/opto/node.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/opto/node.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -25,6 +25,7 @@ #include "precompiled.hpp" #include "libadt/vectset.hpp" #include "memory/allocation.inline.hpp" +#include "memory/resourceArea.hpp" #include "opto/castnode.hpp" #include "opto/cfgnode.hpp" #include "opto/connode.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/opto/parse1.cpp --- a/hotspot/src/share/vm/opto/parse1.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/opto/parse1.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,7 @@ #include "precompiled.hpp" #include "compiler/compileLog.hpp" #include "interpreter/linkResolver.hpp" +#include "memory/resourceArea.hpp" #include "oops/method.hpp" #include "opto/addnode.hpp" #include "opto/c2compiler.hpp" @@ -661,8 +662,7 @@ // (Note that dead locals do not get phis built, ever.) ensure_phis_everywhere(); - if (block->is_SEL_head() && - (UseLoopPredicate || LoopLimitCheck)) { + if (block->is_SEL_head() && UseLoopPredicate) { // Add predicate to single entry (not irreducible) loop head. assert(!block->has_merged_backedge(), "only entry paths should be merged for now"); // Need correct bci for predicate. diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/opto/parse2.cpp --- a/hotspot/src/share/vm/opto/parse2.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/opto/parse2.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -28,6 +28,7 @@ #include "classfile/vmSymbols.hpp" #include "compiler/compileLog.hpp" #include "interpreter/linkResolver.hpp" +#include "memory/resourceArea.hpp" #include "memory/universe.inline.hpp" #include "oops/oop.inline.hpp" #include "opto/addnode.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/opto/phaseX.cpp --- a/hotspot/src/share/vm/opto/phaseX.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/opto/phaseX.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,6 +24,7 @@ #include "precompiled.hpp" #include "memory/allocation.inline.hpp" +#include "memory/resourceArea.hpp" #include "opto/block.hpp" #include "opto/callnode.hpp" #include "opto/castnode.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/opto/postaloc.cpp --- a/hotspot/src/share/vm/opto/postaloc.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/opto/postaloc.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,6 +24,7 @@ #include "precompiled.hpp" #include "memory/allocation.inline.hpp" +#include "memory/resourceArea.hpp" #include "opto/chaitin.hpp" #include "opto/machnode.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/opto/reg_split.cpp --- a/hotspot/src/share/vm/opto/reg_split.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/opto/reg_split.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,7 @@ #include "precompiled.hpp" #include "libadt/vectset.hpp" #include "memory/allocation.inline.hpp" +#include "memory/resourceArea.hpp" #include "opto/addnode.hpp" #include "opto/c2compiler.hpp" #include "opto/callnode.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/opto/replacednodes.cpp --- a/hotspot/src/share/vm/opto/replacednodes.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/opto/replacednodes.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -23,6 +23,7 @@ */ #include "precompiled.hpp" +#include "memory/resourceArea.hpp" #include "opto/cfgnode.hpp" #include "opto/phaseX.hpp" #include "opto/replacednodes.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/opto/runtime.cpp --- a/hotspot/src/share/vm/opto/runtime.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/opto/runtime.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -44,6 +44,7 @@ #include "interpreter/linkResolver.hpp" #include "logging/log.hpp" #include "memory/oopFactory.hpp" +#include "memory/resourceArea.hpp" #include "oops/objArrayKlass.hpp" #include "oops/oop.inline.hpp" #include "oops/typeArrayOop.inline.hpp" @@ -1288,7 +1289,7 @@ if (log_is_enabled(Info, exceptions)) { ResourceMark rm; - trace_exception(LogHandle(exceptions)::info_stream(), exception(), pc, ""); + trace_exception(Log(exceptions)::info_stream(), exception(), pc, ""); } // for AbortVMOnException flag diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/opto/superword.cpp --- a/hotspot/src/share/vm/opto/superword.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/opto/superword.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2007, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,7 @@ #include "compiler/compileLog.hpp" #include "libadt/vectset.hpp" #include "memory/allocation.inline.hpp" +#include "memory/resourceArea.hpp" #include "opto/addnode.hpp" #include "opto/callnode.hpp" #include "opto/castnode.hpp" @@ -3076,7 +3077,7 @@ CountedLoopEndNode* SuperWord::get_pre_loop_end(CountedLoopNode* cl) { // The loop cannot be optimized if the graph shape at // the loop entry is inappropriate. 
- if (!PhaseIdealLoop::is_canonical_main_loop_entry(cl)) { + if (!PhaseIdealLoop::is_canonical_loop_entry(cl)) { return NULL; } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/prims/jni.cpp --- a/hotspot/src/share/vm/prims/jni.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/prims/jni.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -39,6 +39,7 @@ #include "memory/allocation.hpp" #include "memory/allocation.inline.hpp" #include "memory/oopFactory.hpp" +#include "memory/resourceArea.hpp" #include "memory/universe.inline.hpp" #include "oops/instanceKlass.hpp" #include "oops/instanceOop.hpp" @@ -349,7 +350,7 @@ &st, CHECK_NULL); - if (log_is_enabled(Info, classresolve) && k != NULL) { + if (log_is_enabled(Debug, classresolve) && k != NULL) { trace_class_resolution(k); } @@ -419,7 +420,7 @@ result = find_class_from_class_loader(env, sym, true, loader, protection_domain, true, thread); - if (log_is_enabled(Info, classresolve) && result != NULL) { + if (log_is_enabled(Debug, classresolve) && result != NULL) { trace_class_resolution(java_lang_Class::as_Klass(JNIHandles::resolve_non_null(result))); } @@ -3271,7 +3272,7 @@ TempNewSymbol sym = SymbolTable::new_symbol(name, CHECK_NULL); jclass result = find_class_from_class_loader(env, sym, true, loader, protection_domain, true, CHECK_NULL); - if (log_is_enabled(Info, classresolve) && result != NULL) { + if (log_is_enabled(Debug, classresolve) && result != NULL) { trace_class_resolution(java_lang_Class::as_Klass(JNIHandles::resolve_non_null(result))); } return result; diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/prims/jvm.cpp --- a/hotspot/src/share/vm/prims/jvm.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/prims/jvm.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -38,6 +38,7 @@ #include "gc/shared/collectedHeap.inline.hpp" #include "interpreter/bytecode.hpp" #include "memory/oopFactory.hpp" +#include "memory/resourceArea.hpp" #include "memory/universe.inline.hpp" #include "oops/fieldStreams.hpp" #include "oops/instanceKlass.hpp" @@ -210,9 +211,9 @@ const char * to = to_class->external_name(); // print in a single call to reduce interleaving between threads if (source_file != NULL) { - log_info(classresolve)("%s %s %s:%d (%s)", from, to, source_file, line_number, trace); + log_debug(classresolve)("%s %s %s:%d (%s)", from, to, source_file, line_number, trace); } else { - log_info(classresolve)("%s %s (%s)", from, to, trace); + log_debug(classresolve)("%s %s (%s)", from, to, trace); } } } @@ -518,19 +519,13 @@ JVM_END -JVM_ENTRY(jint, JVM_GetStackTraceDepth(JNIEnv *env, jobject throwable)) - JVMWrapper("JVM_GetStackTraceDepth"); - oop exception = JNIHandles::resolve(throwable); - return java_lang_Throwable::get_stack_trace_depth(exception, THREAD); -JVM_END - - -JVM_ENTRY(jobject, JVM_GetStackTraceElement(JNIEnv *env, jobject throwable, jint index)) - JVMWrapper("JVM_GetStackTraceElement"); - JvmtiVMObjectAllocEventCollector oam; // This ctor (throughout this module) may trigger a safepoint/GC - oop exception = JNIHandles::resolve(throwable); - oop element = java_lang_Throwable::get_stack_trace_element(exception, index, CHECK_NULL); - return JNIHandles::make_local(env, element); +JVM_ENTRY(void, JVM_GetStackTraceElements(JNIEnv *env, jobject throwable, jobjectArray stackTrace)) + JVMWrapper("JVM_GetStackTraceElements"); + Handle exception(THREAD, JNIHandles::resolve(throwable)); + objArrayOop st = objArrayOop(JNIHandles::resolve(stackTrace)); + objArrayHandle stack_trace(THREAD, st); + // Fill in the allocated stack trace 
+ java_lang_Throwable::get_stack_trace_elements(exception, stack_trace, CHECK); JVM_END @@ -839,7 +834,7 @@ return NULL; } - if (log_is_enabled(Info, classresolve)) { + if (log_is_enabled(Debug, classresolve)) { trace_class_resolution(k); } return (jclass) JNIHandles::make_local(env, k->java_mirror()); @@ -876,7 +871,7 @@ jclass result = find_class_from_class_loader(env, h_name, init, h_loader, h_prot, false, THREAD); - if (log_is_enabled(Info, classresolve) && result != NULL) { + if (log_is_enabled(Debug, classresolve) && result != NULL) { trace_class_resolution(java_lang_Class::as_Klass(JNIHandles::resolve_non_null(result))); } return result; @@ -906,7 +901,7 @@ jclass result = find_class_from_class_loader(env, h_name, init, h_loader, h_prot, true, thread); - if (log_is_enabled(Info, classresolve) && result != NULL) { + if (log_is_enabled(Debug, classresolve) && result != NULL) { // this function is generally only used for class loading during verification. ResourceMark rm; oop from_mirror = JNIHandles::resolve_non_null(from); @@ -916,7 +911,7 @@ oop mirror = JNIHandles::resolve_non_null(result); Klass* to_class = java_lang_Class::as_Klass(mirror); const char * to = to_class->external_name(); - log_info(classresolve)("%s %s (verification)", from_name, to); + log_debug(classresolve)("%s %s (verification)", from_name, to); } return result; @@ -984,7 +979,7 @@ &st, CHECK_NULL); - if (log_is_enabled(Info, classresolve) && k != NULL) { + if (log_is_enabled(Debug, classresolve) && k != NULL) { trace_class_resolution(k); } @@ -1988,8 +1983,8 @@ Klass* k = java_lang_Class::as_Klass(JNIHandles::resolve_non_null(cls)); if (k->is_instance_klass()) { instanceKlassHandle k_h(THREAD, k); - Handle jcp = sun_reflect_ConstantPool::create(CHECK_NULL); - sun_reflect_ConstantPool::set_cp(jcp(), k_h->constants()); + Handle jcp = reflect_ConstantPool::create(CHECK_NULL); + reflect_ConstantPool::set_cp(jcp(), k_h->constants()); return JNIHandles::make_local(jcp()); } } @@ -2001,7 +1996,7 @@ JVM_ENTRY(jint, JVM_ConstantPoolGetSize(JNIEnv *env, jobject obj, jobject unused)) { JVMWrapper("JVM_ConstantPoolGetSize"); - constantPoolHandle cp = constantPoolHandle(THREAD, sun_reflect_ConstantPool::get_cp(JNIHandles::resolve_non_null(obj))); + constantPoolHandle cp = constantPoolHandle(THREAD, reflect_ConstantPool::get_cp(JNIHandles::resolve_non_null(obj))); return cp->length(); } JVM_END @@ -2010,7 +2005,7 @@ JVM_ENTRY(jclass, JVM_ConstantPoolGetClassAt(JNIEnv *env, jobject obj, jobject unused, jint index)) { JVMWrapper("JVM_ConstantPoolGetClassAt"); - constantPoolHandle cp = constantPoolHandle(THREAD, sun_reflect_ConstantPool::get_cp(JNIHandles::resolve_non_null(obj))); + constantPoolHandle cp = constantPoolHandle(THREAD, reflect_ConstantPool::get_cp(JNIHandles::resolve_non_null(obj))); bounds_check(cp, index, CHECK_NULL); constantTag tag = cp->tag_at(index); if (!tag.is_klass() && !tag.is_unresolved_klass()) { @@ -2024,7 +2019,7 @@ JVM_ENTRY(jclass, JVM_ConstantPoolGetClassAtIfLoaded(JNIEnv *env, jobject obj, jobject unused, jint index)) { JVMWrapper("JVM_ConstantPoolGetClassAtIfLoaded"); - constantPoolHandle cp = constantPoolHandle(THREAD, sun_reflect_ConstantPool::get_cp(JNIHandles::resolve_non_null(obj))); + constantPoolHandle cp = constantPoolHandle(THREAD, reflect_ConstantPool::get_cp(JNIHandles::resolve_non_null(obj))); bounds_check(cp, index, CHECK_NULL); constantTag tag = cp->tag_at(index); if (!tag.is_klass() && !tag.is_unresolved_klass()) { @@ -2069,7 +2064,7 @@ { JVMWrapper("JVM_ConstantPoolGetMethodAt"); 
JvmtiVMObjectAllocEventCollector oam; - constantPoolHandle cp = constantPoolHandle(THREAD, sun_reflect_ConstantPool::get_cp(JNIHandles::resolve_non_null(obj))); + constantPoolHandle cp = constantPoolHandle(THREAD, reflect_ConstantPool::get_cp(JNIHandles::resolve_non_null(obj))); bounds_check(cp, index, CHECK_NULL); jobject res = get_method_at_helper(cp, index, true, CHECK_NULL); return res; @@ -2080,7 +2075,7 @@ { JVMWrapper("JVM_ConstantPoolGetMethodAtIfLoaded"); JvmtiVMObjectAllocEventCollector oam; - constantPoolHandle cp = constantPoolHandle(THREAD, sun_reflect_ConstantPool::get_cp(JNIHandles::resolve_non_null(obj))); + constantPoolHandle cp = constantPoolHandle(THREAD, reflect_ConstantPool::get_cp(JNIHandles::resolve_non_null(obj))); bounds_check(cp, index, CHECK_NULL); jobject res = get_method_at_helper(cp, index, false, CHECK_NULL); return res; @@ -2116,7 +2111,7 @@ { JVMWrapper("JVM_ConstantPoolGetFieldAt"); JvmtiVMObjectAllocEventCollector oam; - constantPoolHandle cp = constantPoolHandle(THREAD, sun_reflect_ConstantPool::get_cp(JNIHandles::resolve_non_null(obj))); + constantPoolHandle cp = constantPoolHandle(THREAD, reflect_ConstantPool::get_cp(JNIHandles::resolve_non_null(obj))); bounds_check(cp, index, CHECK_NULL); jobject res = get_field_at_helper(cp, index, true, CHECK_NULL); return res; @@ -2127,7 +2122,7 @@ { JVMWrapper("JVM_ConstantPoolGetFieldAtIfLoaded"); JvmtiVMObjectAllocEventCollector oam; - constantPoolHandle cp = constantPoolHandle(THREAD, sun_reflect_ConstantPool::get_cp(JNIHandles::resolve_non_null(obj))); + constantPoolHandle cp = constantPoolHandle(THREAD, reflect_ConstantPool::get_cp(JNIHandles::resolve_non_null(obj))); bounds_check(cp, index, CHECK_NULL); jobject res = get_field_at_helper(cp, index, false, CHECK_NULL); return res; @@ -2138,7 +2133,7 @@ { JVMWrapper("JVM_ConstantPoolGetMemberRefInfoAt"); JvmtiVMObjectAllocEventCollector oam; - constantPoolHandle cp = constantPoolHandle(THREAD, sun_reflect_ConstantPool::get_cp(JNIHandles::resolve_non_null(obj))); + constantPoolHandle cp = constantPoolHandle(THREAD, reflect_ConstantPool::get_cp(JNIHandles::resolve_non_null(obj))); bounds_check(cp, index, CHECK_NULL); constantTag tag = cp->tag_at(index); if (!tag.is_field_or_method()) { @@ -2164,7 +2159,7 @@ { JVMWrapper("JVM_ConstantPoolGetClassRefIndexAt"); JvmtiVMObjectAllocEventCollector oam; - constantPoolHandle cp(THREAD, sun_reflect_ConstantPool::get_cp(JNIHandles::resolve_non_null(obj))); + constantPoolHandle cp(THREAD, reflect_ConstantPool::get_cp(JNIHandles::resolve_non_null(obj))); bounds_check(cp, index, CHECK_0); constantTag tag = cp->tag_at(index); if (!tag.is_field_or_method()) { @@ -2178,7 +2173,7 @@ { JVMWrapper("JVM_ConstantPoolGetNameAndTypeRefIndexAt"); JvmtiVMObjectAllocEventCollector oam; - constantPoolHandle cp(THREAD, sun_reflect_ConstantPool::get_cp(JNIHandles::resolve_non_null(obj))); + constantPoolHandle cp(THREAD, reflect_ConstantPool::get_cp(JNIHandles::resolve_non_null(obj))); bounds_check(cp, index, CHECK_0); constantTag tag = cp->tag_at(index); if (!tag.is_invoke_dynamic() && !tag.is_field_or_method()) { @@ -2192,7 +2187,7 @@ { JVMWrapper("JVM_ConstantPoolGetNameAndTypeRefInfoAt"); JvmtiVMObjectAllocEventCollector oam; - constantPoolHandle cp(THREAD, sun_reflect_ConstantPool::get_cp(JNIHandles::resolve_non_null(obj))); + constantPoolHandle cp(THREAD, reflect_ConstantPool::get_cp(JNIHandles::resolve_non_null(obj))); bounds_check(cp, index, CHECK_NULL); constantTag tag = cp->tag_at(index); if (!tag.is_name_and_type()) { @@ -2213,7 
+2208,7 @@ JVM_ENTRY(jint, JVM_ConstantPoolGetIntAt(JNIEnv *env, jobject obj, jobject unused, jint index)) { JVMWrapper("JVM_ConstantPoolGetIntAt"); - constantPoolHandle cp = constantPoolHandle(THREAD, sun_reflect_ConstantPool::get_cp(JNIHandles::resolve_non_null(obj))); + constantPoolHandle cp = constantPoolHandle(THREAD, reflect_ConstantPool::get_cp(JNIHandles::resolve_non_null(obj))); bounds_check(cp, index, CHECK_0); constantTag tag = cp->tag_at(index); if (!tag.is_int()) { @@ -2226,7 +2221,7 @@ JVM_ENTRY(jlong, JVM_ConstantPoolGetLongAt(JNIEnv *env, jobject obj, jobject unused, jint index)) { JVMWrapper("JVM_ConstantPoolGetLongAt"); - constantPoolHandle cp = constantPoolHandle(THREAD, sun_reflect_ConstantPool::get_cp(JNIHandles::resolve_non_null(obj))); + constantPoolHandle cp = constantPoolHandle(THREAD, reflect_ConstantPool::get_cp(JNIHandles::resolve_non_null(obj))); bounds_check(cp, index, CHECK_(0L)); constantTag tag = cp->tag_at(index); if (!tag.is_long()) { @@ -2239,7 +2234,7 @@ JVM_ENTRY(jfloat, JVM_ConstantPoolGetFloatAt(JNIEnv *env, jobject obj, jobject unused, jint index)) { JVMWrapper("JVM_ConstantPoolGetFloatAt"); - constantPoolHandle cp = constantPoolHandle(THREAD, sun_reflect_ConstantPool::get_cp(JNIHandles::resolve_non_null(obj))); + constantPoolHandle cp = constantPoolHandle(THREAD, reflect_ConstantPool::get_cp(JNIHandles::resolve_non_null(obj))); bounds_check(cp, index, CHECK_(0.0f)); constantTag tag = cp->tag_at(index); if (!tag.is_float()) { @@ -2252,7 +2247,7 @@ JVM_ENTRY(jdouble, JVM_ConstantPoolGetDoubleAt(JNIEnv *env, jobject obj, jobject unused, jint index)) { JVMWrapper("JVM_ConstantPoolGetDoubleAt"); - constantPoolHandle cp = constantPoolHandle(THREAD, sun_reflect_ConstantPool::get_cp(JNIHandles::resolve_non_null(obj))); + constantPoolHandle cp = constantPoolHandle(THREAD, reflect_ConstantPool::get_cp(JNIHandles::resolve_non_null(obj))); bounds_check(cp, index, CHECK_(0.0)); constantTag tag = cp->tag_at(index); if (!tag.is_double()) { @@ -2265,7 +2260,7 @@ JVM_ENTRY(jstring, JVM_ConstantPoolGetStringAt(JNIEnv *env, jobject obj, jobject unused, jint index)) { JVMWrapper("JVM_ConstantPoolGetStringAt"); - constantPoolHandle cp = constantPoolHandle(THREAD, sun_reflect_ConstantPool::get_cp(JNIHandles::resolve_non_null(obj))); + constantPoolHandle cp = constantPoolHandle(THREAD, reflect_ConstantPool::get_cp(JNIHandles::resolve_non_null(obj))); bounds_check(cp, index, CHECK_NULL); constantTag tag = cp->tag_at(index); if (!tag.is_string()) { @@ -2280,7 +2275,7 @@ { JVMWrapper("JVM_ConstantPoolGetUTF8At"); JvmtiVMObjectAllocEventCollector oam; - constantPoolHandle cp = constantPoolHandle(THREAD, sun_reflect_ConstantPool::get_cp(JNIHandles::resolve_non_null(obj))); + constantPoolHandle cp = constantPoolHandle(THREAD, reflect_ConstantPool::get_cp(JNIHandles::resolve_non_null(obj))); bounds_check(cp, index, CHECK_NULL); constantTag tag = cp->tag_at(index); if (!tag.is_symbol()) { @@ -2295,7 +2290,7 @@ JVM_ENTRY(jbyte, JVM_ConstantPoolGetTagAt(JNIEnv *env, jobject obj, jobject unused, jint index)) { JVMWrapper("JVM_ConstantPoolGetTagAt"); - constantPoolHandle cp = constantPoolHandle(THREAD, sun_reflect_ConstantPool::get_cp(JNIHandles::resolve_non_null(obj))); + constantPoolHandle cp = constantPoolHandle(THREAD, reflect_ConstantPool::get_cp(JNIHandles::resolve_non_null(obj))); bounds_check(cp, index, CHECK_0); constantTag tag = cp->tag_at(index); jbyte result = tag.value(); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/prims/jvm.h --- 
a/hotspot/src/share/vm/prims/jvm.h Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/prims/jvm.h Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -201,11 +201,8 @@ JNIEXPORT void JNICALL JVM_FillInStackTrace(JNIEnv *env, jobject throwable); -JNIEXPORT jint JNICALL -JVM_GetStackTraceDepth(JNIEnv *env, jobject throwable); - -JNIEXPORT jobject JNICALL -JVM_GetStackTraceElement(JNIEnv *env, jobject throwable, jint index); +JNIEXPORT void JNICALL +JVM_GetStackTraceElements(JNIEnv *env, jobject throwable, jobjectArray elements); /* * java.lang.StackWalker diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/prims/jvmtiEnter.xsl --- a/hotspot/src/share/vm/prims/jvmtiEnter.xsl Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/prims/jvmtiEnter.xsl Wed Jul 05 21:35:27 2017 +0200 @@ -37,6 +37,7 @@ # include "precompiled.hpp" +# include "memory/resourceArea.hpp" # include "utilities/macros.hpp" #if INCLUDE_JVMTI # include "oops/oop.inline.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/prims/jvmtiEnv.cpp --- a/hotspot/src/share/vm/prims/jvmtiEnv.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/prims/jvmtiEnv.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -59,6 +59,7 @@ #include "runtime/reflectionUtils.hpp" #include "runtime/signature.hpp" #include "runtime/thread.inline.hpp" +#include "runtime/timerTrace.hpp" #include "runtime/vframe.hpp" #include "runtime/vmThread.hpp" #include "services/threadService.hpp" @@ -475,7 +476,7 @@ // terminating the VM so we check one more time. // create the zip entry - ClassPathZipEntry* zip_entry = ClassLoader::create_class_path_zip_entry(segment); + ClassPathZipEntry* zip_entry = ClassLoader::create_class_path_zip_entry(segment, true); if (zip_entry == NULL) { return JVMTI_ERROR_ILLEGAL_ARGUMENT; } @@ -519,7 +520,7 @@ // create the zip entry (which will open the zip file and hence // check that the segment is indeed a zip file). 
- ClassPathZipEntry* zip_entry = ClassLoader::create_class_path_zip_entry(segment); + ClassPathZipEntry* zip_entry = ClassLoader::create_class_path_zip_entry(segment, false); if (zip_entry == NULL) { return JVMTI_ERROR_ILLEGAL_ARGUMENT; } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/prims/jvmtiEnvBase.cpp --- a/hotspot/src/share/vm/prims/jvmtiEnvBase.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/prims/jvmtiEnvBase.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -25,6 +25,7 @@ #include "precompiled.hpp" #include "classfile/systemDictionary.hpp" #include "jvmtifiles/jvmtiEnv.hpp" +#include "memory/resourceArea.hpp" #include "oops/objArrayKlass.hpp" #include "oops/objArrayOop.hpp" #include "oops/oop.inline.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/prims/jvmtiExport.cpp --- a/hotspot/src/share/vm/prims/jvmtiExport.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/prims/jvmtiExport.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -2315,6 +2315,8 @@ } //////////////////////////////////////////////////////////////////////////////////////////////// +#if INCLUDE_SERVICES +// Attach is disabled if SERVICES is not included // type for the Agent_OnAttach entry point extern "C" { @@ -2416,6 +2418,7 @@ return result; } +#endif // INCLUDE_SERVICES //////////////////////////////////////////////////////////////////////////////////////////////// // Setup current current thread for event collection. diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/prims/jvmtiExport.hpp --- a/hotspot/src/share/vm/prims/jvmtiExport.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/prims/jvmtiExport.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -377,9 +377,11 @@ static void transition_pending_onload_raw_monitors() NOT_JVMTI_RETURN; +#if INCLUDE_SERVICES // attach support static jint load_agent_library(const char *agent, const char *absParam, const char *options, outputStream* out) NOT_JVMTI_RETURN_(JNI_ERR); static jint load_agent_library(AttachOperation* op, outputStream* out) NOT_JVMTI_RETURN_(JNI_ERR); +#endif // SetNativeMethodPrefix support static char** get_all_native_method_prefixes(int* count_ptr) NOT_JVMTI_RETURN_(NULL); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/prims/jvmtiRedefineClasses.cpp --- a/hotspot/src/share/vm/prims/jvmtiRedefineClasses.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/prims/jvmtiRedefineClasses.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -34,6 +34,7 @@ #include "interpreter/rewriter.hpp" #include "memory/metadataFactory.hpp" #include "memory/metaspaceShared.hpp" +#include "memory/resourceArea.hpp" #include "memory/universe.inline.hpp" #include "oops/fieldStreams.hpp" #include "oops/klassVtable.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/prims/jvmtiTagMap.cpp --- a/hotspot/src/share/vm/prims/jvmtiTagMap.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/prims/jvmtiTagMap.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -29,6 +29,7 @@ #include "classfile/vmSymbols.hpp" #include "code/codeCache.hpp" #include "jvmtifiles/jvmtiEnv.hpp" +#include "memory/resourceArea.hpp" #include "oops/instanceMirrorKlass.hpp" #include "oops/objArrayKlass.hpp" #include "oops/objArrayOop.inline.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/prims/jvmtiTrace.cpp --- a/hotspot/src/share/vm/prims/jvmtiTrace.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/prims/jvmtiTrace.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2014, Oracle and/or its affiliates. 
All rights reserved. + * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,6 +24,7 @@ #include "precompiled.hpp" #include "jvmtifiles/jvmtiEnv.hpp" +#include "memory/resourceArea.hpp" #include "prims/jvmtiTrace.hpp" // diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/prims/methodHandles.cpp --- a/hotspot/src/share/vm/prims/methodHandles.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/prims/methodHandles.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -34,13 +34,14 @@ #include "interpreter/linkResolver.hpp" #include "memory/allocation.inline.hpp" #include "memory/oopFactory.hpp" +#include "memory/resourceArea.hpp" #include "oops/objArrayOop.inline.hpp" #include "oops/oop.inline.hpp" #include "prims/methodHandles.hpp" #include "prims/jvmtiRedefineClassesTrace.hpp" #include "runtime/compilationPolicy.hpp" #include "runtime/javaCalls.hpp" -#include "runtime/logTimer.hpp" +#include "runtime/timerTrace.hpp" #include "runtime/reflection.hpp" #include "runtime/signature.hpp" #include "runtime/stubRoutines.hpp" @@ -73,7 +74,7 @@ assert(_adapter_code == NULL, "generate only once"); ResourceMark rm; - TraceStartupTime timer("MethodHandles adapters generation"); + TraceTime timer("MethodHandles adapters generation", TRACETIME_LOG(Info, startuptime)); _adapter_code = MethodHandlesAdapterBlob::create(adapter_code_size); CodeBuffer code(_adapter_code); MethodHandlesAdapterGenerator g(&code); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/prims/unsafe.cpp --- a/hotspot/src/share/vm/prims/unsafe.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/prims/unsafe.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -26,6 +26,7 @@ #include "classfile/classFileStream.hpp" #include "classfile/vmSymbols.hpp" #include "memory/allocation.inline.hpp" +#include "memory/resourceArea.hpp" #include "oops/objArrayOop.inline.hpp" #include "oops/oop.inline.hpp" #include "prims/jni.h" @@ -867,7 +868,10 @@ } const Klass* host_klass = java_lang_Class::as_Klass(JNIHandles::resolve_non_null(host_class)); - assert(host_klass != NULL, "invariant"); + // Primitive types have NULL Klass* fields in their java.lang.Class instances. + if (host_klass == NULL) { + THROW_0(vmSymbols::java_lang_IllegalArgumentException()); + } const char* host_source = host_klass->external_name(); Handle host_loader(THREAD, host_klass->class_loader()); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/prims/wbtestmethods/parserTests.cpp --- a/hotspot/src/share/vm/prims/wbtestmethods/parserTests.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/prims/wbtestmethods/parserTests.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -26,6 +26,7 @@ #include "classfile/javaClasses.inline.hpp" #include "classfile/symbolTable.hpp" #include "memory/oopFactory.hpp" +#include "memory/resourceArea.hpp" #include "oops/objArrayOop.inline.hpp" #include "prims/jni.h" #include "prims/whitebox.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/prims/whitebox.cpp --- a/hotspot/src/share/vm/prims/whitebox.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/prims/whitebox.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -35,6 +35,8 @@ #include "jvmtifiles/jvmtiEnv.hpp" #include "memory/metadataFactory.hpp" #include "memory/metaspaceShared.hpp" +#include "memory/iterator.hpp" +#include "memory/resourceArea.hpp" #include "memory/universe.hpp" #include "oops/constantPool.hpp" #include "oops/oop.inline.hpp" @@ -717,11 +719,6 @@ return result; WB_END -class AlwaysFalseClosure : public BoolObjectClosure { - public: - bool do_object_b(oop p) { return false; } -}; - static AlwaysFalseClosure always_false; WB_ENTRY(void, WB_ClearMethodState(JNIEnv* env, jobject o, jobject method)) @@ -997,7 +994,7 @@ WB_ENTRY(void, WB_FullGC(JNIEnv* env, jobject o)) Universe::heap()->collector_policy()->set_should_clear_all_soft_refs(true); - Universe::heap()->collect(GCCause::_last_ditch_collection); + Universe::heap()->collect(GCCause::_wb_full_gc); #if INCLUDE_ALL_GCS if (UseG1GC) { // Needs to be cleared explicitly for G1 @@ -1376,8 +1373,8 @@ return ConstantPool::encode_invokedynamic_index(index); WB_END -WB_ENTRY(void, WB_ClearInlineCaches(JNIEnv* env, jobject wb)) - VM_ClearICs clear_ics; +WB_ENTRY(void, WB_ClearInlineCaches(JNIEnv* env, jobject wb, jboolean preserve_static_stubs)) + VM_ClearICs clear_ics(preserve_static_stubs == JNI_TRUE); VMThread::execute(&clear_ics); WB_END @@ -1757,7 +1754,7 @@ {CC"isShared", CC"(Ljava/lang/Object;)Z", (void*)&WB_IsShared }, {CC"isSharedClass", CC"(Ljava/lang/Class;)Z", (void*)&WB_IsSharedClass }, {CC"areSharedStringsIgnored", CC"()Z", (void*)&WB_AreSharedStringsIgnored }, - {CC"clearInlineCaches", CC"()V", (void*)&WB_ClearInlineCaches }, + {CC"clearInlineCaches0", CC"(Z)V", (void*)&WB_ClearInlineCaches }, {CC"addCompilerDirective", CC"(Ljava/lang/String;)I", (void*)&WB_AddCompilerDirective }, {CC"removeCompilerDirective", CC"(I)V", (void*)&WB_RemoveCompilerDirective }, diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/runtime/arguments.cpp --- a/hotspot/src/share/vm/runtime/arguments.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/runtime/arguments.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -66,16 +66,6 @@ #define DEFAULT_VENDOR_URL_BUG "http://bugreport.java.com/bugreport/crash.jsp" #define DEFAULT_JAVA_LAUNCHER "generic" -#define UNSUPPORTED_GC_OPTION(gc) \ -do { \ - if (gc) { \ - if (FLAG_IS_CMDLINE(gc)) { \ - warning(#gc " is not supported in this VM. 
Using Serial GC."); \ - } \ - FLAG_SET_DEFAULT(gc, false); \ - } \ -} while(0) - char* Arguments::_jvm_flags_file = NULL; char** Arguments::_jvm_flags_array = NULL; int Arguments::_num_jvm_flags = 0; @@ -385,6 +375,7 @@ { "JNIDetachReleasesMonitors", JDK_Version::undefined(), JDK_Version::jdk(9), JDK_Version::jdk(10) }, { "UseAltSigs", JDK_Version::undefined(), JDK_Version::jdk(9), JDK_Version::jdk(10) }, { "SegmentedHeapDumpThreshold", JDK_Version::undefined(), JDK_Version::jdk(9), JDK_Version::jdk(10) }, + { "PrintOopAddress", JDK_Version::undefined(), JDK_Version::jdk(9), JDK_Version::jdk(10) }, #ifdef TEST_VERIFY_SPECIAL_JVM_FLAGS { "dep > obs", JDK_Version::jdk(9), JDK_Version::jdk(8), JDK_Version::undefined() }, @@ -417,16 +408,36 @@ }; static AliasedLoggingFlag const aliased_logging_flags[] = { - { "TraceClassLoading", LogLevel::Info, true, LogTag::_classload }, - { "TraceClassPaths", LogLevel::Info, true, LogTag::_classpath }, - { "TraceClassResolution", LogLevel::Info, true, LogTag::_classresolve }, - { "TraceClassUnloading", LogLevel::Info, true, LogTag::_classunload }, - { "TraceExceptions", LogLevel::Info, true, LogTag::_exceptions }, - { "TraceMonitorInflation", LogLevel::Debug, true, LogTag::_monitorinflation }, - { "TraceBiasedLocking", LogLevel::Info, true, LogTag::_biasedlocking }, - { NULL, LogLevel::Off, false, LogTag::__NO_TAG } + { "TraceBiasedLocking", LogLevel::Info, true, LOG_TAGS(biasedlocking) }, + { "TraceClassLoading", LogLevel::Info, true, LOG_TAGS(classload) }, + { "TraceClassLoadingPreorder", LogLevel::Debug, true, LOG_TAGS(classload, preorder) }, + { "TraceClassPaths", LogLevel::Info, true, LOG_TAGS(classpath) }, + { "TraceClassResolution", LogLevel::Debug, true, LOG_TAGS(classresolve) }, + { "TraceClassUnloading", LogLevel::Info, true, LOG_TAGS(classunload) }, + { "TraceExceptions", LogLevel::Info, true, LOG_TAGS(exceptions) }, + { "TraceLoaderConstraints", LogLevel::Info, true, LOG_TAGS(classload, constraints) }, + { "TraceMonitorInflation", LogLevel::Debug, true, LOG_TAGS(monitorinflation) }, + { "TraceSafepointCleanupTime", LogLevel::Info, true, LOG_TAGS(safepointcleanup) }, + { NULL, LogLevel::Off, false, LOG_TAGS(_NO_TAG) } }; +#ifndef PRODUCT +// These options are removed in jdk9. Remove this code for jdk10. +static AliasedFlag const removed_develop_logging_flags[] = { + { "TraceClassInitialization", "-Xlog:classinit" }, + { "TraceClassLoaderData", "-Xlog:classloaderdata" }, + { "TraceDefaultMethods", "-Xlog:defaultmethods=debug" }, + { "TraceItables", "-Xlog:itables=debug" }, + { "TraceMonitorMismatch", "-Xlog:monitormismatch=info" }, + { "TraceSafepoint", "-Xlog:safepoint=debug" }, + { "TraceStartupTime", "-Xlog:startuptime" }, + { "TraceVMOperation", "-Xlog:vmoperation=debug" }, + { "PrintVtables", "-Xlog:vtables=debug" }, + { "VerboseVerification", "-Xlog:verification" }, + { NULL, NULL } +}; +#endif //PRODUCT + // Return true if "v" is less than "other", where "other" may be "undefined". 
static bool version_less_than(JDK_Version v, JDK_Version other) { assert(!v.is_undefined(), "must be defined"); @@ -478,6 +489,18 @@ return 0; } +#ifndef PRODUCT +const char* Arguments::removed_develop_logging_flag_name(const char* name){ + for (size_t i = 0; removed_develop_logging_flags[i].alias_name != NULL; i++) { + const AliasedFlag& flag = removed_develop_logging_flags[i]; + if (strcmp(flag.alias_name, name) == 0) { + return flag.real_name; + } + } + return NULL; +} +#endif // PRODUCT + const char* Arguments::real_flag_name(const char *flag_name) { for (size_t i = 0; aliased_jvm_flags[i].alias_name != NULL; i++) { const AliasedFlag& flag_status = aliased_jvm_flags[i]; @@ -961,14 +984,39 @@ return NULL; } -AliasedLoggingFlag Arguments::catch_logging_aliases(const char* name){ +void log_deprecated_flag(const char* name, bool on, AliasedLoggingFlag alf) { + LogTagType tagSet[] = {alf.tag0, alf.tag1, alf.tag2, alf.tag3, alf.tag4, alf.tag5}; + // Set tagset string buffer at max size of 256, large enough for any alias tagset + const int max_tagset_size = 256; + int max_tagset_len = max_tagset_size - 1; + char tagset_buffer[max_tagset_size]; + tagset_buffer[0] = '\0'; + + // Write tag-set for aliased logging option, in string list form + int max_tags = sizeof(tagSet)/sizeof(tagSet[0]); + for (int i = 0; i < max_tags && tagSet[i] != LogTag::__NO_TAG; i++) { + if (i > 0) { + strncat(tagset_buffer, ",", max_tagset_len - strlen(tagset_buffer)); + } + strncat(tagset_buffer, LogTag::name(tagSet[i]), max_tagset_len - strlen(tagset_buffer)); + } + + log_warning(arguments)("-XX:%s%s is deprecated. Will use -Xlog:%s=%s instead.", + (on) ? "+" : "-", + name, + tagset_buffer, + (on) ? LogLevel::name(alf.level) : "off"); +} + +AliasedLoggingFlag Arguments::catch_logging_aliases(const char* name, bool on){ for (size_t i = 0; aliased_logging_flags[i].alias_name != NULL; i++) { const AliasedLoggingFlag& alf = aliased_logging_flags[i]; if (strcmp(alf.alias_name, name) == 0) { + log_deprecated_flag(name, on, alf); return alf; } } - AliasedLoggingFlag a = {NULL, LogLevel::Off, false, LogTag::__NO_TAG}; + AliasedLoggingFlag a = {NULL, LogLevel::Off, false, LOG_TAGS(_NO_TAG)}; return a; } @@ -981,12 +1029,11 @@ char dummy; const char* real_name; bool warn_if_deprecated = true; - AliasedLoggingFlag alf; if (sscanf(arg, "-%" XSTR(BUFLEN) NAME_RANGE "%c", name, &dummy) == 1) { - alf = catch_logging_aliases(name); + AliasedLoggingFlag alf = catch_logging_aliases(name, false); if (alf.alias_name != NULL){ - LogConfiguration::configure_stdout(LogLevel::Off, alf.exactMatch, alf.tag, LogTag::__NO_TAG); + LogConfiguration::configure_stdout(LogLevel::Off, alf.exactMatch, alf.tag0, alf.tag1, alf.tag2, alf.tag3, alf.tag4, alf.tag5); return true; } real_name = handle_aliases_and_deprecation(name, warn_if_deprecated); @@ -996,9 +1043,9 @@ return set_bool_flag(real_name, false, origin); } if (sscanf(arg, "+%" XSTR(BUFLEN) NAME_RANGE "%c", name, &dummy) == 1) { - alf = catch_logging_aliases(name); + AliasedLoggingFlag alf = catch_logging_aliases(name, true); if (alf.alias_name != NULL){ - LogConfiguration::configure_stdout(alf.level, alf.exactMatch, alf.tag, LogTag::__NO_TAG); + LogConfiguration::configure_stdout(alf.level, alf.exactMatch, alf.tag0, alf.tag1, alf.tag2, alf.tag3, alf.tag4, alf.tag5); return true; } real_name = handle_aliases_and_deprecation(name, warn_if_deprecated); @@ -1202,13 +1249,23 @@ char stripped_argname[BUFLEN+1]; strncpy(stripped_argname, argname, arg_len); stripped_argname[arg_len] = '\0'; // strncpy 
may not null terminate. - if (is_obsolete_flag(stripped_argname, &since)) { char version[256]; since.to_string(version, sizeof(version)); warning("Ignoring option %s; support was removed in %s", stripped_argname, version); return true; } +#ifndef PRODUCT + else { + const char* replacement; + if ((replacement = removed_develop_logging_flag_name(stripped_argname)) != NULL){ + log_warning(arguments)("%s has been removed. Please use %s instead.", + stripped_argname, + replacement); + return false; + } + } +#endif //PRODUCT } // For locked flags, report a custom error message if available. @@ -1897,26 +1954,45 @@ CollectorPolicy::compute_heap_alignment()); } +bool Arguments::gc_selected() { +#if INCLUDE_ALL_GCS + return UseSerialGC || UseParallelGC || UseParallelOldGC || UseConcMarkSweepGC || UseG1GC; +#else + return UseSerialGC; +#endif // INCLUDE_ALL_GCS +} + void Arguments::select_gc_ergonomically() { +#if INCLUDE_ALL_GCS if (os::is_server_class_machine()) { if (should_auto_select_low_pause_collector()) { - FLAG_SET_ERGO(bool, UseConcMarkSweepGC, true); + FLAG_SET_ERGO_IF_DEFAULT(bool, UseConcMarkSweepGC, true); } else { #if defined(JAVASE_EMBEDDED) - FLAG_SET_ERGO(bool, UseParallelGC, true); + FLAG_SET_ERGO_IF_DEFAULT(bool, UseParallelGC, true); #else - FLAG_SET_ERGO(bool, UseG1GC, true); + FLAG_SET_ERGO_IF_DEFAULT(bool, UseG1GC, true); #endif } } else { - FLAG_SET_ERGO(bool, UseSerialGC, true); - } + FLAG_SET_ERGO_IF_DEFAULT(bool, UseSerialGC, true); + } +#else + UNSUPPORTED_OPTION(UseG1GC); + UNSUPPORTED_OPTION(UseParallelGC); + UNSUPPORTED_OPTION(UseParallelOldGC); + UNSUPPORTED_OPTION(UseConcMarkSweepGC); + UNSUPPORTED_OPTION(UseParNewGC); + FLAG_SET_ERGO_IF_DEFAULT(bool, UseSerialGC, true); +#endif // INCLUDE_ALL_GCS } void Arguments::select_gc() { if (!gc_selected()) { select_gc_ergonomically(); - guarantee(gc_selected(), "No GC selected"); + if (!gc_selected()) { + vm_exit_during_initialization("Garbage collector not selected (default collector explicitly disabled)", NULL); + } } } @@ -2041,16 +2117,6 @@ log_trace(gc)("ConcGCThreads: %u", ConcGCThreads); } -#if !INCLUDE_ALL_GCS -#ifdef ASSERT -static bool verify_serial_gc_flags() { - return (UseSerialGC && - !(UseParNewGC || (UseConcMarkSweepGC) || UseG1GC || - UseParallelGC || UseParallelOldGC)); -} -#endif // ASSERT -#endif // INCLUDE_ALL_GCS - void Arguments::set_gc_specific_flags() { #if INCLUDE_ALL_GCS // Set per-collector flags @@ -2072,8 +2138,6 @@ // Keeping the heap 100% free is hard ;-) so limit it to 99%. FLAG_SET_ERGO(uintx, MinHeapFreeRatio, 99); } -#else // INCLUDE_ALL_GCS - assert(verify_serial_gc_flags(), "SerialGC unset"); #endif // INCLUDE_ALL_GCS } @@ -3595,9 +3659,14 @@ } #endif +#if !defined(COMPILER2) && !INCLUDE_JVMCI + UNSUPPORTED_OPTION(ProfileInterpreter); + NOT_PRODUCT(UNSUPPORTED_OPTION(TraceProfileInterpreter)); +#endif + #ifndef TIERED // Tiered compilation is undefined. 
- UNSUPPORTED_OPTION(TieredCompilation, "TieredCompilation"); + UNSUPPORTED_OPTION(TieredCompilation); #endif // If we are running in a headless jre, force java.awt.headless property @@ -3923,17 +3992,6 @@ } } -#if !INCLUDE_ALL_GCS -static void force_serial_gc() { - FLAG_SET_DEFAULT(UseSerialGC, true); - UNSUPPORTED_GC_OPTION(UseG1GC); - UNSUPPORTED_GC_OPTION(UseParallelGC); - UNSUPPORTED_GC_OPTION(UseParallelOldGC); - UNSUPPORTED_GC_OPTION(UseConcMarkSweepGC); - UNSUPPORTED_GC_OPTION(UseParNewGC); -} -#endif // INCLUDE_ALL_GCS - // Sharing support // Construct the path to the archive static char* get_shared_archive_path() { @@ -4297,7 +4355,7 @@ } #if defined(_ALLBSD_SOURCE) || defined(AIX) // UseLargePages is not yet supported on BSD and AIX. - UNSUPPORTED_OPTION(UseLargePages, "-XX:+UseLargePages"); + UNSUPPORTED_OPTION(UseLargePages); #endif ArgumentsExt::report_unsupported_options(); @@ -4328,9 +4386,6 @@ // Set object alignment values. set_object_alignment(); -#if !INCLUDE_ALL_GCS - force_serial_gc(); -#endif // INCLUDE_ALL_GCS #if !INCLUDE_CDS if (DumpSharedSpaces || RequireSharedSpaces) { jio_fprintf(defaultStream::error_stream(), diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/runtime/arguments.hpp --- a/hotspot/src/share/vm/runtime/arguments.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/runtime/arguments.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -265,7 +265,12 @@ const char* alias_name; LogLevelType level; bool exactMatch; - LogTagType tag; + LogTagType tag0; + LogTagType tag1; + LogTagType tag2; + LogTagType tag3; + LogTagType tag4; + LogTagType tag5; } AliasedLoggingFlag; class Arguments : AllStatic { @@ -503,6 +508,10 @@ // the version number when the flag became obsolete. static bool is_obsolete_flag(const char* flag_name, JDK_Version* version); +#ifndef PRODUCT + static const char* removed_develop_logging_flag_name(const char* name); +#endif // PRODUCT + // Returns 1 if the flag is deprecated (and not yet obsolete or expired). // In this case the 'version' buffer is filled in with the version number when // the flag became deprecated. @@ -517,7 +526,7 @@ // Return NULL if the arg has expired. static const char* handle_aliases_and_deprecation(const char* arg, bool warn); static bool lookup_logging_aliases(const char* arg, char* buffer); - static AliasedLoggingFlag catch_logging_aliases(const char* name); + static AliasedLoggingFlag catch_logging_aliases(const char* name, bool on); static short CompileOnlyClassesNum; static short CompileOnlyClassesMax; static char** CompileOnlyClasses; @@ -558,7 +567,7 @@ static jint adjust_after_os(); static void set_gc_specific_flags(); - static inline bool gc_selected(); // whether a gc has been selected + static bool gc_selected(); // whether a gc has been selected static void select_gc_ergonomically(); #if INCLUDE_JVMCI // Check consistency of jvmci vm argument settings. 
@@ -723,20 +732,16 @@ static void check_unsupported_dumping_properties() NOT_CDS_RETURN; }; -bool Arguments::gc_selected() { - return UseConcMarkSweepGC || UseG1GC || UseParallelGC || UseParallelOldGC || UseSerialGC; -} - // Disable options not supported in this release, with a warning if they // were explicitly requested on the command-line -#define UNSUPPORTED_OPTION(opt, description) \ -do { \ - if (opt) { \ - if (FLAG_IS_CMDLINE(opt)) { \ - warning(description " is disabled in this release."); \ - } \ - FLAG_SET_DEFAULT(opt, false); \ - } \ +#define UNSUPPORTED_OPTION(opt) \ +do { \ + if (opt) { \ + if (FLAG_IS_CMDLINE(opt)) { \ + warning("-XX:+" #opt " not supported in this VM"); \ + } \ + FLAG_SET_DEFAULT(opt, false); \ + } \ } while(0) #endif // SHARE_VM_RUNTIME_ARGUMENTS_HPP diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/runtime/biasedLocking.cpp --- a/hotspot/src/share/vm/runtime/biasedLocking.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/runtime/biasedLocking.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -149,9 +149,13 @@ if (!mark->has_bias_pattern()) { if (log_is_enabled(Info, biasedlocking)) { ResourceMark rm; - log_info(biasedlocking)(" (Skipping revocation of object of type %s " - "because it's no longer biased)", - obj->klass()->external_name()); + log_info(biasedlocking)(" (Skipping revocation of object " INTPTR_FORMAT + ", mark " INTPTR_FORMAT ", type %s" + ", requesting thread " INTPTR_FORMAT + " because it's no longer biased)", + p2i((void *)obj), (intptr_t) mark, + obj->klass()->external_name(), + (intptr_t) requesting_thread); } return BiasedLocking::NOT_BIASED; } @@ -163,9 +167,9 @@ // Log at "info" level if not bulk, else "trace" level if (!is_bulk) { ResourceMark rm; - log_info(biasedlocking)("Revoking bias of object " INTPTR_FORMAT " , mark " - INTPTR_FORMAT " , type %s , prototype header " INTPTR_FORMAT - " , allow rebias %d , requesting thread " INTPTR_FORMAT, + log_info(biasedlocking)("Revoking bias of object " INTPTR_FORMAT ", mark " + INTPTR_FORMAT ", type %s, prototype header " INTPTR_FORMAT + ", allow rebias %d, requesting thread " INTPTR_FORMAT, p2i((void *)obj), (intptr_t) mark, obj->klass()->external_name(), @@ -222,13 +226,24 @@ } // Log at "info" level if not bulk, else "trace" level if (!is_bulk) { - log_info(biasedlocking)(" Revoked bias of object biased toward dead thread"); + log_info(biasedlocking)(" Revoked bias of object biased toward dead thread (" + PTR_FORMAT ")", p2i(biased_thread)); } else { - log_trace(biasedlocking)(" Revoked bias of object biased toward dead thread"); + log_trace(biasedlocking)(" Revoked bias of object biased toward dead thread (" + PTR_FORMAT ")", p2i(biased_thread)); } return BiasedLocking::BIAS_REVOKED; } + // Log at "info" level if not bulk, else "trace" level + if (!is_bulk) { + log_info(biasedlocking)(" Revoked bias of object biased toward live thread (" + PTR_FORMAT ")", p2i(biased_thread)); + } else { + log_trace(biasedlocking)(" Revoked bias of object biased toward live thread (" + PTR_FORMAT ")", p2i(biased_thread)); + } + // Thread owning bias is alive. // Check to see whether it currently owns the lock and, if so, // write down the needed displaced headers to the thread's stack. 
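A note on the arguments.hpp hunk above: the reworked UNSUPPORTED_OPTION macro derives its warning text from the flag name itself via #opt stringification instead of taking a separate description string, so callers no longer repeat the flag name by hand. The sketch below is a minimal standalone illustration of that pattern only; the plain bool, the flag_is_cmdline variable, and fprintf are hypothetical stand-ins for HotSpot's flag table, FLAG_IS_CMDLINE() and warning(), not the VM code itself.

  #include <cstdio>

  // Illustration only: a plain bool stands in for a HotSpot -XX flag and
  // flag_is_cmdline stands in for FLAG_IS_CMDLINE(opt) (hypothetical names).
  static bool UseLargePages = true;   // pretend -XX:+UseLargePages was given
  static bool flag_is_cmdline = true;

  // Simplified analogue of the one-argument macro: #opt stringifies the flag
  // name, so the message always matches the flag being disabled.
  #define UNSUPPORTED_OPTION(opt)                                           \
    do {                                                                    \
      if (opt) {                                                            \
        if (flag_is_cmdline) {                                              \
          std::fprintf(stderr, "-XX:+" #opt " not supported in this VM\n"); \
        }                                                                   \
        opt = false; /* stand-in for FLAG_SET_DEFAULT(opt, false) */        \
      }                                                                     \
    } while (0)

  int main() {
    UNSUPPORTED_OPTION(UseLargePages); // prints: -XX:+UseLargePages not supported in this VM
    return 0;
  }

In the patch itself this is why the old two-argument call sites, such as UNSUPPORTED_OPTION(TieredCompilation, "TieredCompilation") and UNSUPPORTED_OPTION(UseLargePages, "-XX:+UseLargePages"), collapse to the one-argument form.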
diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/runtime/commandLineFlagConstraintList.cpp --- a/hotspot/src/share/vm/runtime/commandLineFlagConstraintList.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/runtime/commandLineFlagConstraintList.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -290,13 +290,11 @@ #endif // INCLUDE_ALL_GCS } -// Find constraints by name and return only if found constraint's type is equal or lower than current validating type. -CommandLineFlagConstraint* CommandLineFlagConstraintList::find_if_needs_check(const char* name) { +CommandLineFlagConstraint* CommandLineFlagConstraintList::find(const char* name) { CommandLineFlagConstraint* found = NULL; for (int i=0; i<length(); i++) { CommandLineFlagConstraint* constraint = at(i); - if ((strcmp(constraint->name(), name) == 0) && - (constraint->type() <= _validating_type)) { + if (strcmp(constraint->name(), name) == 0) { found = constraint; break; } @@ -304,6 +302,16 @@ return found; } +// Find constraints by name and return only if found constraint's type is equal or lower than current validating type. +CommandLineFlagConstraint* CommandLineFlagConstraintList::find_if_needs_check(const char* name) { + CommandLineFlagConstraint* found = NULL; + CommandLineFlagConstraint* constraint = find(name); + if (constraint && (constraint->type() <= _validating_type)) { + found = constraint; + } + return found; +} + // Check constraints for specific constraint type. bool CommandLineFlagConstraintList::check_constraints(CommandLineFlagConstraint::ConstraintType type) { guarantee(type > _validating_type, "Constraint check is out of order."); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/runtime/commandLineFlagConstraintList.hpp --- a/hotspot/src/share/vm/runtime/commandLineFlagConstraintList.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/runtime/commandLineFlagConstraintList.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -89,6 +89,7 @@ static void init(); static int length() { return (_constraints != NULL) ? _constraints->length() : 0; } static CommandLineFlagConstraint* at(int i) { return (_constraints != NULL) ? _constraints->at(i) : NULL; } + static CommandLineFlagConstraint* find(const char* name); static CommandLineFlagConstraint* find_if_needs_check(const char* name); static void add(CommandLineFlagConstraint* constraint) { _constraints->append(constraint); } // True if 'AfterErgo' or later constraint functions are validated. diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/runtime/commandLineFlagConstraintsGC.cpp --- a/hotspot/src/share/vm/runtime/commandLineFlagConstraintsGC.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/runtime/commandLineFlagConstraintsGC.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,7 @@ #include "precompiled.hpp" #include "gc/shared/collectedHeap.hpp" #include "gc/shared/collectorPolicy.hpp" +#include "gc/shared/genCollectedHeap.hpp" #include "gc/shared/threadLocalAllocBuffer.hpp" #include "runtime/arguments.hpp" #include "runtime/commandLineFlagConstraintsGC.hpp" @@ -35,6 +36,7 @@ #include "utilities/defaultStream.hpp" #if INCLUDE_ALL_GCS +#include "gc/cms/concurrentMarkSweepGeneration.inline.hpp" #include "gc/g1/g1_globals.hpp" #include "gc/g1/heapRegionBounds.inline.hpp" #include "gc/shared/plab.hpp" @@ -113,7 +115,7 @@ static Flag::Error MinPLABSizeBounds(const char* name, size_t value, bool verbose) { #if INCLUDE_ALL_GCS - if ((UseConcMarkSweepGC || UseG1GC) && (value < PLAB::min_size())) { + if ((UseConcMarkSweepGC || UseG1GC || UseParallelGC) && (value < PLAB::min_size())) { CommandLineError::print(verbose, "%s (" SIZE_FORMAT ") must be " "greater than or equal to ergonomic PLAB minimum size (" SIZE_FORMAT ")\n", @@ -126,7 +128,7 @@ static Flag::Error MaxPLABSizeBounds(const char* name, size_t value, bool verbose) { #if INCLUDE_ALL_GCS - if ((UseConcMarkSweepGC || UseG1GC) && (value > PLAB::max_size())) { + if ((UseConcMarkSweepGC || UseG1GC || UseParallelGC) && (value > PLAB::max_size())) { CommandLineError::print(verbose, "%s (" SIZE_FORMAT ") must be " "less than or equal to ergonomic PLAB maximum size (" SIZE_FORMAT ")\n", @@ -381,6 +383,39 @@ return Flag::SUCCESS; } +Flag::Error ParGCCardsPerStrideChunkConstraintFunc(intx value, bool verbose) { +#if INCLUDE_ALL_GCS + if (UseConcMarkSweepGC) { + // ParGCCardsPerStrideChunk should be compared with card table size. + size_t heap_size = Universe::heap()->reserved_region().word_size(); + CardTableModRefBS* bs = (CardTableModRefBS*)GenCollectedHeap::heap()->rem_set()->bs(); + size_t card_table_size = bs->cards_required(heap_size) - 1; // Valid card table size + + if ((size_t)value > card_table_size) { + CommandLineError::print(verbose, + "ParGCCardsPerStrideChunk (" INTX_FORMAT ") is too large for the heap size and " + "must be less than or equal to card table size (" SIZE_FORMAT ")\n", + value, card_table_size); + return Flag::VIOLATES_CONSTRAINT; + } + + // ParGCCardsPerStrideChunk is used with n_strides(ParallelGCThreads*ParGCStridesPerThread) + // from CardTableModRefBSForCTRS::process_stride(). Note that ParGCStridesPerThread is already checked + // not to make an overflow with ParallelGCThreads from its constraint function. 
+ uintx n_strides = ParallelGCThreads * ParGCStridesPerThread; + uintx ergo_max = max_uintx / n_strides; + if ((uintx)value > ergo_max) { + CommandLineError::print(verbose, + "ParGCCardsPerStrideChunk (" INTX_FORMAT ") must be " + "less than or equal to ergonomic maximum (" UINTX_FORMAT ")\n", + value, ergo_max); + return Flag::VIOLATES_CONSTRAINT; + } + } +#endif + return Flag::SUCCESS; +} + Flag::Error CMSOldPLABMinConstraintFunc(size_t value, bool verbose) { Flag::Error status = Flag::SUCCESS; @@ -448,6 +483,22 @@ return Flag::SUCCESS; } +Flag::Error CMSSamplingGrainConstraintFunc(uintx value, bool verbose) { +#if INCLUDE_ALL_GCS + if (UseConcMarkSweepGC) { + size_t max_capacity = GenCollectedHeap::heap()->young_gen()->max_capacity(); + if (value > max_uintx - max_capacity) { + CommandLineError::print(verbose, + "CMSSamplingGrain (" UINTX_FORMAT ") must be " + "less than or equal to ergonomic maximum (" SIZE_FORMAT ")\n", + value, max_uintx - max_capacity); + return Flag::VIOLATES_CONSTRAINT; + } + } +#endif + return Flag::SUCCESS; +} + Flag::Error CMSWorkQueueDrainThresholdConstraintFunc(uintx value, bool verbose) { #if INCLUDE_ALL_GCS if (UseConcMarkSweepGC) { @@ -457,6 +508,27 @@ return Flag::SUCCESS; } +Flag::Error CMSBitMapYieldQuantumConstraintFunc(size_t value, bool verbose) { +#if INCLUDE_ALL_GCS + // Skip for current default value. + if (UseConcMarkSweepGC && FLAG_IS_CMDLINE(CMSBitMapYieldQuantum)) { + // CMSBitMapYieldQuantum should be compared with mark bitmap size. + ConcurrentMarkSweepGeneration* cms = (ConcurrentMarkSweepGeneration*)GenCollectedHeap::heap()->old_gen(); + size_t bitmap_size = cms->collector()->markBitMap()->sizeInWords(); + + if (value > bitmap_size) { + CommandLineError::print(verbose, + "CMSBitMapYieldQuantum (" SIZE_FORMAT ") must " + "be less than or equal to bitmap size (" SIZE_FORMAT ") " + "whose size corresponds to the size of old generation of the Java heap\n", + value, bitmap_size); + return Flag::VIOLATES_CONSTRAINT; + } + } +#endif + return Flag::SUCCESS; +} + Flag::Error MaxGCPauseMillisConstraintFunc(uintx value, bool verbose) { #if INCLUDE_ALL_GCS if (UseG1GC && FLAG_IS_CMDLINE(MaxGCPauseMillis) && (value >= GCPauseIntervalMillis)) { @@ -589,9 +661,15 @@ "greater than or equal to reserved area in TLAB (" SIZE_FORMAT ")\n", value, ThreadLocalAllocBuffer::alignment_reserve_in_bytes()); return Flag::VIOLATES_CONSTRAINT; - } else { - return Flag::SUCCESS; } + if (value > (ThreadLocalAllocBuffer::max_size() * HeapWordSize)) { + CommandLineError::print(verbose, + "MinTLABSize (" SIZE_FORMAT ") must be " + "less than or equal to ergonomic TLAB maximum (" SIZE_FORMAT ")\n", + value, ThreadLocalAllocBuffer::max_size() * HeapWordSize); + return Flag::VIOLATES_CONSTRAINT; + } + return Flag::SUCCESS; } Flag::Error TLABSizeConstraintFunc(size_t value, bool verbose) { diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/runtime/commandLineFlagConstraintsGC.hpp --- a/hotspot/src/share/vm/runtime/commandLineFlagConstraintsGC.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/runtime/commandLineFlagConstraintsGC.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -56,12 +56,15 @@ #endif // INCLUDE_ALL_GCS Flag::Error ParGCStridesPerThreadConstraintFunc(uintx value, bool verbose); +Flag::Error ParGCCardsPerStrideChunkConstraintFunc(intx value, bool verbose); Flag::Error CMSOldPLABMinConstraintFunc(size_t value, bool verbose); Flag::Error CMSOldPLABMaxConstraintFunc(size_t value, bool verbose); Flag::Error MarkStackSizeConstraintFunc(size_t value, bool verbose); Flag::Error CMSPrecleanDenominatorConstraintFunc(uintx value, bool verbose); Flag::Error CMSPrecleanNumeratorConstraintFunc(uintx value, bool verbose); +Flag::Error CMSSamplingGrainConstraintFunc(uintx value, bool verbose); Flag::Error CMSWorkQueueDrainThresholdConstraintFunc(uintx value, bool verbose); +Flag::Error CMSBitMapYieldQuantumConstraintFunc(size_t value, bool verbose); Flag::Error MaxGCPauseMillisConstraintFunc(uintx value, bool verbose); Flag::Error GCPauseIntervalMillisConstraintFunc(uintx value, bool verbose); Flag::Error InitialBootClassLoaderMetaspaceSizeConstraintFunc(size_t value, bool verbose); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/runtime/commandLineFlagConstraintsRuntime.cpp --- a/hotspot/src/share/vm/runtime/commandLineFlagConstraintsRuntime.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/runtime/commandLineFlagConstraintsRuntime.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2016 Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -130,3 +130,36 @@ return Flag::SUCCESS; } } + +static inline Flag::Error sharedConstraintFunc(const char *name, size_t value, size_t taken, bool verbose) { + size_t available = (MAX_SHARED_DELTA-(taken+SHARED_PAGE)); + if (value > available) { + CommandLineError::print(verbose, + "%s (" SIZE_FORMAT ") must be " + "smaller than or equal to (" SIZE_FORMAT ")\n", + name, value, available); + return Flag::VIOLATES_CONSTRAINT; + } else { + return Flag::SUCCESS; + } +} + +Flag::Error SharedReadWriteSizeConstraintFunc(size_t value, bool verbose) { + size_t taken = (SharedReadOnlySize+SharedMiscDataSize+SharedMiscCodeSize); + return sharedConstraintFunc("SharedReadWriteSize", value, taken, verbose); +} + +Flag::Error SharedReadOnlySizeConstraintFunc(size_t value, bool verbose) { + size_t taken = (SharedReadWriteSize+SharedMiscDataSize+SharedMiscCodeSize); + return sharedConstraintFunc("SharedReadOnlySize", value, taken, verbose); +} + +Flag::Error SharedMiscDataSizeConstraintFunc(size_t value, bool verbose) { + size_t taken = (SharedReadWriteSize+SharedReadOnlySize+SharedMiscCodeSize); + return sharedConstraintFunc("SharedMiscDataSize", value, taken, verbose); +} + +Flag::Error SharedMiscCodeSizeConstraintFunc(size_t value, bool verbose) { + size_t taken = (SharedReadWriteSize+SharedReadOnlySize+SharedMiscDataSize); + return sharedConstraintFunc("SharedMiscCodeSize", value, taken, verbose); +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/runtime/commandLineFlagConstraintsRuntime.hpp --- a/hotspot/src/share/vm/runtime/commandLineFlagConstraintsRuntime.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/runtime/commandLineFlagConstraintsRuntime.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 2015, 2016 Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -45,4 +45,9 @@ Flag::Error PerfDataSamplingIntervalFunc(intx value, bool verbose); +Flag::Error SharedReadWriteSizeConstraintFunc(size_t value, bool verbose); +Flag::Error SharedReadOnlySizeConstraintFunc(size_t value, bool verbose); +Flag::Error SharedMiscDataSizeConstraintFunc(size_t value, bool verbose); +Flag::Error SharedMiscCodeSizeConstraintFunc(size_t value, bool verbose); + #endif /* SHARE_VM_RUNTIME_COMMANDLINEFLAGCONSTRAINTSRUNTIME_HPP */ diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/runtime/commandLineFlagRangeList.cpp --- a/hotspot/src/share/vm/runtime/commandLineFlagRangeList.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/runtime/commandLineFlagRangeList.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -27,6 +27,7 @@ #include "classfile/symbolTable.hpp" #include "gc/shared/referenceProcessor.hpp" #include "runtime/arguments.hpp" +#include "runtime/commandLineFlagConstraintList.hpp" #include "runtime/commandLineFlagRangeList.hpp" #include "runtime/os.hpp" #include "runtime/task.hpp" @@ -378,12 +379,18 @@ return found; } -void CommandLineFlagRangeList::print(const char* name, outputStream* st, bool unspecified) { +void CommandLineFlagRangeList::print(outputStream* st, const char* name, RangeStrFunc default_range_str_func) { CommandLineFlagRange* range = CommandLineFlagRangeList::find(name); if (range != NULL) { range->print(st); - } else if (unspecified == true) { - st->print("[ ... ]"); + } else { + CommandLineFlagConstraint* constraint = CommandLineFlagConstraintList::find(name); + if (constraint != NULL) { + assert(default_range_str_func!=NULL, "default_range_str_func must be provided"); + st->print("%s", default_range_str_func()); + } else { + st->print("[ ... ]"); + } } } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/runtime/commandLineFlagRangeList.hpp --- a/hotspot/src/share/vm/runtime/commandLineFlagRangeList.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/runtime/commandLineFlagRangeList.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -71,7 +71,7 @@ static CommandLineFlagRange* at(int i) { return (_ranges != NULL) ? _ranges->at(i) : NULL; } static CommandLineFlagRange* find(const char* name); static void add(CommandLineFlagRange* range) { _ranges->append(range); } - static void print(const char* name, outputStream* st, bool unspecified = false); + static void print(outputStream* st, const char* name, RangeStrFunc default_range_str_func); // Check the final values of all flags for ranges. static bool check_ranges(); }; diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/runtime/compilationPolicy.cpp --- a/hotspot/src/share/vm/runtime/compilationPolicy.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/runtime/compilationPolicy.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,6 +27,7 @@ #include "code/nmethod.hpp" #include "code/scopeDesc.hpp" #include "interpreter/interpreter.hpp" +#include "memory/resourceArea.hpp" #include "oops/methodData.hpp" #include "oops/method.hpp" #include "oops/oop.inline.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/runtime/fieldType.cpp --- a/hotspot/src/share/vm/runtime/fieldType.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/runtime/fieldType.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,7 @@ #include "precompiled.hpp" #include "classfile/systemDictionary.hpp" #include "memory/oopFactory.hpp" +#include "memory/resourceArea.hpp" #include "oops/oop.inline.hpp" #include "oops/typeArrayKlass.hpp" #include "runtime/fieldType.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/runtime/fprofiler.cpp --- a/hotspot/src/share/vm/runtime/fprofiler.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/runtime/fprofiler.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -29,6 +29,7 @@ #include "gc/shared/collectedHeap.inline.hpp" #include "interpreter/interpreter.hpp" #include "memory/allocation.inline.hpp" +#include "memory/resourceArea.hpp" #include "memory/universe.inline.hpp" #include "oops/oop.inline.hpp" #include "oops/symbol.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/runtime/globals.cpp --- a/hotspot/src/share/vm/runtime/globals.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/runtime/globals.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -84,6 +84,56 @@ MATERIALIZE_FLAGS_EXT +#define DEFAULT_RANGE_STR_CHUNK_SIZE 64 +static char* create_range_str(const char *fmt, ...) { + static size_t string_length = DEFAULT_RANGE_STR_CHUNK_SIZE; + static char* range_string = NEW_C_HEAP_ARRAY(char, string_length, mtLogging); + + int size_needed = 0; + do { + va_list args; + va_start(args, fmt); + size_needed = jio_vsnprintf(range_string, string_length, fmt, args); + va_end(args); + + if (size_needed < 0) { + string_length += DEFAULT_RANGE_STR_CHUNK_SIZE; + range_string = REALLOC_C_HEAP_ARRAY(char, range_string, string_length, mtLogging); + guarantee(range_string != NULL, "create_range_str string should not be NULL"); + } + } while (size_needed < 0); + + return range_string; +} + +const char* Flag::get_int_default_range_str() { + return create_range_str("[ " INT32_FORMAT_W(-25) " ... " INT32_FORMAT_W(25) " ]", INT_MIN, INT_MAX); +} + +const char* Flag::get_uint_default_range_str() { + return create_range_str("[ " UINT32_FORMAT_W(-25) " ... " UINT32_FORMAT_W(25) " ]", 0, UINT_MAX); +} + +const char* Flag::get_intx_default_range_str() { + return create_range_str("[ " INTX_FORMAT_W(-25) " ... " INTX_FORMAT_W(25) " ]", min_intx, max_intx); +} + +const char* Flag::get_uintx_default_range_str() { + return create_range_str("[ " UINTX_FORMAT_W(-25) " ... " UINTX_FORMAT_W(25) " ]", 0, max_uintx); +} + +const char* Flag::get_uint64_t_default_range_str() { + return create_range_str("[ " UINT64_FORMAT_W(-25) " ... 
" UINT64_FORMAT_W(25) " ]", 0, uint64_t(max_juint)); +} + +const char* Flag::get_size_t_default_range_str() { + return create_range_str("[ " SIZE_FORMAT_W(-25) " ... " SIZE_FORMAT_W(25) " ]", 0, SIZE_MAX); +} + +const char* Flag::get_double_default_range_str() { + return create_range_str("[ %-25.3f ... %25.3f ]", DBL_MIN, DBL_MAX); +} + static bool is_product_build() { #ifdef PRODUCT return true; @@ -405,7 +455,25 @@ } else if (!is_bool() && !is_ccstr()) { st->print("%9s %-50s ", _type, _name); - CommandLineFlagRangeList::print(_name, st, true); + RangeStrFunc func = NULL; + if (is_int()) { + func = Flag::get_int_default_range_str; + } else if (is_uint()) { + func = Flag::get_uint_default_range_str; + } else if (is_intx()) { + func = Flag::get_intx_default_range_str; + } else if (is_uintx()) { + func = Flag::get_uintx_default_range_str; + } else if (is_uint64_t()) { + func = Flag::get_uint64_t_default_range_str; + } else if (is_size_t()) { + func = Flag::get_size_t_default_range_str; + } else if (is_double()) { + func = Flag::get_double_default_range_str; + } else { + ShouldNotReachHere(); + } + CommandLineFlagRangeList::print(st, _name, func); st->print(" %-20s", " "); print_kind(st); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/runtime/globals.hpp --- a/hotspot/src/share/vm/runtime/globals.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/runtime/globals.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -224,6 +224,9 @@ typedef const char* ccstr; typedef const char* ccstrlist; // represents string arguments which accumulate +// function type that will construct default range string +typedef const char* (*RangeStrFunc)(void); + struct Flag { enum Flags { // value origin @@ -305,6 +308,14 @@ static Flag* find_flag(const char* name, size_t length, bool allow_locked = false, bool return_flag = false); static Flag* fuzzy_match(const char* name, size_t length, bool allow_locked = false); + static const char* get_int_default_range_str(); + static const char* get_uint_default_range_str(); + static const char* get_intx_default_range_str(); + static const char* get_uintx_default_range_str(); + static const char* get_uint64_t_default_range_str(); + static const char* get_size_t_default_range_str(); + static const char* get_double_default_range_str(); + void check_writable(); bool is_bool() const; @@ -727,7 +738,7 @@ "Control whether SHA instructions can be used " \ "on SPARC, on ARM and on x86") \ \ - diagnostic(bool, UseGHASHIntrinsics, false, \ + product(bool, UseGHASHIntrinsics, false, \ "Use intrinsics for GHASH versions of crypto") \ \ product(size_t, LargePageSizeInBytes, 0, \ @@ -797,27 +808,27 @@ product(bool, UseInlineCaches, true, \ "Use Inline Caches for virtual calls ") \ \ - diagnostic(bool, InlineArrayCopy, true, \ + develop(bool, InlineArrayCopy, true, \ "Inline arraycopy native that is known to be part of " \ "base library DLL") \ \ - diagnostic(bool, InlineObjectHash, true, \ + develop(bool, InlineObjectHash, true, \ "Inline Object::hashCode() native that is known to be part " \ "of base library DLL") \ \ - diagnostic(bool, InlineNatives, true, \ + develop(bool, InlineNatives, true, \ "Inline natives that are known to be part of base library DLL") \ \ - diagnostic(bool, InlineMathNatives, true, \ + develop(bool, InlineMathNatives, true, \ "Inline SinD, CosD, etc.") \ \ - diagnostic(bool, InlineClassNatives, true, \ + develop(bool, InlineClassNatives, true, \ "Inline Class.isInstance, etc") \ \ - diagnostic(bool, InlineThreadNatives, true, \ + develop(bool, 
InlineThreadNatives, true, \ "Inline Thread.currentThread, etc") \ \ - diagnostic(bool, InlineUnsafeOps, true, \ + develop(bool, InlineUnsafeOps, true, \ "Inline memory ops (native methods) from Unsafe") \ \ product(bool, CriticalJNINatives, true, \ @@ -826,34 +837,34 @@ notproduct(bool, StressCriticalJNINatives, false, \ "Exercise register saving code in critical natives") \ \ - diagnostic(bool, UseAESIntrinsics, false, \ + product(bool, UseAESIntrinsics, false, \ "Use intrinsics for AES versions of crypto") \ \ - diagnostic(bool, UseAESCTRIntrinsics, false, \ + product(bool, UseAESCTRIntrinsics, false, \ "Use intrinsics for the paralleled version of AES/CTR crypto") \ \ - diagnostic(bool, UseSHA1Intrinsics, false, \ + product(bool, UseSHA1Intrinsics, false, \ "Use intrinsics for SHA-1 crypto hash function. " \ "Requires that UseSHA is enabled.") \ \ - diagnostic(bool, UseSHA256Intrinsics, false, \ + product(bool, UseSHA256Intrinsics, false, \ "Use intrinsics for SHA-224 and SHA-256 crypto hash functions. " \ "Requires that UseSHA is enabled.") \ \ - diagnostic(bool, UseSHA512Intrinsics, false, \ + product(bool, UseSHA512Intrinsics, false, \ "Use intrinsics for SHA-384 and SHA-512 crypto hash functions. " \ "Requires that UseSHA is enabled.") \ \ - diagnostic(bool, UseCRC32Intrinsics, false, \ + product(bool, UseCRC32Intrinsics, false, \ "use intrinsics for java.util.zip.CRC32") \ \ - diagnostic(bool, UseCRC32CIntrinsics, false, \ + product(bool, UseCRC32CIntrinsics, false, \ "use intrinsics for java.util.zip.CRC32C") \ \ - diagnostic(bool, UseAdler32Intrinsics, false, \ + product(bool, UseAdler32Intrinsics, false, \ "use intrinsics for java.util.zip.Adler32") \ \ - diagnostic(bool, UseVectorizedMismatchIntrinsic, false, \ + product(bool, UseVectorizedMismatchIntrinsic, false, \ "Enables intrinsification of ArraysSupport.vectorizedMismatch()") \ \ diagnostic(ccstrlist, DisableIntrinsic, "", \ @@ -951,9 +962,6 @@ notproduct(bool, PrintMallocFree, false, \ "Trace calls to C heap malloc/free allocation") \ \ - product(bool, PrintOopAddress, false, \ - "Always print the location of the oop") \ - \ notproduct(bool, VerifyCodeCache, false, \ "Verify code cache on memory allocation/deallocation") \ \ @@ -990,9 +998,6 @@ develop(bool, PrintVMMessages, true, \ "Print VM messages on console") \ \ - diagnostic(bool, VerboseVerification, false, \ - "Display detailed verification details") \ - \ notproduct(uintx, ErrorHandlerTest, 0, \ "If > 0, provokes an error after VM initialization; the value " \ "determines which error to provoke. 
See test_error_handler() " \ @@ -1052,9 +1057,6 @@ "directory) of the dump file (defaults to java_pid.hprof " \ "in the working directory)") \ \ - develop(size_t, HeapDumpSegmentSize, 1*G, \ - "Approximate segment size when generating a segmented heap dump") \ - \ develop(bool, BreakAtWarning, false, \ "Execute breakpoint upon encountering VM warning") \ \ @@ -1460,9 +1462,6 @@ develop(bool, TimeOopMap2, false, \ "Time calls to GenerateOopMap::compute_map() individually") \ \ - develop(bool, TraceMonitorMismatch, false, \ - "Trace monitor matching failures during OopMapGeneration") \ - \ develop(bool, TraceOopMapRewrites, false, \ "Trace rewriting of method oops during oop map generation") \ \ @@ -1472,9 +1471,6 @@ develop(bool, TraceCompiledIC, false, \ "Trace changes of compiled IC") \ \ - develop(bool, TraceClearedExceptions, false, \ - "Print when an exception is forcibly cleared") \ - \ /* gc */ \ \ product(bool, UseSerialGC, false, \ @@ -1633,6 +1629,7 @@ "The number of cards in each chunk of the parallel chunks used " \ "during card table scanning") \ range(1, max_intx) \ + constraint(ParGCCardsPerStrideChunkConstraintFunc,AfterMemoryInit)\ \ product(uintx, OldPLABWeight, 50, \ "Percentage (0-100) used to weight the current sample when " \ @@ -1904,7 +1901,8 @@ \ product(uintx, CMSSamplingGrain, 16*K, \ "The minimum distance between eden samples for CMS (see above)") \ - range(1, max_uintx) \ + range(ObjectAlignmentInBytes, max_uintx) \ + constraint(CMSSamplingGrainConstraintFunc,AfterMemoryInit) \ \ product(bool, CMSScavengeBeforeRemark, false, \ "Attempt scavenge before the CMS remark step") \ @@ -1929,6 +1927,7 @@ "Bitmap operations should process at most this many bits " \ "between yields") \ range(1, max_uintx) \ + constraint(CMSBitMapYieldQuantumConstraintFunc,AfterMemoryInit) \ \ product(bool, CMSPrintChunksInDump, false, \ "If logging for the \"gc\" and \"promotion\" tags is enabled on" \ @@ -2067,9 +2066,6 @@ develop(uintx, MetadataAllocationFailALotInterval, 1000, \ "Metadata allocation failure a lot interval") \ \ - develop(bool, TraceMetadataChunkAllocation, false, \ - "Trace chunk metadata allocations") \ - \ notproduct(bool, ExecuteInternalVMTests, false, \ "Enable execution of internal VM tests") \ \ @@ -2223,10 +2219,10 @@ "Decay factor to TenuredGenerationSizeIncrement") \ range(1, max_uintx) \ \ - product(uintx, MaxGCPauseMillis, max_uintx, \ + product(uintx, MaxGCPauseMillis, max_uintx - 1, \ "Adaptive size policy maximum GC pause time goal in millisecond, "\ "or (G1 Only) the maximum GC time per MMU time slice") \ - range(1, max_uintx) \ + range(1, max_uintx - 1) \ constraint(MaxGCPauseMillisConstraintFunc,AfterMemoryInit) \ \ product(uintx, GCPauseIntervalMillis, 0, \ @@ -2390,12 +2386,6 @@ product(bool, IgnoreEmptyClassPaths, false, \ "Ignore empty path elements in -classpath") \ \ - product(bool, TraceClassLoadingPreorder, false, \ - "Trace all classes loaded in order referenced (not loaded)") \ - \ - product_rw(bool, TraceLoaderConstraints, false, \ - "Trace loader constraints") \ - \ product(size_t, InitialBootClassLoaderMetaspaceSize, \ NOT_LP64(2200*K) LP64_ONLY(4*M), \ "Initial size of the boot class loader data metaspace") \ @@ -2414,18 +2404,12 @@ manageable(bool, PrintClassHistogram, false, \ "Print a histogram of class instances") \ \ - develop(bool, TraceWorkGang, false, \ - "Trace activities of work gangs") \ - \ develop(bool, TraceGCTaskManager, false, \ "Trace actions of the GC task manager") \ \ develop(bool, TraceGCTaskQueue, false, \ "Trace 
actions of the GC task queues") \ \ - diagnostic(bool, TraceGCTaskThread, false, \ - "Trace actions of the GC task threads") \ - \ develop(bool, TraceParallelOldGCMarkingPhase, false, \ "Trace marking phase in ParallelOldGC") \ \ @@ -2516,9 +2500,6 @@ "generate locking/unlocking code for synchronized methods and " \ "monitors") \ \ - develop(bool, GenerateCompilerNullChecks, true, \ - "Generate explicit null checks for loads/stores/calls") \ - \ develop(bool, GenerateRangeChecks, true, \ "Generate range checks for array accesses") \ \ @@ -2545,10 +2526,6 @@ LP64_ONLY(range(-1, max_intx/MICROUNITS)) \ NOT_LP64(range(-1, max_intx)) \ \ - product(bool, TraceSafepointCleanupTime, false, \ - "Print the break down of clean up tasks performed during " \ - "safepoint") \ - \ product(bool, Inline, true, \ "Enable inlining") \ \ @@ -2780,10 +2757,6 @@ "Produce histogram of IC misses") \ \ /* interpreter */ \ - develop(bool, ClearInterpreterLocals, false, \ - "Always clear local variables of interpreter activations upon " \ - "entry") \ - \ product_pd(bool, RewriteBytecodes, \ "Allow rewriting of bytecodes (bytecodes are not immutable)") \ \ @@ -3267,7 +3240,8 @@ range(0, max_uintx) \ \ product_pd(size_t, MetaspaceSize, \ - "Initial size of Metaspaces (in bytes)") \ + "Initial threshold (in bytes) at which a garbage collection " \ + "is done to reduce Metaspace usage") \ constraint(MetaspaceSizeConstraintFunc,AfterErgo) \ \ product(size_t, MaxMetaspaceSize, max_uintx, \ @@ -3293,6 +3267,11 @@ range(0, 100) \ constraint(MaxHeapFreeRatioConstraintFunc,AfterErgo) \ \ + product(bool, ShrinkHeapInSteps, true, \ + "When disabled, informs the GC to shrink the java heap directly" \ + " to the target size at the next full GC rather than requiring" \ + " smaller steps during multiple full GCs.") \ + \ product(intx, SoftRefLRUPolicyMSPerMB, 1000, \ "Number of milliseconds per MB of free space in the heap") \ range(0, max_intx) \ @@ -3986,18 +3965,22 @@ product(size_t, SharedReadWriteSize, DEFAULT_SHARED_READ_WRITE_SIZE, \ "Size of read-write space for metadata (in bytes)") \ range(MIN_SHARED_READ_WRITE_SIZE, MAX_SHARED_READ_WRITE_SIZE) \ + constraint(SharedReadWriteSizeConstraintFunc,AfterErgo) \ \ product(size_t, SharedReadOnlySize, DEFAULT_SHARED_READ_ONLY_SIZE, \ "Size of read-only space for metadata (in bytes)") \ range(MIN_SHARED_READ_ONLY_SIZE, MAX_SHARED_READ_ONLY_SIZE) \ + constraint(SharedReadOnlySizeConstraintFunc,AfterErgo) \ \ product(size_t, SharedMiscDataSize, DEFAULT_SHARED_MISC_DATA_SIZE, \ "Size of the shared miscellaneous data area (in bytes)") \ range(MIN_SHARED_MISC_DATA_SIZE, MAX_SHARED_MISC_DATA_SIZE) \ + constraint(SharedMiscDataSizeConstraintFunc,AfterErgo) \ \ product(size_t, SharedMiscCodeSize, DEFAULT_SHARED_MISC_CODE_SIZE, \ "Size of the shared miscellaneous code area (in bytes)") \ range(MIN_SHARED_MISC_CODE_SIZE, MAX_SHARED_MISC_CODE_SIZE) \ + constraint(SharedMiscCodeSizeConstraintFunc,AfterErgo) \ \ product(size_t, SharedBaseAddress, LP64_ONLY(32*G) \ NOT_LP64(LINUX_ONLY(2*G) NOT_LINUX(0)), \ diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/runtime/globals_extension.hpp --- a/hotspot/src/share/vm/runtime/globals_extension.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/runtime/globals_extension.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved. 
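Note on create_range_str() added to globals.cpp above: it formats the per-type default range strings into a single static, growing C-heap buffer, and jio_vsnprintf reports truncation with a negative return value, which is why the loop enlarges the buffer and retries. A simplified standalone analogue using only the C library (standard vsnprintf returns the required length instead, so the retry test differs; error handling is omitted):

    #include <cstdarg>
    #include <cstdio>
    #include <cstdlib>

    // Grow-and-retry formatting into a static buffer, analogous to
    // create_range_str(); simplified sketch, not the HotSpot code.
    static char* format_growing(const char* fmt, ...) {
      static size_t length = 64;
      static char* buffer = (char*)malloc(length);
      for (;;) {
        va_list args;
        va_start(args, fmt);
        int needed = vsnprintf(buffer, length, fmt, args);
        va_end(args);
        if (needed >= 0 && (size_t)needed < length) {
          return buffer;            // fits, hand out the static buffer
        }
        length += 64;               // too small: grow and retry
        buffer = (char*)realloc(buffer, length);
      }
    }

The returned pointer aliases a static buffer, which matches how Flag::print_on() consumes the default range string immediately after obtaining it.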
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -290,6 +290,12 @@ #define FLAG_SET_CMDLINE(type, name, value) (CommandLineFlagsEx::type##AtPut(FLAG_MEMBER_WITH_TYPE(name,type), (type)(value), Flag::COMMAND_LINE)) #define FLAG_SET_ERGO(type, name, value) (CommandLineFlagsEx::type##AtPut(FLAG_MEMBER_WITH_TYPE(name,type), (type)(value), Flag::ERGONOMIC)) +#define FLAG_SET_ERGO_IF_DEFAULT(type, name, value) \ + do { \ + if (FLAG_IS_DEFAULT(name)) { \ + FLAG_SET_ERGO(type, name, value); \ + } \ + } while (0) // Can't put the following in CommandLineFlags because // of a circular dependency on the enum definition. diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/runtime/init.cpp --- a/hotspot/src/share/vm/runtime/init.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/runtime/init.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -47,6 +47,7 @@ void mutex_init(); void chunkpool_init(); void perfMemory_init(); +void SuspendibleThreadSet_init() NOT_ALL_GCS_RETURN; // Initialization done by Java thread in init_globals() void management_init(); @@ -93,6 +94,7 @@ mutex_init(); chunkpool_init(); perfMemory_init(); + SuspendibleThreadSet_init(); } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/runtime/java.cpp --- a/hotspot/src/share/vm/runtime/java.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/runtime/java.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -37,6 +37,7 @@ #endif #include "logging/log.hpp" #include "memory/oopFactory.hpp" +#include "memory/resourceArea.hpp" #include "memory/universe.hpp" #include "oops/constantPool.hpp" #include "oops/generateOopMap.hpp" @@ -465,7 +466,7 @@ Universe::heap()->stop(); // Print GC/heap related information. 
- LogHandle(gc, heap, exit) log; + Log(gc, heap, exit) log; if (log.is_info()) { ResourceMark rm; Universe::print_on(log.info_stream()); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/runtime/jniHandles.cpp --- a/hotspot/src/share/vm/runtime/jniHandles.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/runtime/jniHandles.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -25,6 +25,7 @@ #include "precompiled.hpp" #include "classfile/systemDictionary.hpp" #include "logging/log.hpp" +#include "memory/iterator.hpp" #include "oops/oop.inline.hpp" #include "prims/jvmtiExport.hpp" #include "runtime/jniHandles.hpp" @@ -128,6 +129,12 @@ } +void JNIHandles::weak_oops_do(OopClosure* f) { + AlwaysTrueClosure always_true; + weak_oops_do(&always_true, f); +} + + void JNIHandles::initialize() { _global_handles = JNIHandleBlock::allocate_block(); _weak_global_handles = JNIHandleBlock::allocate_block(); @@ -185,11 +192,6 @@ } -class AlwaysAliveClosure: public BoolObjectClosure { -public: - bool do_object_b(oop obj) { return true; } -}; - class CountHandleClosure: public OopClosure { private: int _count; @@ -211,9 +213,8 @@ "JNIHandles not initialized"); CountHandleClosure global_handle_count; - AlwaysAliveClosure always_alive; oops_do(&global_handle_count); - weak_oops_do(&always_alive, &global_handle_count); + weak_oops_do(&global_handle_count); st->print_cr("JNI global references: %d", global_handle_count.count()); st->cr(); @@ -230,10 +231,9 @@ void JNIHandles::verify() { VerifyHandleClosure verify_handle; - AlwaysAliveClosure always_alive; oops_do(&verify_handle); - weak_oops_do(&always_alive, &verify_handle); + weak_oops_do(&verify_handle); } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/runtime/jniHandles.hpp --- a/hotspot/src/share/vm/runtime/jniHandles.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/runtime/jniHandles.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -86,6 +86,8 @@ static void oops_do(OopClosure* f); // Traversal of weak global handles. Unreachable oops are cleared. static void weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f); + // Traversal of weak global handles. + static void weak_oops_do(OopClosure* f); }; diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/runtime/logTimer.hpp --- a/hotspot/src/share/vm/runtime/logTimer.hpp Mon Apr 18 16:18:56 2016 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,43 +0,0 @@ -/* - * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- * - */ - -#ifndef SHARE_VM_RUNTIME_LOG_TIMER_HPP -#define SHARE_VM_RUNTIME_LOG_TIMER_HPP - -#include "logging/log.hpp" -#include "runtime/timer.hpp" - -// TraceStartupTime is used for tracing the execution time of a block with logging -// Usage: -// { TraceStartupTime t("block time") -// some_code(); -// } -// - -class TraceStartupTime : public TraceTime { - public: - TraceStartupTime(const char* s) : TraceTime(s, log_is_enabled(Info, startuptime), LogTag::_startuptime) {} -}; - -#endif // SHARE_VM_RUNTIME_LOG_TIMER_HPP diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/runtime/mutexLocker.cpp --- a/hotspot/src/share/vm/runtime/mutexLocker.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/runtime/mutexLocker.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -70,7 +70,6 @@ Monitor* Threads_lock = NULL; Monitor* CGC_lock = NULL; Monitor* STS_lock = NULL; -Monitor* SLT_lock = NULL; Monitor* FullGCCount_lock = NULL; Mutex* SATB_Q_FL_lock = NULL; Monitor* SATB_Q_CBL_mon = NULL; @@ -242,9 +241,6 @@ def(JNIGlobalHandle_lock , Mutex , nonleaf, true, Monitor::_safepoint_check_always); // locks JNIHandleBlockFreeList_lock def(JNICritical_lock , Monitor, nonleaf, true, Monitor::_safepoint_check_always); // used for JNI critical regions def(AdapterHandlerLibrary_lock , Mutex , nonleaf, true, Monitor::_safepoint_check_always); - if (UseConcMarkSweepGC) { - def(SLT_lock , Monitor, nonleaf, false, Monitor::_safepoint_check_never); // used in CMS GC for locking PLL lock - } def(Heap_lock , Monitor, nonleaf+1, false, Monitor::_safepoint_check_sometimes); def(JfieldIdCreation_lock , Mutex , nonleaf+1, true, Monitor::_safepoint_check_always); // jfieldID, Used in VM_Operation diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/runtime/mutexLocker.hpp --- a/hotspot/src/share/vm/runtime/mutexLocker.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/runtime/mutexLocker.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -65,7 +65,6 @@ extern Monitor* CGC_lock; // used for coordination between // fore- & background GC threads. extern Monitor* STS_lock; // used for joining/leaving SuspendibleThreadSet. -extern Monitor* SLT_lock; // used in CMS GC for acquiring PLL extern Monitor* FullGCCount_lock; // in support of "concurrent" full gc extern Mutex* SATB_Q_FL_lock; // Protects SATB Q // buffer free list. diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/runtime/objectMonitor.cpp --- a/hotspot/src/share/vm/runtime/objectMonitor.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/runtime/objectMonitor.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -44,14 +44,6 @@ #include "utilities/macros.hpp" #include "utilities/preserveException.hpp" -#if defined(__GNUC__) && !defined(IA64) && !defined(PPC64) -// Need to inhibit inlining for older versions of GCC to avoid build-time failures - #define NOINLINE __attribute__((noinline)) -#else - #define NOINLINE -#endif - - #ifdef DTRACE_ENABLED // Only bother with this argument setup if dtrace is available @@ -254,7 +246,7 @@ // ----------------------------------------------------------------------------- // Enter support -void NOINLINE ObjectMonitor::enter(TRAPS) { +void ObjectMonitor::enter(TRAPS) { // The following code is ordered to check the most common cases first // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors. 
Thread * const Self = THREAD; @@ -431,7 +423,7 @@ #define MAX_RECHECK_INTERVAL 1000 -void NOINLINE ObjectMonitor::EnterI(TRAPS) { +void ObjectMonitor::EnterI(TRAPS) { Thread * const Self = THREAD; assert(Self->is_Java_thread(), "invariant"); assert(((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant"); @@ -681,7 +673,7 @@ // Knob_Reset and Knob_SpinAfterFutile support and restructuring the // loop accordingly. -void NOINLINE ObjectMonitor::ReenterI(Thread * Self, ObjectWaiter * SelfNode) { +void ObjectMonitor::ReenterI(Thread * Self, ObjectWaiter * SelfNode) { assert(Self != NULL, "invariant"); assert(SelfNode != NULL, "invariant"); assert(SelfNode->_thread == Self, "invariant"); @@ -858,7 +850,7 @@ // ~~~~~~~~ // ::exit() uses a canonical 1-1 idiom with a MEMBAR although some of // the fast-path operators have been optimized so the common ::exit() -// operation is 1-0. See i486.ad fast_unlock(), for instance. +// operation is 1-0, e.g., see macroAssembler_x86.cpp: fast_unlock(). // The code emitted by fast_unlock() elides the usual MEMBAR. This // greatly improves latency -- MEMBAR and CAS having considerable local // latency on modern processors -- but at the cost of "stranding". Absent the @@ -871,7 +863,7 @@ // // The CAS() in enter provides for safety and exclusion, while the CAS or // MEMBAR in exit provides for progress and avoids stranding. 1-0 locking -// eliminates the CAS/MEMBAR from the exist path, but it admits stranding. +// eliminates the CAS/MEMBAR from the exit path, but it admits stranding. // We detect and recover from stranding with timers. // // If a thread transiently strands it'll park until (a) another @@ -894,7 +886,7 @@ // structured the code so the windows are short and the frequency // of such futile wakups is low. -void NOINLINE ObjectMonitor::exit(bool not_suspended, TRAPS) { +void ObjectMonitor::exit(bool not_suspended, TRAPS) { Thread * const Self = THREAD; if (THREAD != _owner) { if (THREAD->is_lock_owned((address) _owner)) { @@ -944,7 +936,6 @@ for (;;) { assert(THREAD == _owner, "invariant"); - if (Knob_ExitPolicy == 0) { // release semantics: prior loads and stores from within the critical section // must not float (reorder) past the following store that drops the lock. 
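Note on FLAG_SET_ERGO_IF_DEFAULT, added to globals_extension.hpp above: it packages the common ergonomics idiom of overriding a flag only when the user has not set it on the command line. A hypothetical call site (the flag and value below are purely illustrative, not part of this changeset):

    // Hand-written form of the idiom:
    if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
      FLAG_SET_ERGO(uintx, MaxGCPauseMillis, 200);
    }

    // Equivalent using the new macro:
    FLAG_SET_ERGO_IF_DEFAULT(uintx, MaxGCPauseMillis, 200);

Wrapping the body in do { ... } while (0) keeps the macro usable as a single statement, for example directly under an if without braces.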
diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/runtime/os.cpp --- a/hotspot/src/share/vm/runtime/os.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/runtime/os.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -37,6 +37,7 @@ #ifdef ASSERT #include "memory/guardedMemory.hpp" #endif +#include "memory/resourceArea.hpp" #include "oops/oop.inline.hpp" #include "prims/jvm.h" #include "prims/jvm_misc.hpp" @@ -61,6 +62,7 @@ #include "utilities/events.hpp" # include +# include OSThread* os::_starting_thread = NULL; address os::_polling_page = NULL; @@ -1282,8 +1284,8 @@ _mem_serialize_page = (volatile int32_t *)page; // We initialize the serialization page shift count here // We assume a cache line size of 64 bytes - assert(SerializePageShiftCount == count, - "thread size changed, fix SerializePageShiftCount constant"); + assert(SerializePageShiftCount == count, "JavaThread size changed; " + "SerializePageShiftCount constant should be %d", count); set_serialize_page_mask((uintptr_t)(vm_page_size() - sizeof(int32_t))); } @@ -1367,6 +1369,131 @@ return page_size_for_region(region_size, min_pages, false); } +static const char* errno_to_string (int e, bool short_text) { + #define ALL_SHARED_ENUMS(X) \ + X(E2BIG, "Argument list too long") \ + X(EACCES, "Permission denied") \ + X(EADDRINUSE, "Address in use") \ + X(EADDRNOTAVAIL, "Address not available") \ + X(EAFNOSUPPORT, "Address family not supported") \ + X(EAGAIN, "Resource unavailable, try again") \ + X(EALREADY, "Connection already in progress") \ + X(EBADF, "Bad file descriptor") \ + X(EBADMSG, "Bad message") \ + X(EBUSY, "Device or resource busy") \ + X(ECANCELED, "Operation canceled") \ + X(ECHILD, "No child processes") \ + X(ECONNABORTED, "Connection aborted") \ + X(ECONNREFUSED, "Connection refused") \ + X(ECONNRESET, "Connection reset") \ + X(EDEADLK, "Resource deadlock would occur") \ + X(EDESTADDRREQ, "Destination address required") \ + X(EDOM, "Mathematics argument out of domain of function") \ + X(EEXIST, "File exists") \ + X(EFAULT, "Bad address") \ + X(EFBIG, "File too large") \ + X(EHOSTUNREACH, "Host is unreachable") \ + X(EIDRM, "Identifier removed") \ + X(EILSEQ, "Illegal byte sequence") \ + X(EINPROGRESS, "Operation in progress") \ + X(EINTR, "Interrupted function") \ + X(EINVAL, "Invalid argument") \ + X(EIO, "I/O error") \ + X(EISCONN, "Socket is connected") \ + X(EISDIR, "Is a directory") \ + X(ELOOP, "Too many levels of symbolic links") \ + X(EMFILE, "Too many open files") \ + X(EMLINK, "Too many links") \ + X(EMSGSIZE, "Message too large") \ + X(ENAMETOOLONG, "Filename too long") \ + X(ENETDOWN, "Network is down") \ + X(ENETRESET, "Connection aborted by network") \ + X(ENETUNREACH, "Network unreachable") \ + X(ENFILE, "Too many files open in system") \ + X(ENOBUFS, "No buffer space available") \ + X(ENODATA, "No message is available on the STREAM head read queue") \ + X(ENODEV, "No such device") \ + X(ENOENT, "No such file or directory") \ + X(ENOEXEC, "Executable file format error") \ + X(ENOLCK, "No locks available") \ + X(ENOLINK, "Reserved") \ + X(ENOMEM, "Not enough space") \ + X(ENOMSG, "No message of the desired type") \ + X(ENOPROTOOPT, "Protocol not available") \ + X(ENOSPC, "No space left on device") \ + X(ENOSR, "No STREAM resources") \ + X(ENOSTR, "Not a STREAM") \ + X(ENOSYS, "Function not supported") \ + X(ENOTCONN, "The socket is not connected") \ + X(ENOTDIR, "Not a directory") \ + X(ENOTEMPTY, "Directory not empty") \ + X(ENOTSOCK, "Not a socket") \ + X(ENOTSUP, "Not supported") \ + X(ENOTTY, 
"Inappropriate I/O control operation") \ + X(ENXIO, "No such device or address") \ + X(EOPNOTSUPP, "Operation not supported on socket") \ + X(EOVERFLOW, "Value too large to be stored in data type") \ + X(EPERM, "Operation not permitted") \ + X(EPIPE, "Broken pipe") \ + X(EPROTO, "Protocol error") \ + X(EPROTONOSUPPORT, "Protocol not supported") \ + X(EPROTOTYPE, "Protocol wrong type for socket") \ + X(ERANGE, "Result too large") \ + X(EROFS, "Read-only file system") \ + X(ESPIPE, "Invalid seek") \ + X(ESRCH, "No such process") \ + X(ETIME, "Stream ioctl() timeout") \ + X(ETIMEDOUT, "Connection timed out") \ + X(ETXTBSY, "Text file busy") \ + X(EWOULDBLOCK, "Operation would block") \ + X(EXDEV, "Cross-device link") + + #define DEFINE_ENTRY(e, text) { e, #e, text }, + + static const struct { + int v; + const char* short_text; + const char* long_text; + } table [] = { + + ALL_SHARED_ENUMS(DEFINE_ENTRY) + + // The following enums are not defined on all platforms. + #ifdef ESTALE + DEFINE_ENTRY(ESTALE, "Reserved") + #endif + #ifdef EDQUOT + DEFINE_ENTRY(EDQUOT, "Reserved") + #endif + #ifdef EMULTIHOP + DEFINE_ENTRY(EMULTIHOP, "Reserved") + #endif + + // End marker. + { -1, "Unknown errno", "Unknown error" } + + }; + + #undef DEFINE_ENTRY + #undef ALL_FLAGS + + int i = 0; + while (table[i].v != -1 && table[i].v != e) { + i ++; + } + + return short_text ? table[i].short_text : table[i].long_text; + +} + +const char* os::strerror(int e) { + return errno_to_string(e, false); +} + +const char* os::errno_name(int e) { + return errno_to_string(e, true); +} + #ifndef PRODUCT void os::trace_page_sizes(const char* str, const size_t* page_sizes, int count) { @@ -1540,8 +1667,8 @@ return res; } -void os::pretouch_memory(char* start, char* end) { - for (volatile char *p = start; p < end; p += os::vm_page_size()) { +void os::pretouch_memory(void* start, void* end) { + for (volatile char *p = (char*)start; p < (char*)end; p += os::vm_page_size()) { *p = 0; } } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/runtime/os.hpp --- a/hotspot/src/share/vm/runtime/os.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/runtime/os.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -325,7 +325,7 @@ // to make the OS back the memory range with actual memory. // Current implementation may not touch the last page if unaligned addresses // are passed. - static void pretouch_memory(char* start, char* end); + static void pretouch_memory(void* start, void* end); enum ProtType { MEM_PROT_NONE, MEM_PROT_READ, MEM_PROT_RW, MEM_PROT_RWX }; static bool protect_memory(char* addr, size_t bytes, ProtType prot, @@ -617,6 +617,22 @@ static size_t lasterror(char *buf, size_t len); static int get_last_error(); + // Replacement for strerror(). + // Will return the english description of the error (e.g. "File not found", as + // suggested in the POSIX standard. + // Will return "Unknown error" for an unknown errno value. + // Will not attempt to localize the returned string. + // Will always return a valid string which is a static constant. + // Will not change the value of errno. + static const char* strerror(int e); + + // Will return the literalized version of the given errno (e.g. "EINVAL" + // for EINVAL). + // Will return "Unknown error" for an unknown errno value. + // Will always return a valid string which is a static constant. + // Will not change the value of errno. + static const char* errno_name(int e); + // Determines whether the calling process is being debugged by a user-mode debugger. 
static bool is_debugger_attached(); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/runtime/reflection.cpp --- a/hotspot/src/share/vm/runtime/reflection.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/runtime/reflection.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -76,9 +76,9 @@ const char * to = to_class->external_name(); // print in a single call to reduce interleaving between threads if (source_file != NULL) { - log_info(classresolve)("%s %s %s:%d (reflection)", from, to, source_file, line_number); + log_debug(classresolve)("%s %s %s:%d (reflection)", from, to, source_file, line_number); } else { - log_info(classresolve)("%s %s (reflection)", from, to); + log_debug(classresolve)("%s %s (reflection)", from, to); } } } @@ -487,7 +487,7 @@ is_same_class_package(current_class, new_class)) { return ACCESS_OK; } - // Allow all accesses from sun/reflect/MagicAccessorImpl subclasses to + // Allow all accesses from jdk/internal/reflect/MagicAccessorImpl subclasses to // succeed trivially. if (current_class->is_subclass_of(SystemDictionary::reflect_MagicAccessorImpl_klass())) { return ACCESS_OK; @@ -698,7 +698,7 @@ return true; } - // Allow all accesses from sun/reflect/MagicAccessorImpl subclasses to + // Allow all accesses from jdk/internal/reflect/MagicAccessorImpl subclasses to // succeed trivially. if (current_class->is_subclass_of(SystemDictionary::reflect_MagicAccessorImpl_klass())) { return true; @@ -769,7 +769,7 @@ Handle(THREAD, protection_domain), true, CHECK_NULL); - if (log_is_enabled(Info, classresolve)) { + if (log_is_enabled(Debug, classresolve)) { trace_class_resolution(k); } return k->java_mirror(); @@ -824,7 +824,7 @@ Handle(THREAD, k->protection_domain()), true, CHECK_(Handle())); - if (log_is_enabled(Info, classresolve)) { + if (log_is_enabled(Debug, classresolve)) { trace_class_resolution(result); } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/runtime/reflectionUtils.cpp --- a/hotspot/src/share/vm/runtime/reflectionUtils.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/runtime/reflectionUtils.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -76,9 +76,9 @@ int offset; offset = java_lang_Throwable::get_backtrace_offset(); _filtered_fields->append(new FilteredField(SystemDictionary::Throwable_klass(), offset)); - offset = sun_reflect_ConstantPool::oop_offset(); + offset = reflect_ConstantPool::oop_offset(); _filtered_fields->append(new FilteredField(SystemDictionary::reflect_ConstantPool_klass(), offset)); - offset = sun_reflect_UnsafeStaticFieldAccessorImpl::base_offset(); + offset = reflect_UnsafeStaticFieldAccessorImpl::base_offset(); _filtered_fields->append(new FilteredField(SystemDictionary::reflect_UnsafeStaticFieldAccessorImpl_klass(), offset)); } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/runtime/safepoint.cpp --- a/hotspot/src/share/vm/runtime/safepoint.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/runtime/safepoint.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
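Note on the os::strerror()/os::errno_name() declarations added to os.hpp above: per the comments there, both return static constant strings backed by the table in os.cpp, never localize, and do not change errno. A hypothetical call site (the surrounding code is illustrative only):

    if (::close(fd) == -1) {
      // For errno == EINTR the message expands to:
      //   "close failed: Interrupted function (EINTR)"
      warning("close failed: %s (%s)", os::strerror(errno), os::errno_name(errno));
    }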
* * This code is free software; you can redistribute it and/or modify it @@ -54,7 +54,10 @@ #include "runtime/sweeper.hpp" #include "runtime/synchronizer.hpp" #include "runtime/thread.inline.hpp" +#include "runtime/timerTrace.hpp" #include "services/runtimeService.hpp" +#include "trace/tracing.hpp" +#include "trace/traceMacros.hpp" #include "utilities/events.hpp" #include "utilities/macros.hpp" #if INCLUDE_ALL_GCS @@ -79,7 +82,7 @@ // Roll all threads forward to a safepoint and suspend them all void SafepointSynchronize::begin() { - + EventSafepointBegin begin_event; Thread* myThread = Thread::current(); assert(myThread->is_VM_thread(), "Only VM thread may execute a safepoint"); @@ -169,191 +172,218 @@ // between states, the safepointing code will wait for the thread to // block itself when it attempts transitions to a new state. // - _state = _synchronizing; - OrderAccess::fence(); + { + EventSafepointStateSync sync_event; + int initial_running = 0; - // Flush all thread states to memory - if (!UseMembar) { - os::serialize_thread_states(); - } + _state = _synchronizing; + OrderAccess::fence(); + + // Flush all thread states to memory + if (!UseMembar) { + os::serialize_thread_states(); + } - // Make interpreter safepoint aware - Interpreter::notice_safepoints(); + // Make interpreter safepoint aware + Interpreter::notice_safepoints(); - if (DeferPollingPageLoopCount < 0) { - // Make polling safepoint aware - guarantee (PageArmed == 0, "invariant") ; - PageArmed = 1 ; - os::make_polling_page_unreadable(); - } + if (DeferPollingPageLoopCount < 0) { + // Make polling safepoint aware + guarantee (PageArmed == 0, "invariant") ; + PageArmed = 1 ; + os::make_polling_page_unreadable(); + } - // Consider using active_processor_count() ... but that call is expensive. - int ncpus = os::processor_count() ; + // Consider using active_processor_count() ... but that call is expensive. + int ncpus = os::processor_count() ; #ifdef ASSERT - for (JavaThread *cur = Threads::first(); cur != NULL; cur = cur->next()) { - assert(cur->safepoint_state()->is_running(), "Illegal initial state"); - // Clear the visited flag to ensure that the critical counts are collected properly. - cur->set_visited_for_critical_count(false); - } + for (JavaThread *cur = Threads::first(); cur != NULL; cur = cur->next()) { + assert(cur->safepoint_state()->is_running(), "Illegal initial state"); + // Clear the visited flag to ensure that the critical counts are collected properly. 
+ cur->set_visited_for_critical_count(false); + } #endif // ASSERT - if (SafepointTimeout) - safepoint_limit_time = os::javaTimeNanos() + (jlong)SafepointTimeoutDelay * MICROUNITS; + if (SafepointTimeout) + safepoint_limit_time = os::javaTimeNanos() + (jlong)SafepointTimeoutDelay * MICROUNITS; + + // Iterate through all threads until it have been determined how to stop them all at a safepoint + unsigned int iterations = 0; + int steps = 0 ; + while(still_running > 0) { + for (JavaThread *cur = Threads::first(); cur != NULL; cur = cur->next()) { + assert(!cur->is_ConcurrentGC_thread(), "A concurrent GC thread is unexpectly being suspended"); + ThreadSafepointState *cur_state = cur->safepoint_state(); + if (cur_state->is_running()) { + cur_state->examine_state_of_thread(); + if (!cur_state->is_running()) { + still_running--; + // consider adjusting steps downward: + // steps = 0 + // steps -= NNN + // steps >>= 1 + // steps = MIN(steps, 2000-100) + // if (iterations != 0) steps -= NNN + } + if (log_is_enabled(Trace, safepoint)) { + ResourceMark rm; + cur_state->print_on(Log(safepoint)::trace_stream()); + } + } + } + + if (iterations == 0) { + initial_running = still_running; + if (PrintSafepointStatistics) { + begin_statistics(nof_threads, still_running); + } + } + + if (still_running > 0) { + // Check for if it takes to long + if (SafepointTimeout && safepoint_limit_time < os::javaTimeNanos()) { + print_safepoint_timeout(_spinning_timeout); + } - // Iterate through all threads until it have been determined how to stop them all at a safepoint - unsigned int iterations = 0; - int steps = 0 ; - while(still_running > 0) { - for (JavaThread *cur = Threads::first(); cur != NULL; cur = cur->next()) { - assert(!cur->is_ConcurrentGC_thread(), "A concurrent GC thread is unexpectly being suspended"); - ThreadSafepointState *cur_state = cur->safepoint_state(); - if (cur_state->is_running()) { - cur_state->examine_state_of_thread(); - if (!cur_state->is_running()) { - still_running--; - // consider adjusting steps downward: - // steps = 0 - // steps -= NNN - // steps >>= 1 - // steps = MIN(steps, 2000-100) - // if (iterations != 0) steps -= NNN + // Spin to avoid context switching. + // There's a tension between allowing the mutators to run (and rendezvous) + // vs spinning. As the VM thread spins, wasting cycles, it consumes CPU that + // a mutator might otherwise use profitably to reach a safepoint. Excessive + // spinning by the VM thread on a saturated system can increase rendezvous latency. + // Blocking or yielding incur their own penalties in the form of context switching + // and the resultant loss of $ residency. + // + // Further complicating matters is that yield() does not work as naively expected + // on many platforms -- yield() does not guarantee that any other ready threads + // will run. As such we revert to naked_short_sleep() after some number of iterations. + // nakes_short_sleep() is implemented as a short unconditional sleep. + // Typical operating systems round a "short" sleep period up to 10 msecs, so sleeping + // can actually increase the time it takes the VM thread to detect that a system-wide + // stop-the-world safepoint has been reached. In a pathological scenario such as that + // described in CR6415670 the VMthread may sleep just before the mutator(s) become safe. + // In that case the mutators will be stalled waiting for the safepoint to complete and the + // the VMthread will be sleeping, waiting for the mutators to rendezvous. 
The VMthread + // will eventually wake up and detect that all mutators are safe, at which point + // we'll again make progress. + // + // Beware too that that the VMThread typically runs at elevated priority. + // Its default priority is higher than the default mutator priority. + // Obviously, this complicates spinning. + // + // Note too that on Windows XP SwitchThreadTo() has quite different behavior than Sleep(0). + // Sleep(0) will _not yield to lower priority threads, while SwitchThreadTo() will. + // + // See the comments in synchronizer.cpp for additional remarks on spinning. + // + // In the future we might: + // 1. Modify the safepoint scheme to avoid potentially unbounded spinning. + // This is tricky as the path used by a thread exiting the JVM (say on + // on JNI call-out) simply stores into its state field. The burden + // is placed on the VM thread, which must poll (spin). + // 2. Find something useful to do while spinning. If the safepoint is GC-related + // we might aggressively scan the stacks of threads that are already safe. + // 3. Use Solaris schedctl to examine the state of the still-running mutators. + // If all the mutators are ONPROC there's no reason to sleep or yield. + // 4. YieldTo() any still-running mutators that are ready but OFFPROC. + // 5. Check system saturation. If the system is not fully saturated then + // simply spin and avoid sleep/yield. + // 6. As still-running mutators rendezvous they could unpark the sleeping + // VMthread. This works well for still-running mutators that become + // safe. The VMthread must still poll for mutators that call-out. + // 7. Drive the policy on time-since-begin instead of iterations. + // 8. Consider making the spin duration a function of the # of CPUs: + // Spin = (((ncpus-1) * M) + K) + F(still_running) + // Alternately, instead of counting iterations of the outer loop + // we could count the # of threads visited in the inner loop, above. + // 9. On windows consider using the return value from SwitchThreadTo() + // to drive subsequent spin/SwitchThreadTo()/Sleep(N) decisions. 
+ + if (int(iterations) == DeferPollingPageLoopCount) { + guarantee (PageArmed == 0, "invariant") ; + PageArmed = 1 ; + os::make_polling_page_unreadable(); } - if (log_is_enabled(Trace, safepoint)) { - ResourceMark rm; - cur_state->print_on(LogHandle(safepoint)::debug_stream()); + + // Instead of (ncpus > 1) consider either (still_running < (ncpus + EPSILON)) or + // ((still_running + _waiting_to_block - TryingToBlock)) < ncpus) + ++steps ; + if (ncpus > 1 && steps < SafepointSpinBeforeYield) { + SpinPause() ; // MP-Polite spin + } else + if (steps < DeferThrSuspendLoopCount) { + os::naked_yield() ; + } else { + os::naked_short_sleep(1); + } + + iterations ++ ; + } + assert(iterations < (uint)max_jint, "We have been iterating in the safepoint loop too long"); + } + assert(still_running == 0, "sanity check"); + + if (PrintSafepointStatistics) { + update_statistics_on_spin_end(); + } + + if (sync_event.should_commit()) { + sync_event.set_safepointId(safepoint_counter()); + sync_event.set_initialThreadCount(initial_running); + sync_event.set_runningThreadCount(_waiting_to_block); + sync_event.set_iterations(iterations); + sync_event.commit(); + } + } //EventSafepointStateSync + + // wait until all threads are stopped + { + EventSafepointWaitBlocked wait_blocked_event; + int initial_waiting_to_block = _waiting_to_block; + + while (_waiting_to_block > 0) { + log_debug(safepoint)("Waiting for %d thread(s) to block", _waiting_to_block); + if (!SafepointTimeout || timeout_error_printed) { + Safepoint_lock->wait(true); // true, means with no safepoint checks + } else { + // Compute remaining time + jlong remaining_time = safepoint_limit_time - os::javaTimeNanos(); + + // If there is no remaining time, then there is an error + if (remaining_time < 0 || Safepoint_lock->wait(true, remaining_time / MICROUNITS)) { + print_safepoint_timeout(_blocking_timeout); } } } - - if (PrintSafepointStatistics && iterations == 0) { - begin_statistics(nof_threads, still_running); - } - - if (still_running > 0) { - // Check for if it takes to long - if (SafepointTimeout && safepoint_limit_time < os::javaTimeNanos()) { - print_safepoint_timeout(_spinning_timeout); - } + assert(_waiting_to_block == 0, "sanity check"); - // Spin to avoid context switching. - // There's a tension between allowing the mutators to run (and rendezvous) - // vs spinning. As the VM thread spins, wasting cycles, it consumes CPU that - // a mutator might otherwise use profitably to reach a safepoint. Excessive - // spinning by the VM thread on a saturated system can increase rendezvous latency. - // Blocking or yielding incur their own penalties in the form of context switching - // and the resultant loss of $ residency. - // - // Further complicating matters is that yield() does not work as naively expected - // on many platforms -- yield() does not guarantee that any other ready threads - // will run. As such we revert to naked_short_sleep() after some number of iterations. - // nakes_short_sleep() is implemented as a short unconditional sleep. - // Typical operating systems round a "short" sleep period up to 10 msecs, so sleeping - // can actually increase the time it takes the VM thread to detect that a system-wide - // stop-the-world safepoint has been reached. In a pathological scenario such as that - // described in CR6415670 the VMthread may sleep just before the mutator(s) become safe. 
- // In that case the mutators will be stalled waiting for the safepoint to complete and the - // the VMthread will be sleeping, waiting for the mutators to rendezvous. The VMthread - // will eventually wake up and detect that all mutators are safe, at which point - // we'll again make progress. - // - // Beware too that that the VMThread typically runs at elevated priority. - // Its default priority is higher than the default mutator priority. - // Obviously, this complicates spinning. - // - // Note too that on Windows XP SwitchThreadTo() has quite different behavior than Sleep(0). - // Sleep(0) will _not yield to lower priority threads, while SwitchThreadTo() will. - // - // See the comments in synchronizer.cpp for additional remarks on spinning. - // - // In the future we might: - // 1. Modify the safepoint scheme to avoid potentially unbounded spinning. - // This is tricky as the path used by a thread exiting the JVM (say on - // on JNI call-out) simply stores into its state field. The burden - // is placed on the VM thread, which must poll (spin). - // 2. Find something useful to do while spinning. If the safepoint is GC-related - // we might aggressively scan the stacks of threads that are already safe. - // 3. Use Solaris schedctl to examine the state of the still-running mutators. - // If all the mutators are ONPROC there's no reason to sleep or yield. - // 4. YieldTo() any still-running mutators that are ready but OFFPROC. - // 5. Check system saturation. If the system is not fully saturated then - // simply spin and avoid sleep/yield. - // 6. As still-running mutators rendezvous they could unpark the sleeping - // VMthread. This works well for still-running mutators that become - // safe. The VMthread must still poll for mutators that call-out. - // 7. Drive the policy on time-since-begin instead of iterations. - // 8. Consider making the spin duration a function of the # of CPUs: - // Spin = (((ncpus-1) * M) + K) + F(still_running) - // Alternately, instead of counting iterations of the outer loop - // we could count the # of threads visited in the inner loop, above. - // 9. On windows consider using the return value from SwitchThreadTo() - // to drive subsequent spin/SwitchThreadTo()/Sleep(N) decisions. 
- - if (int(iterations) == DeferPollingPageLoopCount) { - guarantee (PageArmed == 0, "invariant") ; - PageArmed = 1 ; - os::make_polling_page_unreadable(); - } - - // Instead of (ncpus > 1) consider either (still_running < (ncpus + EPSILON)) or - // ((still_running + _waiting_to_block - TryingToBlock)) < ncpus) - ++steps ; - if (ncpus > 1 && steps < SafepointSpinBeforeYield) { - SpinPause() ; // MP-Polite spin - } else - if (steps < DeferThrSuspendLoopCount) { - os::naked_yield() ; - } else { - os::naked_short_sleep(1); - } - - iterations ++ ; - } - assert(iterations < (uint)max_jint, "We have been iterating in the safepoint loop too long"); - } - assert(still_running == 0, "sanity check"); - - if (PrintSafepointStatistics) { - update_statistics_on_spin_end(); - } - - // wait until all threads are stopped - while (_waiting_to_block > 0) { - log_debug(safepoint)("Waiting for %d thread(s) to block", _waiting_to_block); - if (!SafepointTimeout || timeout_error_printed) { - Safepoint_lock->wait(true); // true, means with no safepoint checks - } else { - // Compute remaining time - jlong remaining_time = safepoint_limit_time - os::javaTimeNanos(); - - // If there is no remaining time, then there is an error - if (remaining_time < 0 || Safepoint_lock->wait(true, remaining_time / MICROUNITS)) { - print_safepoint_timeout(_blocking_timeout); +#ifndef PRODUCT + if (SafepointTimeout) { + jlong current_time = os::javaTimeNanos(); + if (safepoint_limit_time < current_time) { + tty->print_cr("# SafepointSynchronize: Finished after " + INT64_FORMAT_W(6) " ms", + ((current_time - safepoint_limit_time) / MICROUNITS + + (jlong)SafepointTimeoutDelay)); } } - } - assert(_waiting_to_block == 0, "sanity check"); - -#ifndef PRODUCT - if (SafepointTimeout) { - jlong current_time = os::javaTimeNanos(); - if (safepoint_limit_time < current_time) { - tty->print_cr("# SafepointSynchronize: Finished after " - INT64_FORMAT_W(6) " ms", - ((current_time - safepoint_limit_time) / MICROUNITS + - (jlong)SafepointTimeoutDelay)); - } - } #endif - assert((_safepoint_counter & 0x1) == 0, "must be even"); - assert(Threads_lock->owned_by_self(), "must hold Threads_lock"); - _safepoint_counter ++; + assert((_safepoint_counter & 0x1) == 0, "must be even"); + assert(Threads_lock->owned_by_self(), "must hold Threads_lock"); + _safepoint_counter ++; + + // Record state + _state = _synchronized; - // Record state - _state = _synchronized; + OrderAccess::fence(); - OrderAccess::fence(); + if (wait_blocked_event.should_commit()) { + wait_blocked_event.set_safepointId(safepoint_counter()); + wait_blocked_event.set_runningThreadCount(initial_waiting_to_block); + wait_blocked_event.commit(); + } + } // EventSafepointWaitBlocked #ifdef ASSERT for (JavaThread *cur = Threads::first(); cur != NULL; cur = cur->next()) { @@ -377,17 +407,32 @@ } // Call stuff that needs to be run when a safepoint is just about to be completed - do_cleanup_tasks(); + { + EventSafepointCleanup cleanup_event; + do_cleanup_tasks(); + if (cleanup_event.should_commit()) { + cleanup_event.set_safepointId(safepoint_counter()); + cleanup_event.commit(); + } + } if (PrintSafepointStatistics) { // Record how much time spend on the above cleanup tasks update_statistics_on_cleanup_end(os::javaTimeNanos()); } + if (begin_event.should_commit()) { + begin_event.set_safepointId(safepoint_counter()); + begin_event.set_totalThreadCount(nof_threads); + begin_event.set_jniCriticalThreadCount(_current_jni_active_count); + begin_event.commit(); + } } // Wake up all threads, so they are 
ready to resume execution after the safepoint // operation has been carried out void SafepointSynchronize::end() { + EventSafepointEnd event; + int safepoint_id = safepoint_counter(); // Keep the odd counter as "id" assert(Threads_lock->owned_by_self(), "must hold Threads_lock"); assert((_safepoint_counter & 0x1) == 1, "must be odd"); @@ -474,6 +519,11 @@ // record this time so VMThread can keep track how much time has elapsed // since last safepoint. _end_of_last_safepoint = os::javaTimeMillis(); + + if (event.should_commit()) { + event.set_safepointId(safepoint_id); + event.commit(); + } } bool SafepointSynchronize::is_cleanup_needed() { @@ -482,44 +532,71 @@ return false; } - +static void event_safepoint_cleanup_task_commit(EventSafepointCleanupTask& event, const char* name) { + if (event.should_commit()) { + event.set_safepointId(SafepointSynchronize::safepoint_counter()); + event.set_name(name); + event.commit(); + } +} // Various cleaning tasks that should be done periodically at safepoints void SafepointSynchronize::do_cleanup_tasks() { { - TraceTime t1("deflating idle monitors", TraceSafepointCleanupTime); + const char* name = "deflating idle monitors"; + EventSafepointCleanupTask event; + TraceTime timer(name, TRACETIME_LOG(Info, safepointcleanup)); ObjectSynchronizer::deflate_idle_monitors(); + event_safepoint_cleanup_task_commit(event, name); } { - TraceTime t2("updating inline caches", TraceSafepointCleanupTime); + const char* name = "updating inline caches"; + EventSafepointCleanupTask event; + TraceTime timer(name, TRACETIME_LOG(Info, safepointcleanup)); InlineCacheBuffer::update_inline_caches(); + event_safepoint_cleanup_task_commit(event, name); } { - TraceTime t3("compilation policy safepoint handler", TraceSafepointCleanupTime); + const char* name = "compilation policy safepoint handler"; + EventSafepointCleanupTask event; + TraceTime timer("compilation policy safepoint handler", TRACETIME_LOG(Info, safepointcleanup)); CompilationPolicy::policy()->do_safepoint_work(); + event_safepoint_cleanup_task_commit(event, name); } { - TraceTime t4("mark nmethods", TraceSafepointCleanupTime); + const char* name = "mark nmethods"; + EventSafepointCleanupTask event; + TraceTime timer(name, TRACETIME_LOG(Info, safepointcleanup)); NMethodSweeper::mark_active_nmethods(); + event_safepoint_cleanup_task_commit(event, name); } if (SymbolTable::needs_rehashing()) { - TraceTime t5("rehashing symbol table", TraceSafepointCleanupTime); + const char* name = "rehashing symbol table"; + EventSafepointCleanupTask event; + TraceTime timer(name, TRACETIME_LOG(Info, safepointcleanup)); SymbolTable::rehash_table(); + event_safepoint_cleanup_task_commit(event, name); } if (StringTable::needs_rehashing()) { - TraceTime t6("rehashing string table", TraceSafepointCleanupTime); + const char* name = "rehashing string table"; + EventSafepointCleanupTask event; + TraceTime timer(name, TRACETIME_LOG(Info, safepointcleanup)); StringTable::rehash_table(); + event_safepoint_cleanup_task_commit(event, name); } { // CMS delays purging the CLDG until the beginning of the next safepoint and to // make sure concurrent sweep is done - TraceTime t7("purging class loader data graph", TraceSafepointCleanupTime); + const char* name = "purging class loader data graph"; + EventSafepointCleanupTask event; + TraceTime timer(name, TRACETIME_LOG(Info, safepointcleanup)); ClassLoaderDataGraph::purge_if_needed(); + event_safepoint_cleanup_task_commit(event, name); } } diff -r 3414aeff4a80 -r ee1b8619eddb 
hotspot/src/share/vm/runtime/safepoint.hpp --- a/hotspot/src/share/vm/runtime/safepoint.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/runtime/safepoint.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -145,6 +145,7 @@ // Query inline static bool is_at_safepoint() { return _state == _synchronized; } inline static bool is_synchronizing() { return _state == _synchronizing; } + inline static int safepoint_counter() { return _safepoint_counter; } inline static bool do_call_back() { return (_state != _not_synchronized); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/runtime/sharedRuntime.cpp --- a/hotspot/src/share/vm/runtime/sharedRuntime.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/runtime/sharedRuntime.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -38,6 +38,7 @@ #include "interpreter/interpreter.hpp" #include "interpreter/interpreterRuntime.hpp" #include "logging/log.hpp" +#include "memory/resourceArea.hpp" #include "memory/universe.inline.hpp" #include "oops/klass.hpp" #include "oops/objArrayKlass.hpp" @@ -993,19 +994,6 @@ return CAST_FROM_FN_PTR(address, &throw_unsatisfied_link_error); } - -#ifndef PRODUCT -JRT_ENTRY(intptr_t, SharedRuntime::trace_bytecode(JavaThread* thread, intptr_t preserve_this_value, intptr_t tos, intptr_t tos2)) - const frame f = thread->last_frame(); - assert(f.is_interpreted_frame(), "must be an interpreted frame"); -#ifndef PRODUCT - methodHandle mh(THREAD, f.interpreter_frame_method()); - BytecodeTracer::trace(mh, f.interpreter_frame_bcp(), tos, tos2); -#endif // !PRODUCT - return preserve_this_value; -JRT_END -#endif // !PRODUCT - JRT_ENTRY_NO_ASYNC(void, SharedRuntime::register_finalizer(JavaThread* thread, oopDesc* obj)) assert(obj->is_oop(), "must be a valid oop"); #if INCLUDE_JVMCI @@ -1981,8 +1969,8 @@ // Handles the uncommon case in locking, i.e., contention or an inflated lock. JRT_BLOCK_ENTRY(void, SharedRuntime::complete_monitor_locking_C(oopDesc* _obj, BasicLock* lock, JavaThread* thread)) // Disable ObjectSynchronizer::quick_enter() in default config - // until JDK-8077392 is resolved. - if ((SyncFlags & 256) != 0 && !SafepointSynchronize::is_synchronizing()) { + // on AARCH64 until JDK-8153107 is resolved. + if (AARCH64_ONLY((SyncFlags & 256) != 0 &&) !SafepointSynchronize::is_synchronizing()) { // Only try quick_enter() if we're not trying to reach a safepoint // so that the calling thread reaches the safepoint more quickly. if (ObjectSynchronizer::quick_enter(_obj, thread, lock)) return; @@ -2966,7 +2954,7 @@ Method* moop = fr.interpreter_frame_method(); int max_locals = moop->max_locals(); // Allocate temp buffer, 1 word per local & 2 per active monitor - int buf_size_words = max_locals + active_monitor_count*2; + int buf_size_words = max_locals + active_monitor_count * BasicObjectLock::size(); intptr_t *buf = NEW_C_HEAP_ARRAY(intptr_t,buf_size_words, mtCode); // Copy the locals. Order is preserved so that loading of longs works. 
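Note on the quick_enter gating change in SharedRuntime::complete_monitor_locking_C above: AARCH64_ONLY() (from utilities/macros.hpp) expands to its argument on AArch64 builds and to nothing on all other platforms, so the single source line yields two different conditions. Spelled out:

    // Non-AArch64 builds: AARCH64_ONLY(...) vanishes, so quick_enter() is
    // tried whenever no safepoint is being requested.
    if (!SafepointSynchronize::is_synchronizing()) {
      if (ObjectSynchronizer::quick_enter(_obj, thread, lock)) return;
    }

    // AArch64 builds: the (SyncFlags & 256) gate is kept until JDK-8153107
    // is resolved.
    if ((SyncFlags & 256) != 0 && !SafepointSynchronize::is_synchronizing()) {
      if (ObjectSynchronizer::quick_enter(_obj, thread, lock)) return;
    }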
diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/runtime/sharedRuntime.hpp --- a/hotspot/src/share/vm/runtime/sharedRuntime.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/runtime/sharedRuntime.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -269,9 +269,6 @@ static address native_method_throw_unsatisfied_link_error_entry(); static address native_method_throw_unsupported_operation_exception_entry(); - // bytecode tracing is only used by the TraceBytecodes - static intptr_t trace_bytecode(JavaThread* thread, intptr_t preserve_this_value, intptr_t tos, intptr_t tos2) PRODUCT_RETURN0; - static oop retrieve_receiver(Symbol* sig, frame caller); static void register_finalizer(JavaThread* thread, oopDesc* obj); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/runtime/signature.cpp --- a/hotspot/src/share/vm/runtime/signature.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/runtime/signature.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,6 +26,7 @@ #include "classfile/symbolTable.hpp" #include "classfile/systemDictionary.hpp" #include "memory/oopFactory.hpp" +#include "memory/resourceArea.hpp" #include "oops/instanceKlass.hpp" #include "oops/oop.inline.hpp" #include "oops/symbol.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/runtime/simpleThresholdPolicy.cpp --- a/hotspot/src/share/vm/runtime/simpleThresholdPolicy.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/runtime/simpleThresholdPolicy.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -233,6 +233,14 @@ if (level == CompLevel_none) { return; } + +#if INCLUDE_JVMCI + // We can't compile with a JVMCI compiler until the module system is initialized. + if (level == CompLevel_full_optimization && UseJVMCICompiler && !Universe::is_module_initialized()) { + return; + } +#endif + // Check if the method can be compiled. If it cannot be compiled with C1, continue profiling // in the interpreter and then compile with C2 (the transition function will request that, // see common() ). 
If the method cannot be compiled with C2 but still can with C1, compile it with diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/runtime/stubRoutines.cpp --- a/hotspot/src/share/vm/runtime/stubRoutines.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/runtime/stubRoutines.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -28,7 +28,7 @@ #include "memory/resourceArea.hpp" #include "oops/oop.inline.hpp" #include "runtime/interfaceSupport.hpp" -#include "runtime/logTimer.hpp" +#include "runtime/timerTrace.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/stubRoutines.hpp" #include "utilities/copy.hpp" @@ -183,7 +183,7 @@ void StubRoutines::initialize1() { if (_code1 == NULL) { ResourceMark rm; - TraceStartupTime timer("StubRoutines generation 1"); + TraceTime timer("StubRoutines generation 1", TRACETIME_LOG(Info, startuptime)); _code1 = BufferBlob::create("StubRoutines (1)", code_size1); if (_code1 == NULL) { vm_exit_out_of_memory(code_size1, OOM_MALLOC_ERROR, "CodeCache: no room for StubRoutines (1)"); @@ -276,7 +276,7 @@ void StubRoutines::initialize2() { if (_code2 == NULL) { ResourceMark rm; - TraceStartupTime timer("StubRoutines generation 2"); + TraceTime timer("StubRoutines generation 2", TRACETIME_LOG(Info, startuptime)); _code2 = BufferBlob::create("StubRoutines (2)", code_size2); if (_code2 == NULL) { vm_exit_out_of_memory(code_size2, OOM_MALLOC_ERROR, "CodeCache: no room for StubRoutines (2)"); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/runtime/synchronizer.cpp --- a/hotspot/src/share/vm/runtime/synchronizer.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/runtime/synchronizer.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -48,13 +48,6 @@ #include "utilities/events.hpp" #include "utilities/preserveException.hpp" -#if defined(__GNUC__) && !defined(PPC64) -// Need to inhibit inlining for older versions of GCC to avoid build-time failures - #define NOINLINE __attribute__((noinline)) -#else - #define NOINLINE -#endif - // The "core" versions of monitor enter and exit reside in this file. // The interpreter and compilers contain specialized transliterated // variants of the enter-exit fast-path operations. See i486.ad fast_lock(), @@ -211,7 +204,7 @@ // quick_enter() as our thread state remains _in_Java. bool ObjectSynchronizer::quick_enter(oop obj, Thread * Self, - BasicLock * Lock) { + BasicLock * lock) { assert(!SafepointSynchronize::is_at_safepoint(), "invariant"); assert(Self->is_Java_thread(), "invariant"); assert(((JavaThread *) Self)->thread_state() == _thread_in_Java, "invariant"); @@ -234,6 +227,18 @@ return true; } + // This Java Monitor is inflated so obj's header will never be + // displaced to this thread's BasicLock. Make the displaced header + // non-NULL so this BasicLock is not seen as recursive nor as + // being locked. We do this unconditionally so that this thread's + // BasicLock cannot be mis-interpreted by any stack walkers. 
For + // performance reasons, stack walkers generally first check for + // Biased Locking in the object's header, the second check is for + // stack-locking in the object's header, the third check is for + // recursive stack-locking in the displaced header in the BasicLock, + // and last are the inflated Java Monitor (ObjectMonitor) checks. + lock->set_displaced_header(markOopDesc::unused_mark()); + if (owner == NULL && Atomic::cmpxchg_ptr(Self, &(m->_owner), NULL) == NULL) { assert(m->_recursions == 0, "invariant"); @@ -278,38 +283,52 @@ } void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) { - assert(!object->mark()->has_bias_pattern(), "should not see bias pattern here"); - // if displaced header is null, the previous enter is recursive enter, no-op + markOop mark = object->mark(); + // We cannot check for Biased Locking if we are racing an inflation. + assert(mark == markOopDesc::INFLATING() || + !mark->has_bias_pattern(), "should not see bias pattern here"); + markOop dhw = lock->displaced_header(); - markOop mark; if (dhw == NULL) { - // Recursive stack-lock. - // Diagnostics -- Could be: stack-locked, inflating, inflated. - mark = object->mark(); - assert(!mark->is_neutral(), "invariant"); - if (mark->has_locker() && mark != markOopDesc::INFLATING()) { - assert(THREAD->is_lock_owned((address)mark->locker()), "invariant"); + // If the displaced header is NULL, then this exit matches up with + // a recursive enter. No real work to do here except for diagnostics. +#ifndef PRODUCT + if (mark != markOopDesc::INFLATING()) { + // Only do diagnostics if we are not racing an inflation. Simply + // exiting a recursive enter of a Java Monitor that is being + // inflated is safe; see the has_monitor() comment below. + assert(!mark->is_neutral(), "invariant"); + assert(!mark->has_locker() || + THREAD->is_lock_owned((address)mark->locker()), "invariant"); + if (mark->has_monitor()) { + // The BasicLock's displaced_header is marked as a recursive + // enter and we have an inflated Java Monitor (ObjectMonitor). + // This is a special case where the Java Monitor was inflated + // after this thread entered the stack-lock recursively. When a + // Java Monitor is inflated, we cannot safely walk the Java + // Monitor owner's stack and update the BasicLocks because a + // Java Monitor can be asynchronously inflated by a thread that + // does not own the Java Monitor. + ObjectMonitor * m = mark->monitor(); + assert(((oop)(m->object()))->mark() == mark, "invariant"); + assert(m->is_entered(THREAD), "invariant"); + } } - if (mark->has_monitor()) { - ObjectMonitor * m = mark->monitor(); - assert(((oop)(m->object()))->mark() == mark, "invariant"); - assert(m->is_entered(THREAD), "invariant"); - } +#endif return; } - mark = object->mark(); - - // If the object is stack-locked by the current thread, try to - // swing the displaced header from the box back to the mark. if (mark == (markOop) lock) { + // If the object is stack-locked by the current thread, try to + // swing the displaced header from the BasicLock back to the mark. assert(dhw->is_neutral(), "invariant"); - if ((markOop) Atomic::cmpxchg_ptr (dhw, object->mark_addr(), mark) == mark) { - TEVENT(fast_exit: release stacklock); + if ((markOop) Atomic::cmpxchg_ptr(dhw, object->mark_addr(), mark) == mark) { + TEVENT(fast_exit: release stack-lock); return; } } + // We have to take the slow-path of possible inflation and then exit. 
ObjectSynchronizer::inflate(THREAD, object, inflate_cause_vm_internal)->exit(true, THREAD); @@ -1038,7 +1057,7 @@ assert(free_tally == Self->omFreeCount, "free count off"); } -ObjectMonitor * NOINLINE ObjectSynchronizer::omAlloc(Thread * Self) { +ObjectMonitor* ObjectSynchronizer::omAlloc(Thread * Self) { // A large MAXPRIVATE value reduces both list lock contention // and list coherency traffic, but also tends to increase the // number of objectMonitors in circulation as well as the STW @@ -1313,7 +1332,7 @@ inflate_cause_vm_internal); } -ObjectMonitor * NOINLINE ObjectSynchronizer::inflate(Thread * Self, +ObjectMonitor* ObjectSynchronizer::inflate(Thread * Self, oop object, const InflateCause cause) { @@ -1742,6 +1761,7 @@ void do_monitor(ObjectMonitor* mid) { if (mid->owner() == THREAD) { if (ObjectMonitor::Knob_VerifyMatch != 0) { + ResourceMark rm; Handle obj((oop) mid->object()); tty->print("INFO: unexpected locked object:"); javaVFrame::print_locked_object_class_name(tty, obj, "locked"); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/runtime/thread.cpp --- a/hotspot/src/share/vm/runtime/thread.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/runtime/thread.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -35,6 +35,7 @@ #include "compiler/compileTask.hpp" #include "gc/shared/gcId.hpp" #include "gc/shared/gcLocker.inline.hpp" +#include "gc/shared/referencePendingListLocker.hpp" #include "gc/shared/workgroup.hpp" #include "interpreter/interpreter.hpp" #include "interpreter/linkResolver.hpp" @@ -44,6 +45,7 @@ #include "logging/logConfiguration.hpp" #include "memory/metaspaceShared.hpp" #include "memory/oopFactory.hpp" +#include "memory/resourceArea.hpp" #include "memory/universe.inline.hpp" #include "oops/instanceKlass.hpp" #include "oops/objArrayOop.hpp" @@ -68,7 +70,7 @@ #include "runtime/java.hpp" #include "runtime/javaCalls.hpp" #include "runtime/jniPeriodicChecker.hpp" -#include "runtime/logTimer.hpp" +#include "runtime/timerTrace.hpp" #include "runtime/memprofiler.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/objectMonitor.hpp" @@ -152,7 +154,6 @@ // Current thread is maintained as a thread-local variable THREAD_LOCAL_DECL Thread* Thread::_thr_current = NULL; #endif - // Class hierarchy // - Thread // - VMThread @@ -791,10 +792,6 @@ handle_area()->oops_do(f); } -void Thread::nmethods_do(CodeBlobClosure* cf) { - // no nmethods in a generic thread... -} - void Thread::metadata_handles_do(void f(Metadata*)) { // Only walk the Handles in Thread. if (metadata_handles() != NULL) { @@ -2093,7 +2090,7 @@ if (log_is_enabled(Info, exceptions)) { ResourceMark rm; - outputStream* logstream = LogHandle(exceptions)::info_stream(); + outputStream* logstream = Log(exceptions)::info_stream(); logstream->print("Async. 
exception installed at runtime exit (" INTPTR_FORMAT ")", p2i(this)); if (has_last_Java_frame()) { frame f = last_frame(); @@ -2827,8 +2824,6 @@ } void JavaThread::nmethods_do(CodeBlobClosure* cf) { - Thread::nmethods_do(cf); // (super method is a no-op) - assert((!has_last_Java_frame() && java_call_counter() == 0) || (has_last_Java_frame() && java_call_counter() > 0), "wrong java_sp info!"); @@ -2887,7 +2882,9 @@ // Called by Threads::print() for VM_PrintThreads operation void JavaThread::print_on(outputStream *st) const { - st->print("\"%s\" ", get_thread_name()); + st->print_raw("\""); + st->print_raw(get_thread_name()); + st->print_raw("\" "); oop thread_oop = threadObj(); if (thread_oop != NULL) { st->print("#" INT64_FORMAT " ", java_lang_Thread::thread_id(thread_oop)); @@ -3301,6 +3298,7 @@ : JavaThread(&sweeper_thread_entry) { _scanned_nmethod = NULL; } + void CodeCacheSweeperThread::oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) { JavaThread::oops_do(f, cld_f, cf); if (_scanned_nmethod != NULL && cf != NULL) { @@ -3311,6 +3309,16 @@ } } +void CodeCacheSweeperThread::nmethods_do(CodeBlobClosure* cf) { + JavaThread::nmethods_do(cf); + if (_scanned_nmethod != NULL && cf != NULL) { + // Safepoints can occur when the sweeper is scanning an nmethod so + // process it here to make sure it isn't unloaded in the middle of + // a scan. + cf->do_code_blob(_scanned_nmethod); + } +} + // ======= Threads ======== @@ -3416,7 +3424,7 @@ } void Threads::initialize_java_lang_classes(JavaThread* main_thread, TRAPS) { - TraceStartupTime timer("Initialize java.lang classes"); + TraceTime timer("Initialize java.lang classes", TRACETIME_LOG(Info, startuptime)); if (EagerXrunInit && Arguments::init_libraries_at_startup()) { create_vm_init_libraries(); @@ -3468,7 +3476,7 @@ } void Threads::initialize_jsr292_core_classes(TRAPS) { - TraceStartupTime timer("Initialize java.lang.invoke classes"); + TraceTime timer("Initialize java.lang.invoke classes", TRACETIME_LOG(Info, startuptime)); initialize_class(vmSymbols::java_lang_invoke_MethodHandle(), CHECK); initialize_class(vmSymbols::java_lang_invoke_MemberName(), CHECK); @@ -3539,7 +3547,7 @@ HOTSPOT_VM_INIT_BEGIN(); // Timing (must come after argument parsing) - TraceStartupTime timer("Create VM"); + TraceTime timer("Create VM", TRACETIME_LOG(Info, startuptime)); // Initialize the os module after parsing the args jint os_init_2_result = os::init_2(); @@ -3628,7 +3636,7 @@ JvmtiExport::transition_pending_onload_raw_monitors(); // Create the VMThread - { TraceStartupTime timer("Start VMThread"); + { TraceTime timer("Start VMThread", TRACETIME_LOG(Info, startuptime)); VMThread::create(); Thread* vmthread = VMThread::vm_thread(); @@ -3703,18 +3711,9 @@ // set_init_completed has just been called, causing exceptions not to be shortcut // anymore. We call vm_exit_during_initialization directly instead. -#if INCLUDE_ALL_GCS - // Support for ConcurrentMarkSweep. This should be cleaned up - // and better encapsulated. The ugly nested if test would go away - // once things are properly refactored. 
XXX YSR - if (UseConcMarkSweepGC || UseG1GC) { - if (UseConcMarkSweepGC) { - ConcurrentMarkSweepThread::makeSurrogateLockerThread(CHECK_JNI_ERR); - } else { - ConcurrentMarkThread::makeSurrogateLockerThread(CHECK_JNI_ERR); - } - } -#endif // INCLUDE_ALL_GCS + // Initialize reference pending list locker + bool needs_locker_thread = Universe::heap()->needs_reference_pending_list_locker_thread(); + ReferencePendingListLocker::initialize(needs_locker_thread, CHECK_JNI_ERR); // Signal Dispatcher needs to be started before VMInit event is posted os::signal_init(); @@ -4348,9 +4347,13 @@ void Threads::nmethods_do(CodeBlobClosure* cf) { ALL_JAVA_THREADS(p) { - p->nmethods_do(cf); + // This is used by the code cache sweeper to mark nmethods that are active + // on the stack of a Java thread. Ignore the sweeper thread itself to avoid + // marking CodeCacheSweeperThread::_scanned_nmethod as active. + if(!p->is_Code_cache_sweeper_thread()) { + p->nmethods_do(cf); + } } - VMThread::vm_thread()->nmethods_do(cf); } void Threads::metadata_do(void f(Metadata*)) { diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/runtime/thread.hpp --- a/hotspot/src/share/vm/runtime/thread.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/runtime/thread.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -509,9 +509,6 @@ } } - // Sweeper support - void nmethods_do(CodeBlobClosure* cf); - // jvmtiRedefineClasses support void metadata_handles_do(void f(Metadata*)); @@ -1649,7 +1646,7 @@ void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf); // Sweeper operations - void nmethods_do(CodeBlobClosure* cf); + virtual void nmethods_do(CodeBlobClosure* cf); // RedefineClasses Support void metadata_do(void f(Metadata*)); @@ -1997,10 +1994,10 @@ bool is_hidden_from_external_view() const { return true; } bool is_Code_cache_sweeper_thread() const { return true; } - // GC support - // Apply "f->do_oop" to all root oops in "this". - // Apply "cf->do_code_blob" (if !NULL) to all code blobs active in frames + + // Prevent GC from unloading _scanned_nmethod void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf); + void nmethods_do(CodeBlobClosure* cf); }; // A thread used for Compilation. 
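The thread.cpp and thread.hpp changes above make nmethods_do() virtual, add an override in CodeCacheSweeperThread that also visits the nmethod currently being scanned, and make Threads::nmethods_do() skip the sweeper thread so that nmethod is not marked as active. Below is a rough standalone sketch of that shape, not HotSpot code; WorkerThread, SweeperThread and Blob are invented names, and std::function stands in for CodeBlobClosure.

#include <functional>
#include <vector>

struct Blob { bool active = false; };
using BlobClosure = std::function<void(Blob*)>;

class WorkerThread {
 public:
  virtual ~WorkerThread() = default;
  virtual bool is_sweeper() const { return false; }
  // Visit the blobs active on this thread's stack.
  virtual void blobs_do(const BlobClosure& f) {
    for (Blob* b : _on_stack) f(b);
  }
 protected:
  std::vector<Blob*> _on_stack;
};

class SweeperThread : public WorkerThread {
 public:
  Blob* _scanned = nullptr;
  bool is_sweeper() const override { return true; }
  void blobs_do(const BlobClosure& f) override {
    WorkerThread::blobs_do(f);
    // Also visit the blob currently being scanned, so a safepoint in the
    // middle of a scan cannot unload it (the _scanned_nmethod case above).
    if (_scanned != nullptr) f(_scanned);
  }
};

// The global walk marks blobs active on Java threads but ignores the
// sweeper thread, so its scanned blob is kept alive without being
// treated as active on a stack.
void mark_active_blobs(const std::vector<WorkerThread*>& threads) {
  for (WorkerThread* t : threads) {
    if (!t->is_sweeper()) {
      t->blobs_do([](Blob* b) { b->active = true; });
    }
  }
}

int main() {
  SweeperThread sweeper;
  Blob scanned;
  sweeper._scanned = &scanned;
  std::vector<WorkerThread*> threads = { &sweeper };
  mark_active_blobs(threads);
  return scanned.active ? 1 : 0;  // stays 0: the sweeper thread was skipped
}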
diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/runtime/timer.cpp --- a/hotspot/src/share/vm/runtime/timer.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/runtime/timer.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -114,54 +114,6 @@ return os::elapsed_counter() - _counter; } -TraceTime::TraceTime(const char* title, - bool doit, - LogTagType tag) { - _active = doit; - _verbose = true; - _tag = tag; - _title = title; - - if (_active) { - _accum = NULL; - _t.start(); - } -} - -TraceTime::TraceTime(const char* title, - elapsedTimer* accumulator, - bool doit, - bool verbose, - LogTagType tag) { - _active = doit; - _verbose = verbose; - _tag = tag; - _title = title; - - if (_active) { - _accum = accumulator; - _t.start(); - } -} - -TraceTime::~TraceTime() { - if (_active) { - _t.stop(); - if (_accum!=NULL) _accum->add(_t); - if (_verbose) { - switch (_tag) { - case LogTag::_startuptime : - log_info(startuptime)("%s, %3.7f secs", _title, _t.seconds()); - break; - case LogTag::__NO_TAG : - default : - tty->print_cr("[%s, %3.7f secs]", _title, _t.seconds()); - tty->flush(); - } - } - } -} - TraceCPUTime::TraceCPUTime(bool doit, bool print_cr, outputStream *logfile) : @@ -216,3 +168,4 @@ _logfile->flush(); } } + diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/runtime/timer.hpp --- a/hotspot/src/share/vm/runtime/timer.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/runtime/timer.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -25,7 +25,6 @@ #ifndef SHARE_VM_RUNTIME_TIMER_HPP #define SHARE_VM_RUNTIME_TIMER_HPP -#include "logging/logTag.hpp" #include "utilities/globalDefinitions.hpp" // Timers for simple measurement. @@ -73,43 +72,6 @@ jlong ticks_since_update() const; }; -// TraceTime is used for tracing the execution time of a block -// Usage: -// { TraceTime t("block time") -// some_code(); -// } -// - -class TraceTime: public StackObj { - private: - bool _active; // do timing - bool _verbose; // report every timing - elapsedTimer _t; // timer - elapsedTimer* _accum; // accumulator - const char* _title; // name of timer - LogTagType _tag; // stream to print to - - public: - // Constructors - TraceTime(const char* title, - bool doit = true, - LogTagType tag = LogTag::__NO_TAG); - TraceTime(const char* title, - elapsedTimer* accumulator, - bool doit = true, - bool verbose = false, - LogTagType tag = LogTag::__NO_TAG); - ~TraceTime(); - - // Accessors - void set_verbose(bool verbose) { _verbose = verbose; } - bool verbose() const { return _verbose; } - - // Activation - void suspend() { if (_active) _t.stop(); } - void resume() { if (_active) _t.start(); } -}; - class TraceCPUTime: public StackObj { private: bool _active; // true if times will be measured and printed diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/runtime/timerTrace.cpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/src/share/vm/runtime/timerTrace.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "runtime/timerTrace.hpp" + +TraceTime::TraceTime(const char* title, + bool doit) { + _active = doit; + _verbose = true; + _title = title; + _print = NULL; + + if (_active) { + _accum = NULL; + _t.start(); + } +} + +TraceTime::TraceTime(const char* title, + elapsedTimer* accumulator, + bool doit, + bool verbose) { + _active = doit; + _verbose = verbose; + _title = title; + _print = NULL; + + if (_active) { + _accum = accumulator; + _t.start(); + } +} + +TraceTime::TraceTime(const char* title, + TraceTimerLogPrintFunc ttlpf) { + _active = ttlpf!= NULL; + _verbose = true; + _title = title; + _print = ttlpf; + + if (_active) { + _accum = NULL; + _t.start(); + } +} + +TraceTime::~TraceTime() { + if (!_active) { + return; + } + _t.stop(); + if (_accum != NULL) { + _accum->add(_t); + } + if (!_verbose) { + return; + } + if (_print) { + _print("%s, %3.7f secs", _title, _t.seconds()); + } else { + tty->print_cr("[%s, %3.7f secs]", _title, _t.seconds()); + tty->flush(); + } +} + diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/runtime/timerTrace.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/src/share/vm/runtime/timerTrace.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_RUNTIME_TIMERTRACE_HPP +#define SHARE_VM_RUNTIME_TIMERTRACE_HPP + +#include "logging/log.hpp" +#include "utilities/globalDefinitions.hpp" + +// TraceTime is used for tracing the execution time of a block +// Usage: +// { +// TraceTime t("some timer", TIMERTRACE_LOG(Info, startuptime, tagX...)); +// some_code(); +// } +// + +typedef void (*TraceTimerLogPrintFunc)(const char*, ...); + +// We need to explicit take address of LogImpl<>write<> and static cast +// due to MSVC is not compliant with templates two-phase lookup +#define TRACETIME_LOG(TT_LEVEL, ...) \ + log_is_enabled(TT_LEVEL, __VA_ARGS__) ? 
static_cast(&LogImpl::write) : (TraceTimerLogPrintFunc)NULL + +class TraceTime: public StackObj { + private: + bool _active; // do timing + bool _verbose; // report every timing + elapsedTimer _t; // timer + elapsedTimer* _accum; // accumulator + const char* _title; // name of timer + TraceTimerLogPrintFunc _print; + + public: + // Constructors + TraceTime(const char* title, + bool doit = true); + + TraceTime(const char* title, + elapsedTimer* accumulator, + bool doit = true, + bool verbose = false); + + TraceTime(const char* title, + TraceTimerLogPrintFunc ttlpf); + + ~TraceTime(); + + // Accessors + void set_verbose(bool verbose) { _verbose = verbose; } + bool verbose() const { return _verbose; } + + // Activation + void suspend() { if (_active) _t.stop(); } + void resume() { if (_active) _t.start(); } +}; + + +#endif // SHARE_VM_RUNTIME_TIMERTRACE_HPP diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/runtime/vmStructs.cpp --- a/hotspot/src/share/vm/runtime/vmStructs.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/runtime/vmStructs.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -54,6 +54,7 @@ #include "gc/shared/genCollectedHeap.hpp" #include "gc/shared/generation.hpp" #include "gc/shared/generationSpec.hpp" +#include "gc/shared/referencePendingListLocker.hpp" #include "gc/shared/space.hpp" #include "interpreter/bytecodeInterpreter.hpp" #include "interpreter/bytecodes.hpp" @@ -384,8 +385,8 @@ nonstatic_field(MethodCounters, _interpreter_profile_limit, int) \ nonstatic_field(MethodCounters, _invoke_mask, int) \ nonstatic_field(MethodCounters, _backedge_mask, int) \ - nonstatic_field(MethodCounters, _interpreter_invocation_count, int) \ - nonstatic_field(MethodCounters, _interpreter_throwout_count, u2) \ + COMPILER2_OR_JVMCI_PRESENT(nonstatic_field(MethodCounters, _interpreter_invocation_count, int)) \ + COMPILER2_OR_JVMCI_PRESENT(nonstatic_field(MethodCounters, _interpreter_throwout_count, u2)) \ nonstatic_field(MethodCounters, _number_of_breakpoints, u2) \ nonstatic_field(MethodCounters, _invocation_counter, InvocationCounter) \ nonstatic_field(MethodCounters, _backedge_counter, InvocationCounter) \ @@ -1692,6 +1693,7 @@ declare_type(JavaThread, Thread) \ declare_type(JvmtiAgentThread, JavaThread) \ declare_type(ServiceThread, JavaThread) \ + declare_type(ReferencePendingListLockerThread, JavaThread) \ declare_type(CompilerThread, JavaThread) \ declare_type(CodeCacheSweeperThread, JavaThread) \ declare_toplevel_type(OSThread) \ diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/runtime/vmThread.cpp --- a/hotspot/src/share/vm/runtime/vmThread.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/runtime/vmThread.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -32,6 +32,7 @@ #include "runtime/interfaceSupport.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/os.hpp" +#include "runtime/safepoint.hpp" #include "runtime/thread.inline.hpp" #include "runtime/vmThread.hpp" #include "runtime/vm_operations.hpp" @@ -352,14 +353,16 @@ op->evaluate(); if (event.should_commit()) { - bool is_concurrent = op->evaluate_concurrently(); + const bool is_concurrent = op->evaluate_concurrently(); + const bool evaluate_at_safepoint = op->evaluate_at_safepoint(); event.set_operation(op->type()); - event.set_safepoint(op->evaluate_at_safepoint()); + event.set_safepoint(evaluate_at_safepoint); event.set_blocking(!is_concurrent); // Only write caller thread information for non-concurrent vm operations. 
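The new runtime/timerTrace files above replace the LogTag-based TraceTime constructors with one that takes a printf-style function pointer (TraceTimerLogPrintFunc); a NULL pointer means the timer stays inactive, and the destructor reports through the supplied function. A simplified, self-contained sketch of that idea follows; ScopedTraceTime and log_info are invented names, std::chrono stands in for elapsedTimer, and the accumulator and verbose variants are omitted.

#include <chrono>
#include <cstdarg>
#include <cstdio>

typedef void (*TraceTimerLogPrintFunc)(const char*, ...);

// Stand-in for the Unified Logging write function selected by TRACETIME_LOG.
static void log_info(const char* fmt, ...) {
  va_list ap;
  va_start(ap, fmt);
  std::vprintf(fmt, ap);
  std::printf("\n");
  va_end(ap);
}

class ScopedTraceTime {
  const char* _title;
  TraceTimerLogPrintFunc _print;
  bool _active;
  std::chrono::steady_clock::time_point _start;
 public:
  ScopedTraceTime(const char* title, TraceTimerLogPrintFunc print)
    : _title(title), _print(print), _active(print != nullptr) {
    if (_active) _start = std::chrono::steady_clock::now();
  }
  ~ScopedTraceTime() {
    if (!_active) return;  // NULL print function: timing disabled
    double secs = std::chrono::duration<double>(
        std::chrono::steady_clock::now() - _start).count();
    _print("%s, %3.7f secs", _title, secs);
  }
};

int main() {
  ScopedTraceTime t("Create VM", log_info);  // pass nullptr to disable
  // ... work being timed ...
  return 0;
}

Passing the log function instead of a tag enum keeps timerTrace.hpp free of logging internals, which is what lets timer.hpp drop its logTag.hpp include in this patch.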
// For concurrent vm operations, the thread id is set to 0 indicating thread is unknown. // This is because the caller thread could have exited already. event.set_caller(is_concurrent ? 0 : THREAD_TRACE_ID(op->calling_thread())); + event.set_safepointId(evaluate_at_safepoint ? SafepointSynchronize::safepoint_counter() : 0); event.commit(); } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/runtime/vm_operations.cpp --- a/hotspot/src/share/vm/runtime/vm_operations.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/runtime/vm_operations.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -59,7 +59,7 @@ outputStream* debugstream; bool enabled = log_is_enabled(Debug, vmoperation); if (enabled) { - debugstream = LogHandle(vmoperation)::debug_stream(); + debugstream = Log(vmoperation)::debug_stream(); debugstream->print("begin "); print_on_error(debugstream); debugstream->cr(); @@ -105,6 +105,14 @@ } } +void VM_ClearICs::doit() { + if (_preserve_static_stubs) { + CodeCache::cleanup_inline_caches(); + } else { + CodeCache::clear_inline_caches(); + } +} + void VM_Deoptimize::doit() { // We do not want any GCs to happen while we are in the middle of this VM operation ResourceMark rm; diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/runtime/vm_operations.hpp --- a/hotspot/src/share/vm/runtime/vm_operations.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/runtime/vm_operations.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -231,9 +231,11 @@ }; class VM_ClearICs: public VM_Operation { + private: + bool _preserve_static_stubs; public: - VM_ClearICs() {} - void doit() { CodeCache::clear_inline_caches(); } + VM_ClearICs(bool preserve_static_stubs) { _preserve_static_stubs = preserve_static_stubs; } + void doit(); VMOp_Type type() const { return VMOp_ClearICs; } }; diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/services/classLoadingService.cpp --- a/hotspot/src/share/vm/services/classLoadingService.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/services/classLoadingService.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -25,6 +25,7 @@ #include "precompiled.hpp" #include "classfile/systemDictionary.hpp" #include "memory/allocation.hpp" +#include "memory/resourceArea.hpp" #include "memory/universe.hpp" #include "oops/oop.inline.hpp" #include "runtime/mutexLocker.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/services/diagnosticCommand.cpp --- a/hotspot/src/share/vm/services/diagnosticCommand.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/services/diagnosticCommand.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -28,6 +28,7 @@ #include "compiler/compileBroker.hpp" #include "compiler/directivesParser.hpp" #include "gc/shared/vmGCOperations.hpp" +#include "memory/resourceArea.hpp" #include "oops/oop.inline.hpp" #include "runtime/globals.hpp" #include "runtime/javaCalls.hpp" @@ -61,17 +62,19 @@ DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); -#if INCLUDE_SERVICES // Heap dumping/inspection supported +#if INCLUDE_SERVICES DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(DCmd_Source_Internal | DCmd_Source_AttachAPI, true, false)); DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, 
true, false)); DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); +#if INCLUDE_JVMTI // Both JVMTI and SERVICES have to be enabled to have this dcmd + DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); +#endif // INCLUDE_JVMTI #endif // INCLUDE_SERVICES #if INCLUDE_JVMTI DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); - DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); #endif // INCLUDE_JVMTI DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); @@ -255,6 +258,7 @@ } } +#if INCLUDE_SERVICES JVMTIAgentLoadDCmd::JVMTIAgentLoadDCmd(outputStream* output, bool heap) : DCmdWithParser(output, heap), _libpath("library path", "Absolute path of the JVMTI agent to load.", @@ -314,6 +318,7 @@ return 0; } } +#endif // INCLUDE_SERVICES void PrintSystemPropertiesDCmd::execute(DCmdSource source, TRAPS) { // load VMSupport diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/services/diagnosticCommand.hpp --- a/hotspot/src/share/vm/services/diagnosticCommand.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/services/diagnosticCommand.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -174,6 +174,8 @@ virtual void execute(DCmdSource source, TRAPS); }; +#if INCLUDE_SERVICES +#if INCLUDE_JVMTI class JVMTIAgentLoadDCmd : public DCmdWithParser { protected: DCmdArgument _libpath; @@ -193,6 +195,8 @@ static int num_arguments(); virtual void execute(DCmdSource source, TRAPS); }; +#endif // INCLUDE_JVMTI +#endif // INCLUDE_SERVICES class VMDynamicLibrariesDCmd : public DCmd { public: diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/services/diagnosticFramework.cpp --- a/hotspot/src/share/vm/services/diagnosticFramework.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/services/diagnosticFramework.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -24,6 +24,7 @@ #include "precompiled.hpp" #include "memory/oopFactory.hpp" +#include "memory/resourceArea.hpp" #include "oops/oop.inline.hpp" #include "runtime/javaCalls.hpp" #include "runtime/mutexLocker.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/services/gcNotifier.cpp --- a/hotspot/src/share/vm/services/gcNotifier.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/services/gcNotifier.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -37,6 +37,7 @@ #include "services/memoryService.hpp" #include "memoryManager.hpp" #include "memory/oopFactory.hpp" +#include "memory/resourceArea.hpp" GCNotificationRequest *GCNotifier::first_request = NULL; GCNotificationRequest *GCNotifier::last_request = NULL; diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/services/heapDumper.cpp --- a/hotspot/src/share/vm/services/heapDumper.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/services/heapDumper.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -29,6 +29,7 @@ #include "gc/shared/gcLocker.inline.hpp" #include "gc/shared/genCollectedHeap.hpp" #include "gc/shared/vmGCOperations.hpp" +#include "memory/resourceArea.hpp" #include "memory/universe.hpp" #include "oops/objArrayKlass.hpp" #include "oops/objArrayOop.inline.hpp" @@ -459,7 +460,7 @@ // if the open failed we record the error if (_fd < 0) { - _error = (char*)os::strdup(strerror(errno)); + _error = (char*)os::strdup(os::strerror(errno)); } } @@ -509,7 +510,7 @@ if (n < 0) { // EINTR cannot happen here, os::write will take care of that - set_error(strerror(errno)); + set_error(os::strerror(errno)); os::close(file_descriptor()); set_file_descriptor(-1); return; diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/services/lowMemoryDetector.cpp --- a/hotspot/src/share/vm/services/lowMemoryDetector.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/services/lowMemoryDetector.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,7 @@ #include "precompiled.hpp" #include "classfile/systemDictionary.hpp" #include "classfile/vmSymbols.hpp" +#include "memory/resourceArea.hpp" #include "oops/oop.inline.hpp" #include "runtime/interfaceSupport.hpp" #include "runtime/java.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/services/threadService.cpp --- a/hotspot/src/share/vm/services/threadService.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/services/threadService.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -27,6 +27,7 @@ #include "memory/allocation.hpp" #include "memory/heapInspection.hpp" #include "memory/oopFactory.hpp" +#include "memory/resourceArea.hpp" #include "oops/instanceKlass.hpp" #include "oops/objArrayOop.inline.hpp" #include "oops/oop.inline.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/shark/sharkCompiler.cpp --- a/hotspot/src/share/vm/shark/sharkCompiler.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/shark/sharkCompiler.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved. 
* Copyright 2008, 2009, 2010, 2011 Red Hat, Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -32,6 +32,7 @@ #include "code/oopRecorder.hpp" #include "compiler/abstractCompiler.hpp" #include "compiler/oopMap.hpp" +#include "memory/resourceArea.hpp" #include "shark/llvmHeaders.hpp" #include "shark/sharkBuilder.hpp" #include "shark/sharkCodeBuffer.hpp"
diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/shark/sharkInliner.cpp --- a/hotspot/src/share/vm/shark/sharkInliner.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/shark/sharkInliner.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved. * Copyright 2009 Red Hat, Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -29,6 +29,7 @@ #include "ci/ciStreams.hpp" #include "interpreter/bytecodes.hpp" #include "memory/allocation.hpp" +#include "memory/resourceArea.hpp" #include "shark/sharkBlock.hpp" #include "shark/sharkConstant.hpp" #include "shark/sharkInliner.hpp"
diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/trace/trace.dtd --- a/hotspot/src/share/vm/trace/trace.dtd Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/trace/trace.dtd Wed Jul 05 21:35:27 2017 +0200 @@ -23,7 +23,7 @@ --> [one removed and one added DTD line; the markup content was lost in extraction]
diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/trace/trace.xml --- a/hotspot/src/share/vm/trace/trace.xml Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/trace/trace.xml Wed Jul 05 21:35:27 2017 +0200 @@ -30,572 +30,10 @@ ]> [562 removed lines and a handful of added lines; the XML markup was lost in extraction]
diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/trace/traceMacros.hpp --- a/hotspot/src/share/vm/trace/traceMacros.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/trace/traceMacros.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -30,6 +30,7 @@ #define EVENT_THREAD_EXIT(thread) #define EVENT_THREAD_DESTRUCT(thread) #define TRACE_KLASS_CREATION(k, p, t) +#define TRACE_KLASS_DEFINITION(k, t) #define TRACE_INIT_KLASS_ID(k) #define TRACE_INIT_MODULE_ID(m)
diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/trace/traceevents.xml --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/src/share/vm/trace/traceevents.xml Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,618 @@ [new 618-line file; its XML markup was lost in extraction]
diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/trace/tracerelationdecls.xml --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/src/share/vm/trace/tracerelationdecls.xml Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,35 @@ [new 35-line file; its XML markup was lost in extraction]
diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/utilities/bitMap.cpp --- a/hotspot/src/share/vm/utilities/bitMap.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/utilities/bitMap.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,19 +29,25 @@ #include "utilities/bitMap.inline.hpp" #include "utilities/copy.hpp" -BitMap::BitMap(bm_word_t* map, idx_t size_in_bits) : - _map(map), _size(size_in_bits), _map_allocator(false) +STATIC_ASSERT(sizeof(BitMap::bm_word_t) == BytesPerWord); // "Implementation assumption." + +BitMap::BitMap(idx_t size_in_bits, bool in_resource_area) : + _map(NULL), _size(0) { - assert(sizeof(bm_word_t) == BytesPerWord, "Implementation assumption."); + resize(size_in_bits, in_resource_area); } +#ifdef ASSERT +void BitMap::verify_index(idx_t index) const { + assert(index < _size, "BitMap index out of bounds"); +} -BitMap::BitMap(idx_t size_in_bits, bool in_resource_area) : - _map(NULL), _size(0), _map_allocator(false) -{ - assert(sizeof(bm_word_t) == BytesPerWord, "Implementation assumption."); - resize(size_in_bits, in_resource_area); +void BitMap::verify_range(idx_t beg_index, idx_t end_index) const { + assert(beg_index <= end_index, "BitMap range error"); + // Note that [0,0) and [size,size) are both valid ranges. + if (end_index != _size) verify_index(end_index); } +#endif // #ifdef ASSERT void BitMap::resize(idx_t size_in_bits, bool in_resource_area) { idx_t old_size_in_words = size_in_words(); @@ -54,7 +60,7 @@ Copy::disjoint_words((HeapWord*)old_map, (HeapWord*) _map, MIN2(old_size_in_words, new_size_in_words)); } else { - _map = _map_allocator.reallocate(new_size_in_words); + _map = ArrayAllocator::reallocate(old_map, old_size_in_words, new_size_in_words); } if (new_size_in_words > old_size_in_words) { @@ -157,8 +163,10 @@ idx_t beg_full_word = word_index_round_up(beg); idx_t end_full_word = word_index(end); - assert(end_full_word - beg_full_word >= 32, - "the range must include at least 32 bytes"); + if (end_full_word - beg_full_word < 32) { + clear_range(beg, end); + return; + } // The range includes at least one full word.
clear_range_within_word(beg, bit_index(beg_full_word)); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/utilities/bitMap.hpp --- a/hotspot/src/share/vm/utilities/bitMap.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/utilities/bitMap.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -48,7 +48,6 @@ } RangeSizeHint; private: - ArrayAllocator _map_allocator; bm_word_t* _map; // First word in bitmap idx_t _size; // Size of bitmap (in bits) @@ -101,9 +100,8 @@ idx_t word_index_round_up(idx_t bit) const; // Verification. - inline void verify_index(idx_t index) const NOT_DEBUG_RETURN; - inline void verify_range(idx_t beg_index, idx_t end_index) const - NOT_DEBUG_RETURN; + void verify_index(idx_t index) const NOT_DEBUG_RETURN; + void verify_range(idx_t beg_index, idx_t end_index) const NOT_DEBUG_RETURN; // Statistics. static idx_t* _pop_count_table; @@ -114,10 +112,10 @@ public: // Constructs a bitmap with no map, and size 0. - BitMap() : _map(NULL), _size(0), _map_allocator(false) {} + BitMap() : _map(NULL), _size(0) {} // Constructs a bitmap with the given map and size. - BitMap(bm_word_t* map, idx_t size_in_bits); + BitMap(bm_word_t* map, idx_t size_in_bits) :_map(map), _size(size_in_bits) {} // Constructs an empty bitmap of the given size (that is, this clears the // new bitmap). Allocates the map array in resource area if @@ -307,36 +305,12 @@ return _map.size() / _bits_per_slot; } - bool is_valid_index(idx_t slot_index, idx_t bit_within_slot_index) { - verify_bit_within_slot_index(bit_within_slot_index); - return (bit_index(slot_index, bit_within_slot_index) < size_in_bits()); - } - - bool at(idx_t slot_index, idx_t bit_within_slot_index) const { - verify_bit_within_slot_index(bit_within_slot_index); - return _map.at(bit_index(slot_index, bit_within_slot_index)); - } - - void set_bit(idx_t slot_index, idx_t bit_within_slot_index) { - verify_bit_within_slot_index(bit_within_slot_index); - _map.set_bit(bit_index(slot_index, bit_within_slot_index)); - } - - void clear_bit(idx_t slot_index, idx_t bit_within_slot_index) { - verify_bit_within_slot_index(bit_within_slot_index); - _map.clear_bit(bit_index(slot_index, bit_within_slot_index)); - } - - void at_put(idx_t slot_index, idx_t bit_within_slot_index, bool value) { - verify_bit_within_slot_index(bit_within_slot_index); - _map.at_put(bit_index(slot_index, bit_within_slot_index), value); - } - - void at_put_grow(idx_t slot_index, idx_t bit_within_slot_index, bool value) { - verify_bit_within_slot_index(bit_within_slot_index); - _map.at_put_grow(bit_index(slot_index, bit_within_slot_index), value); - } - + bool is_valid_index(idx_t slot_index, idx_t bit_within_slot_index); + bool at(idx_t slot_index, idx_t bit_within_slot_index) const; + void set_bit(idx_t slot_index, idx_t bit_within_slot_index); + void clear_bit(idx_t slot_index, idx_t bit_within_slot_index); + void at_put(idx_t slot_index, idx_t bit_within_slot_index, bool value); + void at_put_grow(idx_t slot_index, idx_t bit_within_slot_index, bool value); void clear(); }; diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/utilities/bitMap.inline.hpp --- a/hotspot/src/share/vm/utilities/bitMap.inline.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/utilities/bitMap.inline.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -28,18 +28,6 @@ #include "runtime/atomic.inline.hpp" #include "utilities/bitMap.hpp" -#ifdef ASSERT -inline void BitMap::verify_index(idx_t index) const { - assert(index < _size, "BitMap index out of bounds"); -} - -inline void BitMap::verify_range(idx_t beg_index, idx_t end_index) const { - assert(beg_index <= end_index, "BitMap range error"); - // Note that [0,0) and [size,size) are both valid ranges. - if (end_index != _size) verify_index(end_index); -} -#endif // #ifdef ASSERT - inline void BitMap::set_bit(idx_t bit) { verify_index(bit); *word_addr(bit) |= bit_mask(bit); @@ -105,7 +93,7 @@ } inline void BitMap::clear_range(idx_t beg, idx_t end, RangeSizeHint hint) { - if (hint == small_range && end - beg == 1) { + if (end - beg == 1) { clear_bit(beg); } else { if (hint == large_range) { @@ -344,6 +332,36 @@ return get_next_zero_offset_inline(l_offset, r_offset); } +inline bool BitMap2D::is_valid_index(idx_t slot_index, idx_t bit_within_slot_index) { + verify_bit_within_slot_index(bit_within_slot_index); + return (bit_index(slot_index, bit_within_slot_index) < size_in_bits()); +} + +inline bool BitMap2D::at(idx_t slot_index, idx_t bit_within_slot_index) const { + verify_bit_within_slot_index(bit_within_slot_index); + return _map.at(bit_index(slot_index, bit_within_slot_index)); +} + +inline void BitMap2D::set_bit(idx_t slot_index, idx_t bit_within_slot_index) { + verify_bit_within_slot_index(bit_within_slot_index); + _map.set_bit(bit_index(slot_index, bit_within_slot_index)); +} + +inline void BitMap2D::clear_bit(idx_t slot_index, idx_t bit_within_slot_index) { + verify_bit_within_slot_index(bit_within_slot_index); + _map.clear_bit(bit_index(slot_index, bit_within_slot_index)); +} + +inline void BitMap2D::at_put(idx_t slot_index, idx_t bit_within_slot_index, bool value) { + verify_bit_within_slot_index(bit_within_slot_index); + _map.at_put(bit_index(slot_index, bit_within_slot_index), value); +} + +inline void BitMap2D::at_put_grow(idx_t slot_index, idx_t bit_within_slot_index, bool value) { + verify_bit_within_slot_index(bit_within_slot_index); + _map.at_put_grow(bit_index(slot_index, bit_within_slot_index), value); +} + inline void BitMap2D::clear() { _map.clear(); } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/utilities/debug.cpp --- a/hotspot/src/share/vm/utilities/debug.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/utilities/debug.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -224,6 +224,11 @@ va_end(detail_args); } +void report_vm_status_error(const char* file, int line, const char* error_msg, + int status, const char* detail) { + report_vm_error(file, line, error_msg, "error %s(%d), %s", os::errno_name(status), status, detail); +} + void report_fatal(const char* file, int line, const char* detail_fmt, ...) { if (Debugging || error_is_suppressed(file, line)) return; diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/utilities/debug.hpp --- a/hotspot/src/share/vm/utilities/debug.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/utilities/debug.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -137,7 +137,13 @@ // an extra arg and use strerror to convert it to a meaningful string // like "Invalid argument", "out of memory" etc #define vmassert_status(p, status, msg) \ - vmassert(p, "error %s(%d), %s", strerror(status), status, msg) +do { \ + if (!(p)) { \ + report_vm_status_error(__FILE__, __LINE__, "assert(" #p ") failed", \ + status, msg); \ + BREAKPOINT; \ + } \ +} while (0) // For backward compatibility. #define assert_status(p, status, msg) vmassert_status(p, status, msg) @@ -209,6 +215,8 @@ void report_vm_error(const char* file, int line, const char* error_msg, const char* detail_fmt, ...); #endif +void report_vm_status_error(const char* file, int line, const char* error_msg, + int status, const char* detail); void report_fatal(const char* file, int line, const char* detail_fmt, ...) ATTRIBUTE_PRINTF(3, 4); void report_vm_out_of_memory(const char* file, int line, size_t size, VMErrorType vm_err_type, const char* detail_fmt, ...) ATTRIBUTE_PRINTF(5, 6); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/utilities/exceptions.cpp --- a/hotspot/src/share/vm/utilities/exceptions.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/utilities/exceptions.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -27,6 +27,7 @@ #include "classfile/vmSymbols.hpp" #include "compiler/compileBroker.hpp" #include "logging/log.hpp" +#include "memory/resourceArea.hpp" #include "oops/oop.inline.hpp" #include "runtime/init.hpp" #include "runtime/java.hpp" @@ -52,11 +53,11 @@ } void ThreadShadow::clear_pending_exception() { - if (TraceClearedExceptions) { - if (_pending_exception != NULL) { - tty->print_cr("Thread::clear_pending_exception: cleared exception:"); - _pending_exception->print(); - } + if (_pending_exception != NULL && log_is_enabled(Debug, exceptions)) { + ResourceMark rm; + outputStream* logst = Log(exceptions)::debug_stream(); + logst->print("Thread::clear_pending_exception: cleared exception:"); + _pending_exception->print_on(logst); } _pending_exception = NULL; _exception_file = NULL; @@ -508,12 +509,13 @@ ResourceMark rm; Symbol* message = java_lang_Throwable::detail_message(exception()); if (message != NULL) { - log_info(exceptions)("Exception <%s: %s> (" INTPTR_FORMAT ")\n thrown in %s", + log_info(exceptions)("Exception <%s: %s>\n thrown in %s", exception->print_value_string(), - message->as_C_string(), p2i(exception()), tempst.as_string()); + message->as_C_string(), + tempst.as_string()); } else { - log_info(exceptions)("Exception <%s> (" INTPTR_FORMAT ")\n thrown in %s", + log_info(exceptions)("Exception <%s>\n thrown in %s", exception->print_value_string(), - p2i(exception()), tempst.as_string()); + tempst.as_string()); } } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/utilities/globalDefinitions.cpp --- a/hotspot/src/share/vm/utilities/globalDefinitions.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/utilities/globalDefinitions.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -358,6 +358,20 @@ return size_t(result); } + +// Test that nth_bit macro and friends behave as +// expected, even with low-precedence operators. 
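Two macro-hygiene fixes appear in this hunk and the next one: vmassert_status is rewrapped in do { ... } while (0) so it expands to a single statement, and the nth_bit family gains parentheses around its parameter so low-precedence arguments such as 1|2 are shifted as a whole (the STATIC_ASSERTs that follow check exactly that). A small illustrative example, with simplified names and without the BitsPerWord guard of the real macros:

#include <cassert>
#include <cstdio>

// A multi-statement macro wrapped in do/while(0) behaves like one statement,
// so it is safe in an un-braced if/else.
#define CHECK_STATUS(p, status, msg)                                 \
do {                                                                 \
  if (!(p)) {                                                        \
    std::fprintf(stderr, "error %d, %s\n", (status), (msg));         \
    assert(false);                                                   \
  }                                                                  \
} while (0)

// Without the parentheses around (n), nth_bit(1|2) would expand to
// 1u << 1 | 2, which is 4, not the intended 1u << 3 == 8.
#define nth_bit(n)      (1u << (n))
#define right_n_bits(n) (nth_bit(n) - 1)

static_assert(nth_bit(1|2) == 0x8, "argument must bind before the shift");
static_assert(right_n_bits(3) == 0x7, "three low bits set");

int main() {
  int status = 0;
  if (status == 0)
    CHECK_STATUS(status == 0, status, "ok");  // safe even without braces
  else
    std::puts("unreachable");
  return 0;
}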
+ +STATIC_ASSERT(nth_bit(3) == 0x8); +STATIC_ASSERT(nth_bit(1|2) == 0x8); + +STATIC_ASSERT(right_n_bits(3) == 0x7); +STATIC_ASSERT(right_n_bits(1|2) == 0x7); + +STATIC_ASSERT(left_n_bits(3) == (intptr_t) LP64_ONLY(0xE000000000000000) NOT_LP64(0xE0000000)); +STATIC_ASSERT(left_n_bits(1|2) == (intptr_t) LP64_ONLY(0xE000000000000000) NOT_LP64(0xE0000000)); + + #ifndef PRODUCT // For unit testing only class GlobalDefinitions { diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/utilities/globalDefinitions.hpp --- a/hotspot/src/share/vm/utilities/globalDefinitions.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/utilities/globalDefinitions.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -42,6 +42,12 @@ # include "utilities/globalDefinitions_xlc.hpp" #endif +#ifndef NOINLINE +#define NOINLINE +#endif +#ifndef ALWAYSINLINE +#define ALWAYSINLINE inline +#endif #ifndef PRAGMA_DIAG_PUSH #define PRAGMA_DIAG_PUSH #endif @@ -1084,9 +1090,9 @@ // get a word with the n.th or the right-most or left-most n bits set // (note: #define used only so that they can be used in enum constant definitions) -#define nth_bit(n) (n >= BitsPerWord ? 0 : OneBit << (n)) +#define nth_bit(n) (((n) >= BitsPerWord) ? 0 : (OneBit << (n))) #define right_n_bits(n) (nth_bit(n) - 1) -#define left_n_bits(n) (right_n_bits(n) << (n >= BitsPerWord ? 0 : (BitsPerWord - n))) +#define left_n_bits(n) (right_n_bits(n) << (((n) >= BitsPerWord) ? 0 : (BitsPerWord - (n)))) // bit-operations using a mask m inline void set_bits (intptr_t& x, intptr_t m) { x |= m; } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/utilities/globalDefinitions_gcc.hpp --- a/hotspot/src/share/vm/utilities/globalDefinitions_gcc.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/utilities/globalDefinitions_gcc.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -322,4 +322,8 @@ #define THREAD_LOCAL_DECL __thread #endif +// Inlining support +#define NOINLINE __attribute__ ((noinline)) +#define ALWAYSINLINE __attribute__ ((always_inline)) + #endif // SHARE_VM_UTILITIES_GLOBALDEFINITIONS_GCC_HPP diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/utilities/globalDefinitions_sparcWorks.hpp --- a/hotspot/src/share/vm/utilities/globalDefinitions_sparcWorks.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/utilities/globalDefinitions_sparcWorks.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -277,4 +277,8 @@ #define THREAD_LOCAL_DECL __thread #endif +// Inlining support +#define NOINLINE +#define ALWAYSINLINE __attribute__((always_inline)) + #endif // SHARE_VM_UTILITIES_GLOBALDEFINITIONS_SPARCWORKS_HPP diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/utilities/globalDefinitions_visCPP.hpp --- a/hotspot/src/share/vm/utilities/globalDefinitions_visCPP.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/utilities/globalDefinitions_visCPP.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -240,4 +240,11 @@ #define THREAD_LOCAL_DECL __declspec( thread ) #endif +// Inlining support +// MSVC has '__declspec(noinline)' but according to the official documentation +// it only applies to member functions. There are reports though which pretend +// that it also works for freestanding functions. +#define NOINLINE __declspec(noinline) +#define ALWAYSINLINE __forceinline + #endif // SHARE_VM_UTILITIES_GLOBALDEFINITIONS_VISCPP_HPP diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/utilities/globalDefinitions_xlc.hpp --- a/hotspot/src/share/vm/utilities/globalDefinitions_xlc.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/utilities/globalDefinitions_xlc.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,6 +1,6 @@ /* - * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2012, 2013 SAP SE. All rights reserved. + * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2016 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -184,4 +184,8 @@ #define THREAD_LOCAL_DECL __thread #endif +// Inlining support +#define NOINLINE +#define ALWAYSINLINE __attribute__((always_inline)) + #endif // SHARE_VM_UTILITIES_GLOBALDEFINITIONS_XLC_HPP diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/utilities/internalVMTests.cpp --- a/hotspot/src/share/vm/utilities/internalVMTests.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/utilities/internalVMTests.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -67,8 +67,16 @@ run_unit_test(Test_linked_list); run_unit_test(TestChunkedList_test); run_unit_test(JSON_test); - run_unit_test(Test_log_length); + run_unit_test(Test_logtarget); + run_unit_test(Test_logstream); + run_unit_test(Test_loghandle); + run_unit_test(Test_logtargethandle); + run_unit_test(Test_log_gctracetime); run_unit_test(Test_configure_stdout); + run_unit_test(Test_logconfiguration_subscribe); + run_unit_test(Test_log_prefix); + run_unit_test(Test_log_big); + run_unit_test(Test_logtagset_duplicates); run_unit_test(DirectivesParser_test); run_unit_test(Test_TempNewSymbol); #if INCLUDE_VM_STRUCTS diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/utilities/macros.hpp --- a/hotspot/src/share/vm/utilities/macros.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/utilities/macros.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -206,6 +206,17 @@ #define NOT_COMPILER2(code) code #endif // COMPILER2 +// COMPILER2 or JVMCI +#if defined(COMPILER2) || INCLUDE_JVMCI +#define COMPILER2_OR_JVMCI 1 +#define COMPILER2_OR_JVMCI_PRESENT(code) code +#define NOT_COMPILER2_OR_JVMCI(code) +#else +#define COMPILER2_OR_JVMCI 0 +#define COMPILER2_OR_JVMCI_PRESENT(code) +#define NOT_COMPILER2_OR_JVMCI(code) code +#endif + #ifdef TIERED #define TIERED_ONLY(code) code #define NOT_TIERED(code) diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/utilities/ostream.cpp --- a/hotspot/src/share/vm/utilities/ostream.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/utilities/ostream.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -24,8 +24,6 @@ #include "precompiled.hpp" #include "compiler/compileLog.hpp" -#include "gc/shared/gcId.hpp" -#include "gc/shared/gcId.hpp" #include "oops/oop.inline.hpp" #include "runtime/arguments.hpp" #include "runtime/os.hpp" @@ -504,7 +502,7 @@ if (_file != NULL) { _need_close = true; } else { - warning("Cannot open file %s due to %s\n", file_name, strerror(errno)); + warning("Cannot open file %s due to %s\n", file_name, os::strerror(errno)); _need_close = false; } } @@ -514,7 +512,7 @@ if (_file != NULL) { _need_close = true; } else { - warning("Cannot open file %s due to %s\n", file_name, strerror(errno)); + warning("Cannot open file %s due to %s\n", file_name, os::strerror(errno)); _need_close = false; } } @@ -1099,14 +1097,3 @@ } #endif - -void logStream::write(const char* s, size_t len) { - if (len > 0 && s[len - 1] == '\n') { - _current_line.write(s, len - 1); - _log_func("%s", _current_line.as_string()); - _current_line.reset(); - } else { - _current_line.write(s, len); - } - update_position(s, len); -} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/utilities/ostream.hpp --- a/hotspot/src/share/vm/utilities/ostream.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/utilities/ostream.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -29,7 +29,6 @@ #include "runtime/timer.hpp" #include "utilities/globalDefinitions.hpp" -class GCId; DEBUG_ONLY(class ResourceMark;) // Output streams for printing @@ -247,18 +246,6 @@ void flush() {}; }; -class logStream : public outputStream { -private: - stringStream _current_line; - void (*_log_func)(const char* fmt, ...) ATTRIBUTE_PRINTF(1, 2); -public: - void write(const char* s, size_t len); - logStream(void (*log_func)(const char* fmt, ...)) : _log_func(log_func) {} - ~logStream() { - guarantee(_current_line.size() == 0, "Buffer not flushed. Missing call to print_cr()?"); - } -}; - void ostream_init(); void ostream_init_log(); void ostream_exit(); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/utilities/stack.inline.hpp --- a/hotspot/src/share/vm/utilities/stack.inline.hpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/utilities/stack.inline.hpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2009, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,17 +27,6 @@ #include "utilities/stack.hpp" -// Stack is used by the GC code and in some hot paths a lot of the Stack -// code gets inlined. This is generally good, but when too much code has -// been inlined, no further inlining is allowed by GCC. 
Therefore we need -// to prevent parts of the slow path in Stack to be inlined to allow other -// code to be. -#if defined(TARGET_COMPILER_gcc) -#define NOINLINE __attribute__((noinline)) -#else -#define NOINLINE -#endif - template StackBase::StackBase(size_t segment_size, size_t max_cache_size, size_t max_size): _seg_size(segment_size), @@ -151,6 +140,11 @@ FREE_C_HEAP_ARRAY(char, (char*) addr); } +// Stack is used by the GC code and in some hot paths a lot of the Stack +// code gets inlined. This is generally good, but when too much code has +// been inlined, no further inlining is allowed by GCC. Therefore we need +// to prevent parts of the slow path in Stack to be inlined to allow other +// code to be. template NOINLINE void Stack::push_segment() { @@ -280,6 +274,4 @@ return _cur_seg + --_cur_seg_size; } -#undef NOINLINE - #endif // SHARE_VM_UTILITIES_STACK_INLINE_HPP diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/utilities/vmError.cpp --- a/hotspot/src/share/vm/utilities/vmError.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/utilities/vmError.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1260,8 +1260,9 @@ out.print_raw("#\n# Compiler replay data is saved as:\n# "); out.print_raw_cr(buffer); } else { + int e = errno; out.print_raw("#\n# Can't open file to dump replay data. Error: "); - out.print_raw_cr(strerror(os::get_last_error())); + out.print_raw_cr(os::strerror(e)); } } } @@ -1301,7 +1302,8 @@ out.print_raw_cr("\" ..."); if (os::fork_and_exec(cmd) < 0) { - out.print_cr("os::fork_and_exec failed: %s (%d)", strerror(errno), errno); + out.print_cr("os::fork_and_exec failed: %s (%s=%d)", + os::strerror(errno), os::errno_name(errno), errno); } } @@ -1359,7 +1361,8 @@ tty->print_cr("\"%s\"...", cmd); if (os::fork_and_exec(cmd) < 0) { - tty->print_cr("os::fork_and_exec failed: %s (%d)", strerror(errno), errno); + tty->print_cr("os::fork_and_exec failed: %s (%s=%d)", + os::strerror(errno), os::errno_name(errno), errno); } } } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/src/share/vm/utilities/xmlstream.cpp --- a/hotspot/src/share/vm/utilities/xmlstream.cpp Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/src/share/vm/utilities/xmlstream.cpp Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -26,6 +26,7 @@ #include "code/nmethod.hpp" #include "memory/allocation.hpp" #include "memory/allocation.inline.hpp" +#include "memory/resourceArea.hpp" #include "oops/methodData.hpp" #include "oops/method.hpp" #include "oops/oop.inline.hpp" diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/Makefile --- a/hotspot/test/Makefile Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/test/Makefile Wed Jul 05 21:35:27 2017 +0200 @@ -159,6 +159,24 @@ JTREG_NATIVE_PATH = -nativepath:$(shell $(GETMIXEDPATH) "$(TESTNATIVE_DIR)/hotspot/jtreg/native") endif +# jtreg failure handler config +ifeq ($(FAILURE_HANDLER_DIR), ) + ifneq ($(TESTNATIVE_DIR), ) + FAILURE_HANDLER_DIR := $(TESTNATIVE_DIR)/failure_handler + endif +endif +ifneq ($(FAILURE_HANDLER_DIR), ) + FAILURE_HANDLER_DIR_MIXED := $(shell $(GETMIXEDPATH) "$(FAILURE_HANDLER_DIR)") + JTREG_FAILURE_HANDLER_OPTIONS := \ + -timeoutHandlerDir:$(FAILURE_HANDLER_DIR_MIXED)/jtregFailureHandler.jar \ + -observerDir:$(FAILURE_HANDLER_DIR_MIXED)/jtregFailureHandler.jar \ + -timeoutHandler:jdk.test.failurehandler.jtreg.GatherProcessInfoTimeoutHandler \ + -observer:jdk.test.failurehandler.jtreg.GatherDiagnosticInfoObserver + ifeq ($(PLATFORM), windows) + JTREG_FAILURE_HANDLER_OPTIONS += -J-Djava.library.path="$(FAILURE_HANDLER_DIR_MIXED)" + endif +endif + # Expect JPRT to set JPRT_ARCHIVE_BUNDLE (path to zip bundle for results) ARCHIVE_BUNDLE = $(ABS_TEST_OUTPUT_DIR)/ARCHIVE_BUNDLE.zip ifdef JPRT_ARCHIVE_BUNDLE @@ -322,6 +340,7 @@ -w:$(shell $(GETMIXEDPATH) "$(ABS_TEST_OUTPUT_DIR)/JTwork") \ -jdk:$(shell $(GETMIXEDPATH) "$(PRODUCT_HOME)") \ $(JTREG_NATIVE_PATH) \ + $(JTREG_FAILURE_HANDLER_OPTIONS) \ $(JTREG_EXCLUSIONS) \ $(JTREG_TEST_OPTIONS) \ $(TEST_SELECTION) \ diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/TEST.groups --- a/hotspot/test/TEST.groups Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/test/TEST.groups Wed Jul 05 21:35:27 2017 +0200 @@ -52,6 +52,25 @@ hotspot_all = \ / + +hotspot_compiler = \ + compiler + +hotspot_gc = \ + gc + +hotspot_runtime = \ + runtime + +hotspot_serviceability = \ + serviceability + +hotspot_misc = \ + / \ + -:hotspot_compiler \ + -:hotspot_gc \ + -:hotspot_runtime \ + -:hotspot_serviceability # Full JDK can run all tests # @@ -230,6 +249,7 @@ # needs_g1gc = \ compiler/regalloc/C1ObjectSpillInLogicOp.java \ + gc/TestHumongousReferenceObject.java \ gc/TestSmallHeap.java \ gc/TestSystemGC.java \ gc/arguments/TestAlignmentToUseLargePages.java \ @@ -253,7 +273,7 @@ hotspot_native_sanity = \ native_sanity -hotspot_compiler_1 = \ +hotspot_fast_compiler_1 = \ compiler/arraycopy/ \ compiler/c1/ \ compiler/c2/ \ @@ -268,7 +288,7 @@ -compiler/c2/7070134 \ -compiler/c2/8004867 -hotspot_compiler_2 = \ +hotspot_fast_compiler_2 = \ compiler/classUnloading/ \ compiler/codecache/ \ compiler/codegen/ \ @@ -287,7 +307,7 @@ -compiler/codecache/stress \ -compiler/gcbarriers/PreserveFPRegistersTest.java -hotspot_compiler_3 = \ +hotspot_fast_compiler_3 = \ compiler/intrinsics/ \ compiler/jsr292/ \ compiler/loopopts/ \ @@ -308,22 +328,27 @@ -compiler/loopopts/7052494 \ -compiler/runtime/6826736 -hotspot_compiler_closed = \ +hotspot_fast_compiler_closed = \ sanity/ExecuteInternalVMTests.java -hotspot_gc = \ +hotspot_fast_gc_1 = \ + gc/g1/ + +hotspot_fast_gc_2 = \ sanity/ExecuteInternalVMTests.java \ gc/ \ - -gc/g1/TestGreyReclaimedHumongousObjects.java \ + -gc/g1/ \ + -gc/survivorAlignment/TestPromotionFromSurvivorToTenuredAfterMinorGC.java \ + -gc/cms/TestMBeanCMS.java \ 
-gc/metaspace/CompressedClassSpaceSizeInJmapHeap.java -hotspot_gc_closed = \ +hotspot_fast_gc_closed = \ sanity/ExecuteInternalVMTests.java -hotspot_gc_gcold = \ +hotspot_fast_gc_gcold = \ stress/gc/TestGCOld.java -hotspot_runtime = \ +hotspot_fast_runtime = \ runtime/ \ -runtime/ErrorHandling/ErrorHandler.java \ -runtime/RedefineObject/TestRedefineObject.java \ @@ -335,6 +360,15 @@ -runtime/memory/ReserveMemory.java \ -runtime/memory/RunUnitTestsConcurrently.java \ -runtime/Unsafe/RangeCheck.java \ + -runtime/SelectionResolution/AbstractMethodErrorTest.java \ + -runtime/SelectionResolution/IllegalAccessErrorTest.java \ + -runtime/SelectionResolution/InvokeInterfaceICCE.java \ + -runtime/SelectionResolution/InvokeInterfaceSuccessTest.java \ + -runtime/SelectionResolution/InvokeSpecialICCE.java \ + -runtime/SelectionResolution/InvokeSpecialSuccessTest.java \ + -runtime/SelectionResolution/InvokeStaticICCE.java \ + -runtime/SelectionResolution/InvokeVirtualICCE.java \ + -runtime/SelectionResolution/InvokeVirtualSuccessTest.java \ -runtime/SharedArchiveFile/CdsSameObjectAlignment.java \ -runtime/SharedArchiveFile/DefaultUseWithClient.java \ -runtime/Thread/CancellableThreadTest.java \ @@ -343,21 +377,21 @@ sanity/ \ testlibrary_tests/TestMutuallyExclusivePlatformPredicates.java -hotspot_serviceability = \ +hotspot_fast_serviceability = \ sanity/ExecuteInternalVMTests.java \ serviceability/dcmd/compiler \ serviceability/logging hotspot_jprt = \ - :hotspot_compiler_1 \ - :hotspot_compiler_2 \ - :hotspot_compiler_3 \ - :hotspot_compiler_closed \ - :hotspot_gc \ - :hotspot_gc_closed \ - :hotspot_gc_gcold \ - :hotspot_runtime \ - :hotspot_serviceability + :hotspot_fast_compiler_1 \ + :hotspot_fast_compiler_2 \ + :hotspot_fast_compiler_3 \ + :hotspot_fast_compiler_closed \ + :hotspot_fast_gc \ + :hotspot_fast_gc_closed \ + :hotspot_fast_gc_gcold \ + :hotspot_fast_runtime \ + :hotspot_fast_serviceability #All tests that depends on nashorn extension. # diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/compiler/arguments/CheckCICompilerCount.java --- a/hotspot/test/compiler/arguments/CheckCICompilerCount.java Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/test/compiler/arguments/CheckCICompilerCount.java Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -160,7 +160,7 @@ } catch (RuntimeException e) { // Check if tiered compilation is available in this JVM // Version. Throw exception only if it is available. - if (!(tiered && out.getOutput().contains("TieredCompilation is disabled in this release."))) { + if (!(tiered && out.getOutput().contains("-XX:+TieredCompilation not supported in this VM"))) { throw new RuntimeException(e); } } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/compiler/arguments/CheckCompileThresholdScaling.java --- a/hotspot/test/compiler/arguments/CheckCompileThresholdScaling.java Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/test/compiler/arguments/CheckCompileThresholdScaling.java Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -344,7 +344,7 @@ } catch (RuntimeException e) { // Check if tiered compilation is available in this JVM // Version. Throw exception only if it is available. - if (!(tiered && out.getOutput().contains("TieredCompilation is disabled in this release."))) { + if (!(tiered && out.getOutput().contains("-XX:+TieredCompilation not supported in this VM"))) { throw new RuntimeException(e); } } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/compiler/codecache/CheckSegmentedCodeCache.java --- a/hotspot/test/compiler/codecache/CheckSegmentedCodeCache.java Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/test/compiler/codecache/CheckSegmentedCodeCache.java Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -52,7 +52,7 @@ out.shouldContain(NON_METHOD); } catch (RuntimeException e) { // Check if TieredCompilation is disabled (in a client VM) - if(!out.getOutput().contains("TieredCompilation is disabled in this release.")) { + if(!out.getOutput().contains("-XX:+TieredCompilation not supported in this VM")) { // Code cache is not segmented throw new RuntimeException("No code cache segmentation."); } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/compiler/dependencies/MonomorphicObjectCall/TestMonomorphicObjectCall.java --- a/hotspot/test/compiler/dependencies/MonomorphicObjectCall/TestMonomorphicObjectCall.java Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/test/compiler/dependencies/MonomorphicObjectCall/TestMonomorphicObjectCall.java Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -21,25 +21,14 @@ * questions. */ -import java.io.File; -import java.nio.file.Files; -import java.nio.file.Paths; -import java.util.ArrayList; -import java.util.Collections; - -import jdk.test.lib.*; - /* * @test * @bug 8050079 * @summary Compiles a monomorphic call to finalizeObject() on a modified java.lang.Object to test C1 CHA. 
- * @library /testlibrary - * @modules java.base/jdk.internal.misc - * java.management - * java.base/jdk.internal - * @ignore 8132924 - * @compile -XDignore.symbol.file java/lang/Object.java TestMonomorphicObjectCall.java - * @run main TestMonomorphicObjectCall + * @build java.base/java.lang.Object + * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -Xcomp -XX:-VerifyDependencies + * -XX:TieredStopAtLevel=1 -XX:CompileOnly=TestMonomorphicObjectCall::callFinalize + * -XX:CompileOnly=java.lang.Object::finalizeObject TestMonomorphicObjectCall */ public class TestMonomorphicObjectCall { @@ -51,32 +40,7 @@ } public static void main(String[] args) throws Throwable { - if (args.length == 0) { - byte[] bytecode = Files.readAllBytes(Paths.get(System.getProperty("test.classes") + File.separator + - "java" + File.separator + "lang" + File.separator + "Object.class")); - ClassFileInstaller.writeClassToDisk("java.lang.Object", bytecode, "mods/java.base"); - // Execute new instance with modified java.lang.Object - executeTestJvm(); - } else { - // Trigger compilation of 'callFinalize' - callFinalize(new Object()); - } - } - - public static void executeTestJvm() throws Throwable { - // Execute test with modified version of java.lang.Object - // in -Xbootclasspath. - String[] vmOpts = new String[] { - "-Xpatch:mods", - "-Xcomp", - "-XX:+IgnoreUnrecognizedVMOptions", - "-XX:-VerifyDependencies", - "-XX:CompileOnly=TestMonomorphicObjectCall::callFinalize", - "-XX:CompileOnly=Object::finalizeObject", - "-XX:TieredStopAtLevel=1", - TestMonomorphicObjectCall.class.getName(), - "true"}; - OutputAnalyzer output = ProcessTools.executeTestJvm(vmOpts); - output.shouldHaveExitValue(0); + // Trigger compilation of 'callFinalize' + callFinalize(new Object()); } } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/compiler/dependencies/MonomorphicObjectCall/java.base/java/lang/Object.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/compiler/dependencies/MonomorphicObjectCall/java.base/java/lang/Object.java Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,97 @@ +/* + * Copyright (c) 1994, 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package java.lang; + +import jdk.internal.HotSpotIntrinsicCandidate; + +/** + * Slightly modified version of java.lang.Object that replaces + * finalize() by finalizeObject() to avoid overriding in subclasses. 
+ */ +public class Object { + + @HotSpotIntrinsicCandidate + public Object() {} + + private static native void registerNatives(); + static { + registerNatives(); + } + + @HotSpotIntrinsicCandidate + public final native Class getClass(); + + @HotSpotIntrinsicCandidate + public native int hashCode(); + + public boolean equals(Object obj) { + return (this == obj); + } + + @HotSpotIntrinsicCandidate + protected native Object clone() throws CloneNotSupportedException; + + public String toString() { + return getClass().getName() + "@" + Integer.toHexString(hashCode()); + } + + @HotSpotIntrinsicCandidate + public final native void notify(); + + @HotSpotIntrinsicCandidate + public final native void notifyAll(); + + public final native void wait(long timeout) throws InterruptedException; + + public final void wait(long timeout, int nanos) throws InterruptedException { + if (timeout < 0) { + throw new IllegalArgumentException("timeout value is negative"); + } + + if (nanos < 0 || nanos > 999999) { + throw new IllegalArgumentException( + "nanosecond timeout value out of range"); + } + + if (nanos >= 500000 || (nanos != 0 && timeout == 0)) { + timeout++; + } + + wait(timeout); + } + + public final void wait() throws InterruptedException { + wait(0); + } + + /** + * Replaces original finalize() method and is therefore not + * overridden by any subclasses of Object. + * @throws Throwable + */ + // protected void finalize() throws Throwable { } + public void finalizeObject() throws Throwable { } +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/compiler/dependencies/MonomorphicObjectCall/java/lang/Object.java --- a/hotspot/test/compiler/dependencies/MonomorphicObjectCall/java/lang/Object.java Mon Apr 18 16:18:56 2016 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,97 +0,0 @@ -/* - * Copyright (c) 1994, 2014, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. Oracle designates this - * particular file as subject to the "Classpath" exception as provided - * by Oracle in the LICENSE file that accompanied this code. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -package java.lang; - -import jdk.internal.HotSpotIntrinsicCandidate; - -/** - * Slightly modified version of java.lang.Object that replaces - * finalize() by finalizeObject() to avoid overriding in subclasses. 
- */ -public class Object { - - @HotSpotIntrinsicCandidate - public Object() {} - - private static native void registerNatives(); - static { - registerNatives(); - } - - @HotSpotIntrinsicCandidate - public final native Class getClass(); - - @HotSpotIntrinsicCandidate - public native int hashCode(); - - public boolean equals(Object obj) { - return (this == obj); - } - - @HotSpotIntrinsicCandidate - protected native Object clone() throws CloneNotSupportedException; - - public String toString() { - return getClass().getName() + "@" + Integer.toHexString(hashCode()); - } - - @HotSpotIntrinsicCandidate - public final native void notify(); - - @HotSpotIntrinsicCandidate - public final native void notifyAll(); - - public final native void wait(long timeout) throws InterruptedException; - - public final void wait(long timeout, int nanos) throws InterruptedException { - if (timeout < 0) { - throw new IllegalArgumentException("timeout value is negative"); - } - - if (nanos < 0 || nanos > 999999) { - throw new IllegalArgumentException( - "nanosecond timeout value out of range"); - } - - if (nanos >= 500000 || (nanos != 0 && timeout == 0)) { - timeout++; - } - - wait(timeout); - } - - public final void wait() throws InterruptedException { - wait(0); - } - - /** - * Replaces original finalize() method and is therefore not - * overridden by any subclasses of Object. - * @throws Throwable - */ - // protected void finalize() throws Throwable { } - public void finalizeObject() throws Throwable { } -} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/compiler/intrinsics/muladd/TestMulAdd.java --- a/hotspot/test/compiler/intrinsics/muladd/TestMulAdd.java Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/test/compiler/intrinsics/muladd/TestMulAdd.java Wed Jul 05 21:35:27 2017 +0200 @@ -28,7 +28,7 @@ * @summary Add C2 x86 intrinsic for BigInteger::mulAdd() method * * @run main/othervm/timeout=600 -XX:-TieredCompilation -Xbatch - * -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions -XX:-UseSquareToLenIntrinsic -XX:-UseMultiplyToLenIntrinsic + * -XX:+IgnoreUnrecognizedVMOptions -XX:-UseSquareToLenIntrinsic -XX:-UseMultiplyToLenIntrinsic * -XX:CompileCommand=dontinline,TestMulAdd::main * -XX:CompileCommand=option,TestMulAdd::base_multiply,ccstr,DisableIntrinsic,_mulAdd * -XX:CompileCommand=option,java.math.BigInteger::multiply,ccstr,DisableIntrinsic,_mulAdd diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/compiler/jsr292/InvokerGC.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/compiler/jsr292/InvokerGC.java Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8067247 + * @library /test/lib /compiler/whitebox / + * @run main/bootclasspath -Xcomp -Xbatch + * -XX:CompileCommand=compileonly,InvokerGC::test + * -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI + * InvokerGC + */ + +import java.lang.invoke.*; +import sun.hotspot.WhiteBox; + +public class InvokerGC { + static final WhiteBox WB = WhiteBox.getWhiteBox(); + + static MethodHandle mh; + static { + try { + mh = MethodHandles.lookup().findStatic(InvokerGC.class, "dummy", MethodType.methodType(void.class)); + } catch (Exception e) { + throw new Error(e); + } + } + + static void dummy() {} + + static void test() { + try { + mh.invoke(); + } catch (Throwable e) { + throw new Error(e); + } + } + + public static void main(String[] args) throws Throwable { + mh.invoke(); // Pre-generate an invoker for ()V signature + + test(); // trigger method compilation + test(); + + WB.fullGC(); // WB.fullGC has always clear softref policy. + + test(); + + WB.clearInlineCaches(true); // Preserve static stubs. + + test(); // Trigger call site re-resolution. Invoker LambdaForm should stay the same. + + System.out.println("TEST PASSED"); + } +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/compiler/jvmci/compilerToVM/ConstantPoolTestCase.java --- a/hotspot/test/compiler/jvmci/compilerToVM/ConstantPoolTestCase.java Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/test/compiler/jvmci/compilerToVM/ConstantPoolTestCase.java Wed Jul 05 21:35:27 2017 +0200 @@ -28,8 +28,8 @@ import java.util.Map; import jdk.vm.ci.hotspot.HotSpotResolvedObjectType; import sun.hotspot.WhiteBox; -import sun.reflect.ConstantPool; -import sun.reflect.ConstantPool.Tag; +import jdk.internal.reflect.ConstantPool; +import jdk.internal.reflect.ConstantPool.Tag; import compiler.jvmci.compilerToVM.ConstantPoolTestsHelper.DummyClasses; import static compiler.jvmci.compilerToVM.ConstantPoolTestCase.ConstantTypes.*; diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/compiler/jvmci/compilerToVM/ConstantPoolTestsHelper.java --- a/hotspot/test/compiler/jvmci/compilerToVM/ConstantPoolTestsHelper.java Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/test/compiler/jvmci/compilerToVM/ConstantPoolTestsHelper.java Wed Jul 05 21:35:27 2017 +0200 @@ -34,8 +34,8 @@ import jdk.internal.misc.SharedSecrets; import jdk.internal.org.objectweb.asm.Opcodes; import sun.hotspot.WhiteBox; -import sun.reflect.ConstantPool; -import sun.reflect.ConstantPool.Tag; +import jdk.internal.reflect.ConstantPool; +import jdk.internal.reflect.ConstantPool.Tag; /** * Class contains hard-coded constant pool tables for dummy classes used for diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/compiler/jvmci/compilerToVM/LookupKlassInPoolTest.java --- a/hotspot/test/compiler/jvmci/compilerToVM/LookupKlassInPoolTest.java Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/test/compiler/jvmci/compilerToVM/LookupKlassInPoolTest.java Wed Jul 05 21:35:27 2017 +0200 @@ -30,6 +30,7 @@ * @library /testlibrary /test/lib / * @library ../common/patches * @modules java.base/jdk.internal.misc + * java.base/jdk.internal.reflect * java.base/jdk.internal.org.objectweb.asm * java.base/jdk.internal.org.objectweb.asm.tree * jdk.vm.ci/jdk.vm.ci.hotspot diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/compiler/jvmci/compilerToVM/LookupKlassRefIndexInPoolTest.java --- 
a/hotspot/test/compiler/jvmci/compilerToVM/LookupKlassRefIndexInPoolTest.java Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/test/compiler/jvmci/compilerToVM/LookupKlassRefIndexInPoolTest.java Wed Jul 05 21:35:27 2017 +0200 @@ -29,6 +29,7 @@ * @library /testlibrary /test/lib / * @library ../common/patches * @modules java.base/jdk.internal.misc + * java.base/jdk.internal.reflect * java.base/jdk.internal.org.objectweb.asm * java.base/jdk.internal.org.objectweb.asm.tree * jdk.vm.ci/jdk.vm.ci.hotspot diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/compiler/jvmci/compilerToVM/LookupMethodInPoolTest.java --- a/hotspot/test/compiler/jvmci/compilerToVM/LookupMethodInPoolTest.java Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/test/compiler/jvmci/compilerToVM/LookupMethodInPoolTest.java Wed Jul 05 21:35:27 2017 +0200 @@ -29,6 +29,7 @@ * @library /testlibrary /test/lib / * @library ../common/patches * @modules java.base/jdk.internal.misc + * java.base/jdk.internal.reflect * java.base/jdk.internal.org.objectweb.asm * java.base/jdk.internal.org.objectweb.asm.tree * jdk.vm.ci/jdk.vm.ci.hotspot diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/compiler/jvmci/compilerToVM/LookupNameAndTypeRefIndexInPoolTest.java --- a/hotspot/test/compiler/jvmci/compilerToVM/LookupNameAndTypeRefIndexInPoolTest.java Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/test/compiler/jvmci/compilerToVM/LookupNameAndTypeRefIndexInPoolTest.java Wed Jul 05 21:35:27 2017 +0200 @@ -29,6 +29,7 @@ * @library /testlibrary /test/lib / * @library ../common/patches * @modules java.base/jdk.internal.misc + * java.base/jdk.internal.reflect * java.base/jdk.internal.org.objectweb.asm * java.base/jdk.internal.org.objectweb.asm.tree * jdk.vm.ci/jdk.vm.ci.hotspot diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/compiler/jvmci/compilerToVM/LookupNameInPoolTest.java --- a/hotspot/test/compiler/jvmci/compilerToVM/LookupNameInPoolTest.java Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/test/compiler/jvmci/compilerToVM/LookupNameInPoolTest.java Wed Jul 05 21:35:27 2017 +0200 @@ -29,6 +29,7 @@ * @library /testlibrary /test/lib / * @library ../common/patches * @modules java.base/jdk.internal.misc + * java.base/jdk.internal.reflect * java.base/jdk.internal.org.objectweb.asm * java.base/jdk.internal.org.objectweb.asm.tree * jdk.vm.ci/jdk.vm.ci.hotspot diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/compiler/jvmci/compilerToVM/LookupSignatureInPoolTest.java --- a/hotspot/test/compiler/jvmci/compilerToVM/LookupSignatureInPoolTest.java Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/test/compiler/jvmci/compilerToVM/LookupSignatureInPoolTest.java Wed Jul 05 21:35:27 2017 +0200 @@ -29,6 +29,7 @@ * @library /testlibrary /test/lib / * @library ../common/patches * @modules java.base/jdk.internal.misc + * java.base/jdk.internal.reflect * java.base/jdk.internal.org.objectweb.asm * java.base/jdk.internal.org.objectweb.asm.tree * jdk.vm.ci/jdk.vm.ci.hotspot diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/compiler/jvmci/compilerToVM/MethodIsIgnoredBySecurityStackWalkTest.java --- a/hotspot/test/compiler/jvmci/compilerToVM/MethodIsIgnoredBySecurityStackWalkTest.java Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/test/compiler/jvmci/compilerToVM/MethodIsIgnoredBySecurityStackWalkTest.java Wed Jul 05 21:35:27 2017 +0200 @@ -28,7 +28,8 @@ * @requires (os.simpleArch == "x64" | os.simpleArch == "sparcv9" | os.simpleArch == "aarch64") * @library /testlibrary /test/lib / * @library ../common/patches - * @modules java.base/jdk.internal.org.objectweb.asm + * @modules 
java.base/jdk.internal.reflect + * java.base/jdk.internal.org.objectweb.asm * java.base/jdk.internal.org.objectweb.asm.tree * jdk.vm.ci/jdk.vm.ci.hotspot * jdk.vm.ci/jdk.vm.ci.code @@ -76,7 +77,7 @@ testCases.put(aClass.getMethod("invoke", Object.class, Object[].class), true); - aClass = Class.forName("sun.reflect.NativeMethodAccessorImpl"); + aClass = Class.forName("jdk.internal.reflect.NativeMethodAccessorImpl"); testCases.put(aClass.getMethod("invoke", Object.class, Object[].class), true); testCases.put(aClass.getDeclaredMethod("invoke0", Method.class, diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/compiler/jvmci/compilerToVM/ResolveConstantInPoolTest.java --- a/hotspot/test/compiler/jvmci/compilerToVM/ResolveConstantInPoolTest.java Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/test/compiler/jvmci/compilerToVM/ResolveConstantInPoolTest.java Wed Jul 05 21:35:27 2017 +0200 @@ -29,6 +29,7 @@ * @library /testlibrary /test/lib / * @library ../common/patches * @modules java.base/jdk.internal.misc + * java.base/jdk.internal.reflect * java.base/jdk.internal.org.objectweb.asm * jdk.vm.ci/jdk.vm.ci.hotspot * jdk.vm.ci/jdk.vm.ci.meta diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/compiler/jvmci/compilerToVM/ResolveFieldInPoolTest.java --- a/hotspot/test/compiler/jvmci/compilerToVM/ResolveFieldInPoolTest.java Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/test/compiler/jvmci/compilerToVM/ResolveFieldInPoolTest.java Wed Jul 05 21:35:27 2017 +0200 @@ -29,6 +29,7 @@ * @library /testlibrary /test/lib / * @library ../common/patches * @modules java.base/jdk.internal.misc + * java.base/jdk.internal.reflect * java.base/jdk.internal.org.objectweb.asm * java.base/jdk.internal.org.objectweb.asm.tree * jdk.vm.ci/jdk.vm.ci.hotspot diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/compiler/jvmci/compilerToVM/ResolvePossiblyCachedConstantInPoolTest.java --- a/hotspot/test/compiler/jvmci/compilerToVM/ResolvePossiblyCachedConstantInPoolTest.java Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/test/compiler/jvmci/compilerToVM/ResolvePossiblyCachedConstantInPoolTest.java Wed Jul 05 21:35:27 2017 +0200 @@ -29,6 +29,7 @@ * @library /testlibrary /test/lib / * @library ../common/patches * @modules java.base/jdk.internal.misc + * java.base/jdk.internal.reflect * java.base/jdk.internal.org.objectweb.asm * java.base/jdk.internal.org.objectweb.asm.tree * jdk.vm.ci/jdk.vm.ci.hotspot diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/compiler/jvmci/compilerToVM/ResolveTypeInPoolTest.java --- a/hotspot/test/compiler/jvmci/compilerToVM/ResolveTypeInPoolTest.java Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/test/compiler/jvmci/compilerToVM/ResolveTypeInPoolTest.java Wed Jul 05 21:35:27 2017 +0200 @@ -30,6 +30,7 @@ * @library /testlibrary /test/lib / * @library ../common/patches * @modules java.base/jdk.internal.misc + * java.base/jdk.internal.reflect * java.base/jdk.internal.org.objectweb.asm * jdk.vm.ci/jdk.vm.ci.hotspot * jdk.vm.ci/jdk.vm.ci.meta diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/compiler/jvmci/events/JvmciNotifyInstallEventTest.java --- a/hotspot/test/compiler/jvmci/events/JvmciNotifyInstallEventTest.java Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/test/compiler/jvmci/events/JvmciNotifyInstallEventTest.java Wed Jul 05 21:35:27 2017 +0200 @@ -47,14 +47,20 @@ * compiler.jvmci.common.CTVMUtilities * compiler.jvmci.common.testcases.SimpleClass * jdk.test.lib.Asserts + * jdk.test.lib.Utils * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+EnableJVMCI * -Djvmci.compiler=EmptyCompiler -Xbootclasspath/a:. 
-Xmixed * -XX:+UseJVMCICompiler -XX:-BootstrapJVMCI - * -Dcompiler.jvmci.events.JvmciNotifyInstallEventTest.noevent=false + * -Dcompiler.jvmci.events.JvmciNotifyInstallEventTest.failoninit=false + * compiler.jvmci.events.JvmciNotifyInstallEventTest + * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+EnableJVMCI + * -Djvmci.compiler=EmptyCompiler -Xbootclasspath/a:. -Xmixed + * -XX:+UseJVMCICompiler -XX:-BootstrapJVMCI -XX:JVMCINMethodSizeLimit=0 + * -Dcompiler.jvmci.events.JvmciNotifyInstallEventTest.failoninit=false * compiler.jvmci.events.JvmciNotifyInstallEventTest * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:-EnableJVMCI * -Djvmci.compiler=EmptyCompiler -Xbootclasspath/a:. -Xmixed - * -Dcompiler.jvmci.events.JvmciNotifyInstallEventTest.noevent=true + * -Dcompiler.jvmci.events.JvmciNotifyInstallEventTest.failoninit=true * compiler.jvmci.events.JvmciNotifyInstallEventTest */ @@ -64,6 +70,7 @@ import compiler.jvmci.common.testcases.SimpleClass; import jdk.test.lib.Asserts; import java.lang.reflect.Method; +import jdk.test.lib.Utils; import jdk.vm.ci.hotspot.HotSpotVMEventListener; import jdk.vm.ci.code.CompiledCode; import jdk.vm.ci.code.InstalledCode; @@ -79,8 +86,8 @@ public class JvmciNotifyInstallEventTest implements HotSpotVMEventListener { private static final String METHOD_NAME = "testMethod"; - private static final boolean IS_POSITIVE = !Boolean.getBoolean( - "compiler.jvmci.events.JvmciNotifyInstallEventTest.noevent"); + private static final boolean FAIL_ON_INIT = !Boolean.getBoolean( + "compiler.jvmci.events.JvmciNotifyInstallEventTest.failoninit"); private static volatile int gotInstallNotification = 0; public static void main(String args[]) { @@ -91,12 +98,12 @@ if (gotInstallNotification != 0) { throw new Error("Got install notification before test actions"); } - HotSpotCodeCacheProvider codeCache = null; + HotSpotCodeCacheProvider codeCache; try { codeCache = (HotSpotCodeCacheProvider) HotSpotJVMCIRuntime.runtime() .getHostJVMCIBackend().getCodeCache(); } catch (InternalError ie) { - if (IS_POSITIVE) { + if (FAIL_ON_INIT) { throw new AssertionError( "Got unexpected InternalError trying to get code cache", ie); @@ -104,7 +111,7 @@ // passed return; } - Asserts.assertTrue(IS_POSITIVE, + Asserts.assertTrue(FAIL_ON_INIT, "Haven't caught InternalError in negative case"); Method testMethod; try { @@ -114,18 +121,30 @@ } HotSpotResolvedJavaMethod method = CTVMUtilities .getResolvedMethod(SimpleClass.class, testMethod); - HotSpotCompiledCode compiledCode = new HotSpotCompiledCode(METHOD_NAME, new byte[0], 0, new Site[0], - new Assumption[0], new ResolvedJavaMethod[]{method}, new Comment[0], new byte[0], 16, - new DataPatch[0], false, 0, null); - codeCache.installCode(method, compiledCode, /* installedCode = */ null, /* speculationLog = */ null, - /* isDefault = */ false); + HotSpotCompiledCode compiledCode = new HotSpotCompiledCode(METHOD_NAME, + new byte[0], 0, new Site[0], new Assumption[0], + new ResolvedJavaMethod[]{method}, new Comment[0], new byte[0], + 16, new DataPatch[0], false, 0, null); + codeCache.installCode(method, compiledCode, /* installedCode = */ null, + /* speculationLog = */ null, /* isDefault = */ false); Asserts.assertEQ(gotInstallNotification, 1, "Got unexpected event count after 1st install attempt"); // since "empty" compilation result is ok, a second attempt should be ok - codeCache.installCode(method, compiledCode, /* installedCode = */ null, /* speculationLog = */ null, - /* isDefault = */ false); + codeCache.installCode(method, 
compiledCode, /* installedCode = */ null, + /* speculationLog = */ null, /* isDefault = */ false); Asserts.assertEQ(gotInstallNotification, 2, "Got unexpected event count after 2nd install attempt"); + // and an incorrect cases + Utils.runAndCheckException(() -> { + codeCache.installCode(method, null, null, null, true); + }, NullPointerException.class); + Asserts.assertEQ(gotInstallNotification, 2, + "Got unexpected event count after 3rd install attempt"); + Utils.runAndCheckException(() -> { + codeCache.installCode(null, null, null, null, true); + }, NullPointerException.class); + Asserts.assertEQ(gotInstallNotification, 2, + "Got unexpected event count after 4th install attempt"); } @Override diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/compiler/jvmci/jdk.vm.ci.runtime.test/src/jdk/vm/ci/runtime/test/TestResolvedJavaType.java --- a/hotspot/test/compiler/jvmci/jdk.vm.ci.runtime.test/src/jdk/vm/ci/runtime/test/TestResolvedJavaType.java Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/test/compiler/jvmci/jdk.vm.ci.runtime.test/src/jdk/vm/ci/runtime/test/TestResolvedJavaType.java Wed Jul 05 21:35:27 2017 +0200 @@ -25,7 +25,8 @@ * @test * @requires (os.simpleArch == "x64" | os.simpleArch == "sparcv9" | os.simpleArch == "aarch64") * @library ../../../../../ - * @modules jdk.vm.ci/jdk.vm.ci.meta + * @modules java.base/jdk.internal.reflect + * jdk.vm.ci/jdk.vm.ci.meta * jdk.vm.ci/jdk.vm.ci.runtime * jdk.vm.ci/jdk.vm.ci.common * @build jdk.vm.ci.runtime.test.TestResolvedJavaType @@ -70,7 +71,7 @@ import org.junit.Test; -import sun.reflect.ConstantPool; +import jdk.internal.reflect.ConstantPool; /** * Tests for {@link ResolvedJavaType}. diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/compiler/loopopts/TestCastIINoLoopLimitCheck.java --- a/hotspot/test/compiler/loopopts/TestCastIINoLoopLimitCheck.java Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/test/compiler/loopopts/TestCastIINoLoopLimitCheck.java Wed Jul 05 21:35:27 2017 +0200 @@ -26,10 +26,19 @@ * @test * @bug 8073184 * @summary CastII that guards counted loops confuses range check elimination with LoopLimitCheck off - * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions -XX:-LoopLimitCheck -XX:CompileOnly=TestCastIINoLoopLimitCheck.m -Xcomp TestCastIINoLoopLimitCheck + * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:CompileOnly=TestCastIINoLoopLimitCheck.m -Xcomp TestCastIINoLoopLimitCheck * */ +/* + * The test was originally run with + * + * -XX:+UnlockDiagnosticVMOptions -XX:-LoopLimitCheck + * + * to trigger a problem with code guarded with !LoopLimitCheck. + * JDK-8072422 has removed that code but kept the test because the + * test generates an interesting graph shape. 
+ */ public class TestCastIINoLoopLimitCheck { static void m(int i, int index, char[] buf) { diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/compiler/runtime/6859338/Test6859338.java --- a/hotspot/test/compiler/runtime/6859338/Test6859338.java Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/test/compiler/runtime/6859338/Test6859338.java Wed Jul 05 21:35:27 2017 +0200 @@ -27,7 +27,7 @@ * @bug 6859338 * @summary Assertion failure in sharedRuntime.cpp * - * @run main/othervm -Xcomp -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions -XX:-InlineObjectHash -Xbatch -XX:-ProfileInterpreter Test6859338 + * @run main/othervm -Xcomp -XX:+IgnoreUnrecognizedVMOptions -XX:-InlineObjectHash -Xbatch -XX:-ProfileInterpreter Test6859338 */ public class Test6859338 { diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/gc/TestHumongousReferenceObject.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/gc/TestHumongousReferenceObject.java Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,354 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import jdk.internal.vm.annotation.Contended; + +/* + * @test + * @summary Test that verifies that iteration over large, plain Java objects, that potentially cross region boundaries on G1, with references in them works. + * @requires vm.gc == "null" + * @bug 8151499 + * @modules java.base/jdk.internal.vm.annotation + * @run main/othervm -XX:+EnableContended -XX:-RestrictContended -Xmx1g -XX:+UseParallelGC -XX:ContendedPaddingWidth=8192 TestHumongousReferenceObject + * @run main/othervm -XX:+EnableContended -XX:-RestrictContended -Xmx1g -XX:+UseG1GC -XX:G1HeapRegionSize=1M -XX:ContendedPaddingWidth=8192 TestHumongousReferenceObject + * @run main/othervm -XX:+EnableContended -XX:-RestrictContended -Xmx1g -XX:+UseG1GC -XX:G1HeapRegionSize=2M -XX:ContendedPaddingWidth=8192 TestHumongousReferenceObject + * @run main/othervm -XX:+EnableContended -XX:-RestrictContended -Xmx1g -XX:+UseG1GC -XX:G1HeapRegionSize=4M -XX:ContendedPaddingWidth=8192 TestHumongousReferenceObject + * @run main/othervm -XX:+EnableContended -XX:-RestrictContended -Xmx1g -XX:+UseG1GC -XX:G1HeapRegionSize=8M -XX:ContendedPaddingWidth=8192 TestHumongousReferenceObject + */ +public class TestHumongousReferenceObject { + + /* + Due to 300 fields with 8K @Contended padding around each field, it takes 2.4M bytes per instance. + With small G1 regions, it is bound to cross regions. G1 should properly (card) mark the object nevertheless. 
+ With 1G heap, it is enough to allocate ~400 of these objects to provoke at least one GC. + */ + + static volatile Object instance; + + public static void main(String[] args) { + for (int c = 0; c < 400; c++) { + instance = new TestHumongousReferenceObject(); + } + } + + @Contended Integer int_1 = new Integer(1); + @Contended Integer int_2 = new Integer(2); + @Contended Integer int_3 = new Integer(3); + @Contended Integer int_4 = new Integer(4); + @Contended Integer int_5 = new Integer(5); + @Contended Integer int_6 = new Integer(6); + @Contended Integer int_7 = new Integer(7); + @Contended Integer int_8 = new Integer(8); + @Contended Integer int_9 = new Integer(9); + @Contended Integer int_10 = new Integer(10); + @Contended Integer int_11 = new Integer(11); + @Contended Integer int_12 = new Integer(12); + @Contended Integer int_13 = new Integer(13); + @Contended Integer int_14 = new Integer(14); + @Contended Integer int_15 = new Integer(15); + @Contended Integer int_16 = new Integer(16); + @Contended Integer int_17 = new Integer(17); + @Contended Integer int_18 = new Integer(18); + @Contended Integer int_19 = new Integer(19); + @Contended Integer int_20 = new Integer(20); + @Contended Integer int_21 = new Integer(21); + @Contended Integer int_22 = new Integer(22); + @Contended Integer int_23 = new Integer(23); + @Contended Integer int_24 = new Integer(24); + @Contended Integer int_25 = new Integer(25); + @Contended Integer int_26 = new Integer(26); + @Contended Integer int_27 = new Integer(27); + @Contended Integer int_28 = new Integer(28); + @Contended Integer int_29 = new Integer(29); + @Contended Integer int_30 = new Integer(30); + @Contended Integer int_31 = new Integer(31); + @Contended Integer int_32 = new Integer(32); + @Contended Integer int_33 = new Integer(33); + @Contended Integer int_34 = new Integer(34); + @Contended Integer int_35 = new Integer(35); + @Contended Integer int_36 = new Integer(36); + @Contended Integer int_37 = new Integer(37); + @Contended Integer int_38 = new Integer(38); + @Contended Integer int_39 = new Integer(39); + @Contended Integer int_40 = new Integer(40); + @Contended Integer int_41 = new Integer(41); + @Contended Integer int_42 = new Integer(42); + @Contended Integer int_43 = new Integer(43); + @Contended Integer int_44 = new Integer(44); + @Contended Integer int_45 = new Integer(45); + @Contended Integer int_46 = new Integer(46); + @Contended Integer int_47 = new Integer(47); + @Contended Integer int_48 = new Integer(48); + @Contended Integer int_49 = new Integer(49); + @Contended Integer int_50 = new Integer(50); + @Contended Integer int_51 = new Integer(51); + @Contended Integer int_52 = new Integer(52); + @Contended Integer int_53 = new Integer(53); + @Contended Integer int_54 = new Integer(54); + @Contended Integer int_55 = new Integer(55); + @Contended Integer int_56 = new Integer(56); + @Contended Integer int_57 = new Integer(57); + @Contended Integer int_58 = new Integer(58); + @Contended Integer int_59 = new Integer(59); + @Contended Integer int_60 = new Integer(60); + @Contended Integer int_61 = new Integer(61); + @Contended Integer int_62 = new Integer(62); + @Contended Integer int_63 = new Integer(63); + @Contended Integer int_64 = new Integer(64); + @Contended Integer int_65 = new Integer(65); + @Contended Integer int_66 = new Integer(66); + @Contended Integer int_67 = new Integer(67); + @Contended Integer int_68 = new Integer(68); + @Contended Integer int_69 = new Integer(69); + @Contended Integer int_70 = new Integer(70); + 
@Contended Integer int_71 = new Integer(71); + @Contended Integer int_72 = new Integer(72); + @Contended Integer int_73 = new Integer(73); + @Contended Integer int_74 = new Integer(74); + @Contended Integer int_75 = new Integer(75); + @Contended Integer int_76 = new Integer(76); + @Contended Integer int_77 = new Integer(77); + @Contended Integer int_78 = new Integer(78); + @Contended Integer int_79 = new Integer(79); + @Contended Integer int_80 = new Integer(80); + @Contended Integer int_81 = new Integer(81); + @Contended Integer int_82 = new Integer(82); + @Contended Integer int_83 = new Integer(83); + @Contended Integer int_84 = new Integer(84); + @Contended Integer int_85 = new Integer(85); + @Contended Integer int_86 = new Integer(86); + @Contended Integer int_87 = new Integer(87); + @Contended Integer int_88 = new Integer(88); + @Contended Integer int_89 = new Integer(89); + @Contended Integer int_90 = new Integer(90); + @Contended Integer int_91 = new Integer(91); + @Contended Integer int_92 = new Integer(92); + @Contended Integer int_93 = new Integer(93); + @Contended Integer int_94 = new Integer(94); + @Contended Integer int_95 = new Integer(95); + @Contended Integer int_96 = new Integer(96); + @Contended Integer int_97 = new Integer(97); + @Contended Integer int_98 = new Integer(98); + @Contended Integer int_99 = new Integer(99); + @Contended Integer int_100 = new Integer(100); + @Contended Integer int_101 = new Integer(101); + @Contended Integer int_102 = new Integer(102); + @Contended Integer int_103 = new Integer(103); + @Contended Integer int_104 = new Integer(104); + @Contended Integer int_105 = new Integer(105); + @Contended Integer int_106 = new Integer(106); + @Contended Integer int_107 = new Integer(107); + @Contended Integer int_108 = new Integer(108); + @Contended Integer int_109 = new Integer(109); + @Contended Integer int_110 = new Integer(110); + @Contended Integer int_111 = new Integer(111); + @Contended Integer int_112 = new Integer(112); + @Contended Integer int_113 = new Integer(113); + @Contended Integer int_114 = new Integer(114); + @Contended Integer int_115 = new Integer(115); + @Contended Integer int_116 = new Integer(116); + @Contended Integer int_117 = new Integer(117); + @Contended Integer int_118 = new Integer(118); + @Contended Integer int_119 = new Integer(119); + @Contended Integer int_120 = new Integer(120); + @Contended Integer int_121 = new Integer(121); + @Contended Integer int_122 = new Integer(122); + @Contended Integer int_123 = new Integer(123); + @Contended Integer int_124 = new Integer(124); + @Contended Integer int_125 = new Integer(125); + @Contended Integer int_126 = new Integer(126); + @Contended Integer int_127 = new Integer(127); + @Contended Integer int_128 = new Integer(128); + @Contended Integer int_129 = new Integer(129); + @Contended Integer int_130 = new Integer(130); + @Contended Integer int_131 = new Integer(131); + @Contended Integer int_132 = new Integer(132); + @Contended Integer int_133 = new Integer(133); + @Contended Integer int_134 = new Integer(134); + @Contended Integer int_135 = new Integer(135); + @Contended Integer int_136 = new Integer(136); + @Contended Integer int_137 = new Integer(137); + @Contended Integer int_138 = new Integer(138); + @Contended Integer int_139 = new Integer(139); + @Contended Integer int_140 = new Integer(140); + @Contended Integer int_141 = new Integer(141); + @Contended Integer int_142 = new Integer(142); + @Contended Integer int_143 = new Integer(143); + @Contended Integer int_144 = new 
Integer(144); + @Contended Integer int_145 = new Integer(145); + @Contended Integer int_146 = new Integer(146); + @Contended Integer int_147 = new Integer(147); + @Contended Integer int_148 = new Integer(148); + @Contended Integer int_149 = new Integer(149); + @Contended Integer int_150 = new Integer(150); + @Contended Integer int_151 = new Integer(151); + @Contended Integer int_152 = new Integer(152); + @Contended Integer int_153 = new Integer(153); + @Contended Integer int_154 = new Integer(154); + @Contended Integer int_155 = new Integer(155); + @Contended Integer int_156 = new Integer(156); + @Contended Integer int_157 = new Integer(157); + @Contended Integer int_158 = new Integer(158); + @Contended Integer int_159 = new Integer(159); + @Contended Integer int_160 = new Integer(160); + @Contended Integer int_161 = new Integer(161); + @Contended Integer int_162 = new Integer(162); + @Contended Integer int_163 = new Integer(163); + @Contended Integer int_164 = new Integer(164); + @Contended Integer int_165 = new Integer(165); + @Contended Integer int_166 = new Integer(166); + @Contended Integer int_167 = new Integer(167); + @Contended Integer int_168 = new Integer(168); + @Contended Integer int_169 = new Integer(169); + @Contended Integer int_170 = new Integer(170); + @Contended Integer int_171 = new Integer(171); + @Contended Integer int_172 = new Integer(172); + @Contended Integer int_173 = new Integer(173); + @Contended Integer int_174 = new Integer(174); + @Contended Integer int_175 = new Integer(175); + @Contended Integer int_176 = new Integer(176); + @Contended Integer int_177 = new Integer(177); + @Contended Integer int_178 = new Integer(178); + @Contended Integer int_179 = new Integer(179); + @Contended Integer int_180 = new Integer(180); + @Contended Integer int_181 = new Integer(181); + @Contended Integer int_182 = new Integer(182); + @Contended Integer int_183 = new Integer(183); + @Contended Integer int_184 = new Integer(184); + @Contended Integer int_185 = new Integer(185); + @Contended Integer int_186 = new Integer(186); + @Contended Integer int_187 = new Integer(187); + @Contended Integer int_188 = new Integer(188); + @Contended Integer int_189 = new Integer(189); + @Contended Integer int_190 = new Integer(190); + @Contended Integer int_191 = new Integer(191); + @Contended Integer int_192 = new Integer(192); + @Contended Integer int_193 = new Integer(193); + @Contended Integer int_194 = new Integer(194); + @Contended Integer int_195 = new Integer(195); + @Contended Integer int_196 = new Integer(196); + @Contended Integer int_197 = new Integer(197); + @Contended Integer int_198 = new Integer(198); + @Contended Integer int_199 = new Integer(199); + @Contended Integer int_200 = new Integer(200); + @Contended Integer int_201 = new Integer(201); + @Contended Integer int_202 = new Integer(202); + @Contended Integer int_203 = new Integer(203); + @Contended Integer int_204 = new Integer(204); + @Contended Integer int_205 = new Integer(205); + @Contended Integer int_206 = new Integer(206); + @Contended Integer int_207 = new Integer(207); + @Contended Integer int_208 = new Integer(208); + @Contended Integer int_209 = new Integer(209); + @Contended Integer int_210 = new Integer(210); + @Contended Integer int_211 = new Integer(211); + @Contended Integer int_212 = new Integer(212); + @Contended Integer int_213 = new Integer(213); + @Contended Integer int_214 = new Integer(214); + @Contended Integer int_215 = new Integer(215); + @Contended Integer int_216 = new Integer(216); + @Contended 
Integer int_217 = new Integer(217); + @Contended Integer int_218 = new Integer(218); + @Contended Integer int_219 = new Integer(219); + @Contended Integer int_220 = new Integer(220); + @Contended Integer int_221 = new Integer(221); + @Contended Integer int_222 = new Integer(222); + @Contended Integer int_223 = new Integer(223); + @Contended Integer int_224 = new Integer(224); + @Contended Integer int_225 = new Integer(225); + @Contended Integer int_226 = new Integer(226); + @Contended Integer int_227 = new Integer(227); + @Contended Integer int_228 = new Integer(228); + @Contended Integer int_229 = new Integer(229); + @Contended Integer int_230 = new Integer(230); + @Contended Integer int_231 = new Integer(231); + @Contended Integer int_232 = new Integer(232); + @Contended Integer int_233 = new Integer(233); + @Contended Integer int_234 = new Integer(234); + @Contended Integer int_235 = new Integer(235); + @Contended Integer int_236 = new Integer(236); + @Contended Integer int_237 = new Integer(237); + @Contended Integer int_238 = new Integer(238); + @Contended Integer int_239 = new Integer(239); + @Contended Integer int_240 = new Integer(240); + @Contended Integer int_241 = new Integer(241); + @Contended Integer int_242 = new Integer(242); + @Contended Integer int_243 = new Integer(243); + @Contended Integer int_244 = new Integer(244); + @Contended Integer int_245 = new Integer(245); + @Contended Integer int_246 = new Integer(246); + @Contended Integer int_247 = new Integer(247); + @Contended Integer int_248 = new Integer(248); + @Contended Integer int_249 = new Integer(249); + @Contended Integer int_250 = new Integer(250); + @Contended Integer int_251 = new Integer(251); + @Contended Integer int_252 = new Integer(252); + @Contended Integer int_253 = new Integer(253); + @Contended Integer int_254 = new Integer(254); + @Contended Integer int_255 = new Integer(255); + @Contended Integer int_256 = new Integer(256); + @Contended Integer int_257 = new Integer(257); + @Contended Integer int_258 = new Integer(258); + @Contended Integer int_259 = new Integer(259); + @Contended Integer int_260 = new Integer(260); + @Contended Integer int_261 = new Integer(261); + @Contended Integer int_262 = new Integer(262); + @Contended Integer int_263 = new Integer(263); + @Contended Integer int_264 = new Integer(264); + @Contended Integer int_265 = new Integer(265); + @Contended Integer int_266 = new Integer(266); + @Contended Integer int_267 = new Integer(267); + @Contended Integer int_268 = new Integer(268); + @Contended Integer int_269 = new Integer(269); + @Contended Integer int_270 = new Integer(270); + @Contended Integer int_271 = new Integer(271); + @Contended Integer int_272 = new Integer(272); + @Contended Integer int_273 = new Integer(273); + @Contended Integer int_274 = new Integer(274); + @Contended Integer int_275 = new Integer(275); + @Contended Integer int_276 = new Integer(276); + @Contended Integer int_277 = new Integer(277); + @Contended Integer int_278 = new Integer(278); + @Contended Integer int_279 = new Integer(279); + @Contended Integer int_280 = new Integer(280); + @Contended Integer int_281 = new Integer(281); + @Contended Integer int_282 = new Integer(282); + @Contended Integer int_283 = new Integer(283); + @Contended Integer int_284 = new Integer(284); + @Contended Integer int_285 = new Integer(285); + @Contended Integer int_286 = new Integer(286); + @Contended Integer int_287 = new Integer(287); + @Contended Integer int_288 = new Integer(288); + @Contended Integer int_289 = new 
Integer(289); + @Contended Integer int_290 = new Integer(290); + @Contended Integer int_291 = new Integer(291); + @Contended Integer int_292 = new Integer(292); + @Contended Integer int_293 = new Integer(293); + @Contended Integer int_294 = new Integer(294); + @Contended Integer int_295 = new Integer(295); + @Contended Integer int_296 = new Integer(296); + @Contended Integer int_297 = new Integer(297); + @Contended Integer int_298 = new Integer(298); + @Contended Integer int_299 = new Integer(299); + @Contended Integer int_300 = new Integer(300); +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/gc/arguments/TestDisableDefaultGC.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/gc/arguments/TestDisableDefaultGC.java Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test TestDisableDefaultGC + * @summary Test that the VM complains when the default GC is disabled and no other GC is specified + * @bug 8068579 + * @key gc + * @library /testlibrary + * @requires vm.gc=="null" + * @modules java.base/jdk.internal.misc + * java.management + * @run driver TestDisableDefaultGC + */ + +import jdk.test.lib.ProcessTools; +import jdk.test.lib.OutputAnalyzer; + +public class TestDisableDefaultGC { + public static void main(String[] args) throws Exception { + // Start VM, disabling all possible default GCs + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:-UseSerialGC", + "-XX:-UseParallelGC", + "-XX:-UseG1GC", + "-XX:-UseConcMarkSweepGC", + "-version"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldMatch("Garbage collector not selected"); + output.shouldHaveExitValue(1); + } +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/gc/arguments/TestMaxHeapSizeTools.java --- a/hotspot/test/gc/arguments/TestMaxHeapSizeTools.java Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/test/gc/arguments/TestMaxHeapSizeTools.java Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* -* Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved. +* Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -112,10 +112,12 @@ } private static void checkInvalidMinInitialHeapCombinations(String gcflag) throws Exception { + expectError(new String[] { gcflag, "-XX:InitialHeapSize=1023K", "-version" }); expectError(new String[] { gcflag, "-Xms64M", "-XX:InitialHeapSize=32M", "-version" }); } private static void checkValidMinInitialHeapCombinations(String gcflag) throws Exception { + expectValid(new String[] { gcflag, "-XX:InitialHeapSize=1024K", "-version" }); expectValid(new String[] { gcflag, "-XX:InitialHeapSize=8M", "-Xms4M", "-version" }); expectValid(new String[] { gcflag, "-Xms4M", "-XX:InitialHeapSize=8M", "-version" }); expectValid(new String[] { gcflag, "-XX:InitialHeapSize=8M", "-Xms8M", "-version" }); @@ -124,11 +126,13 @@ } private static void checkInvalidInitialMaxHeapCombinations(String gcflag) throws Exception { + expectError(new String[] { gcflag, "-XX:MaxHeapSize=2047K", "-version" }); expectError(new String[] { gcflag, "-XX:MaxHeapSize=4M", "-XX:InitialHeapSize=8M", "-version" }); expectError(new String[] { gcflag, "-XX:InitialHeapSize=8M", "-XX:MaxHeapSize=4M", "-version" }); } private static void checkValidInitialMaxHeapCombinations(String gcflag) throws Exception { + expectValid(new String[] { gcflag, "-XX:MaxHeapSize=2048K", "-version" }); expectValid(new String[] { gcflag, "-XX:InitialHeapSize=4M", "-XX:MaxHeapSize=8M", "-version" }); expectValid(new String[] { gcflag, "-XX:MaxHeapSize=8M", "-XX:InitialHeapSize=4M", "-version" }); expectValid(new String[] { gcflag, "-XX:MaxHeapSize=4M", "-XX:InitialHeapSize=4M", "-version" }); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/gc/arguments/TestMaxMinHeapFreeRatioFlags.java --- a/hotspot/test/gc/arguments/TestMaxMinHeapFreeRatioFlags.java Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/test/gc/arguments/TestMaxMinHeapFreeRatioFlags.java Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* -* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. +* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -61,11 +61,11 @@ negativeTest(-1, false, 50, false, options); negativeTest(50, true, -1, true, options); - positiveTest(10, false, 90, false, options); - positiveTest(10, true, 80, false, options); - positiveTest(20, false, 70, true, options); - positiveTest(25, true, 65, true, options); - positiveTest(40, false, 50, false, options); + positiveTest(10, false, 90, false, true, options); + positiveTest(10, true, 80, false, true, options); + positiveTest(20, false, 70, true, true, options); + positiveTest(25, true, 65, true, true, options); + positiveTest(40, false, 50, false, true, options); } /** @@ -79,7 +79,7 @@ * @param options additional options for JVM */ public static void positiveTest(int minRatio, boolean useXminf, - int maxRatio, boolean useXmaxf, + int maxRatio, boolean useXmaxf, boolean shrinkHeapInSteps, LinkedList options) throws Exception { LinkedList vmOptions = new LinkedList<>(options); @@ -90,9 +90,11 @@ "-Xms" + HEAP_SIZE, "-XX:NewSize=" + NEW_SIZE, "-XX:MaxNewSize=" + MAX_NEW_SIZE, + "-XX:" + (shrinkHeapInSteps ? 
'+' : '-') + "ShrinkHeapInSteps", RatioVerifier.class.getName(), Integer.toString(minRatio), - Integer.toString(maxRatio) + Integer.toString(maxRatio), + Boolean.toString(shrinkHeapInSteps) ); ProcessBuilder procBuilder = ProcessTools.createJavaProcessBuilder(vmOptions.toArray(new String[vmOptions.size()])); @@ -151,8 +153,8 @@ public static LinkedList garbage = new LinkedList<>(); public static void main(String args[]) throws Exception { - if (args.length != 2) { - throw new IllegalArgumentException("Expected 2 args: "); + if (args.length != 3) { + throw new IllegalArgumentException("Expected 3 args: "); } if (GCTypes.OldGCType.getOldGCType() == GCTypes.OldGCType.PSOld) { System.out.println("Test is not applicable to parallel GC"); @@ -161,8 +163,10 @@ double minRatio = Integer.valueOf(args[0]) / 100.0; double maxRatio = Integer.valueOf(args[1]) / 100.0; + boolean shrinkHeapInSteps = Boolean.valueOf(args[2]); long maxHeapSize = getMax(); + int gcTries = (shrinkHeapInSteps ? GC_TRIES : 1); // commit 0.5 of total heap size to have enough space // to both shink and expand @@ -170,7 +174,7 @@ garbage.add(new byte[ARRAY_LENGTH]); } - forceGC(); + forceGC(gcTries); // Verify that current heap free ratio lies between specified limits verifyRatio(minRatio, maxRatio); @@ -185,7 +189,7 @@ memoryToFill -= CHUNK_SIZE; } - forceGC(); + forceGC(gcTries); // Verify that after memory allocation heap free ratio is still conforming specified limits verifyRatio(minRatio, maxRatio); // Verify that heap was actually expanded @@ -204,7 +208,7 @@ memoryToFree -= CHUNK_SIZE; } - forceGC(); + forceGC(gcTries); // Verify that heap free ratio is still conforming specified limits verifyRatio(minRatio, maxRatio); // Verify that heap was actually shrinked @@ -214,8 +218,8 @@ } - public static void forceGC() { - for (int i = 0; i < GC_TRIES; i++) { + public static void forceGC(int gcTries) { + for (int i = 0; i < gcTries; i++) { System.gc(); try { Thread.sleep(10); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/gc/arguments/TestSelectDefaultGC.java --- a/hotspot/test/gc/arguments/TestSelectDefaultGC.java Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/test/gc/arguments/TestSelectDefaultGC.java Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,9 +27,9 @@ * @bug 8068582 * @key gc * @library /testlibrary + * @requires vm.gc=="null" * @modules java.base/jdk.internal.misc * java.management - * @ignore 8148239 * @run driver TestSelectDefaultGC */ @@ -41,24 +41,40 @@ output.shouldMatch(" " + option + " .*=.* " + value + " "); } - public static void main(String[] args) throws Exception { + public static void testDefaultGC(boolean actAsServer) throws Exception { + String[] args = new String[] { + "-XX:" + (actAsServer ? "+" : "-") + "AlwaysActAsServerClassMachine", + "-XX:" + (actAsServer ? 
"-" : "+") + "NeverActAsServerClassMachine", + "-XX:+PrintFlagsFinal", + "-version" + }; + // Start VM without specifying GC - ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+PrintFlagsFinal", "-version"); + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(args); OutputAnalyzer output = new OutputAnalyzer(pb.start()); output.shouldHaveExitValue(0); - boolean isServerVM = Platform.isServer(); - boolean isEmbeddedVM = Platform.isEmbedded(); + final boolean isServer = actAsServer; + final boolean isEmbedded = Platform.isEmbedded(); // Verify GC selection - // G1 is default for non-embedded server VMs - assertVMOption(output, "UseG1GC", isServerVM && !isEmbeddedVM); - // Parallel is default for embedded server VMs - assertVMOption(output, "UseParallelGC", isServerVM && isEmbeddedVM); - assertVMOption(output, "UseParallelOldGC", isServerVM && isEmbeddedVM); - // Serial is default for non-server VMs - assertVMOption(output, "UseSerialGC", !isServerVM); + // G1 is default for non-embedded server class machines + assertVMOption(output, "UseG1GC", isServer && !isEmbedded); + // Parallel is default for embedded server class machines + assertVMOption(output, "UseParallelGC", isServer && isEmbedded); + assertVMOption(output, "UseParallelOldGC", isServer && isEmbedded); + // Serial is default for non-server class machines + assertVMOption(output, "UseSerialGC", !isServer); + // CMS is never default assertVMOption(output, "UseConcMarkSweepGC", false); assertVMOption(output, "UseParNewGC", false); } + + public static void main(String[] args) throws Exception { + // Test server class machine + testDefaultGC(false); + + // Test non-server class machine + testDefaultGC(true); + } } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/gc/arguments/TestShrinkHeapInSteps.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/gc/arguments/TestShrinkHeapInSteps.java Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,55 @@ +/* +* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. +* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +* +* This code is free software; you can redistribute it and/or modify it +* under the terms of the GNU General Public License version 2 only, as +* published by the Free Software Foundation. +* +* This code is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +* version 2 for more details (a copy is included in the LICENSE file that +* accompanied this code). +* +* You should have received a copy of the GNU General Public License version +* 2 along with this work; if not, write to the Free Software Foundation, +* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +* +* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +* or visit www.oracle.com if you need additional information or have any +* questions. +*/ + +/* + * @test TestShrinkHeapInSteps + * @key gc + * @summary Verify that -XX:-ShrinkHeapInSteps works properly. 
+ * @library /testlibrary + * @modules java.base/jdk.internal.misc + * java.management + * @build TestMaxMinHeapFreeRatioFlags TestShrinkHeapInSteps + * @run driver/timeout=240 TestShrinkHeapInSteps + */ + +import java.util.LinkedList; +import java.util.Arrays; +import java.util.Collections; +import jdk.test.lib.Utils; + +public class TestShrinkHeapInSteps { + public static void main(String args[]) throws Exception { + LinkedList options = new LinkedList<>( + Arrays.asList(Utils.getFilteredTestJavaOpts("-XX:[^ ]*HeapFreeRatio","-XX:\\+ExplicitGCInvokesConcurrent")) + ); + + // Leverage the existing TestMaxMinHeapFreeRatioFlags test, but pass + // "false" for the shrinkHeapInSteps argument. This will cause it to + // run with -XX:-ShrinkHeapInSteps, and only do 1 full GC instead of 10. + TestMaxMinHeapFreeRatioFlags.positiveTest(10, false, 90, false, false, options); + TestMaxMinHeapFreeRatioFlags.positiveTest(10, true, 80, false, false, options); + TestMaxMinHeapFreeRatioFlags.positiveTest(20, false, 70, true, false, options); + TestMaxMinHeapFreeRatioFlags.positiveTest(25, true, 65, true, false, options); + TestMaxMinHeapFreeRatioFlags.positiveTest(40, false, 50, false, false, options); + } +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/gc/ergonomics/TestDynamicNumberOfGCThreads.java --- a/hotspot/test/gc/ergonomics/TestDynamicNumberOfGCThreads.java Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/test/gc/ergonomics/TestDynamicNumberOfGCThreads.java Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -63,6 +63,14 @@ System.arraycopy(baseArgs, 0, finalArgs, extraArgs.length, baseArgs.length); pb_enabled = ProcessTools.createJavaProcessBuilder(finalArgs); verifyDynamicNumberOfGCThreads(new OutputAnalyzer(pb_enabled.start())); + + // Turn on parallel reference processing + String[] parRefProcArg = {"-XX:+ParallelRefProcEnabled", "-XX:-ShowMessageBoxOnError"}; + String[] parRefArgs = new String[baseArgs.length + parRefProcArg.length]; + System.arraycopy(parRefProcArg, 0, parRefArgs, 0, parRefProcArg.length); + System.arraycopy(baseArgs, 0, parRefArgs, parRefProcArg.length, baseArgs.length); + pb_enabled = ProcessTools.createJavaProcessBuilder(parRefArgs); + verifyDynamicNumberOfGCThreads(new OutputAnalyzer(pb_enabled.start())); } static class GCTest { diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/gc/g1/Test2GbHeap.java --- a/hotspot/test/gc/g1/Test2GbHeap.java Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/test/gc/g1/Test2GbHeap.java Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -53,7 +53,7 @@ if (output.getOutput().contains("Could not reserve enough space for 2097152KB object heap")) { // Will fail on machines with too little memory (and Windows 32-bit VM), ignore such failures. 
output.shouldHaveExitValue(1); - } else if (output.getOutput().contains("G1 GC is disabled in this release")) { + } else if (output.getOutput().contains("-XX:+UseG1GC not supported in this VM")) { // G1 is not supported on embedded, ignore such failures. output.shouldHaveExitValue(1); } else { diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/gc/g1/TestRegionLivenessPrint.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/gc/g1/TestRegionLivenessPrint.java Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test TestRegionLivenessPrint.java + * @bug 8151920 + * @requires vm.gc=="G1" | vm.gc=="null" + * @summary Make sure that G1 does not assert when printing region liveness data on a humongous continues region. + * @key gc + * @library /testlibrary /test/lib + * @modules java.base/jdk.internal.misc + * @build TestRegionLivenessPrint + * @run main ClassFileInstaller sun.hotspot.WhiteBox + * sun.hotspot.WhiteBox$WhiteBoxPermission + * @run main/othervm -Xbootclasspath/a:. -XX:+WhiteBoxAPI -XX:+UseG1GC -Xmx128M -XX:G1HeapRegionSize=1m -Xlog:gc+liveness=trace TestRegionLivenessPrint + */ + +import sun.hotspot.WhiteBox; + +public class TestRegionLivenessPrint { + + static byte[] bigobj = new byte[1024* 1024 * 2]; + + public static void main(String[] args) throws InterruptedException { + WhiteBox wb = WhiteBox.getWhiteBox(); + // Run a concurrent mark cycle to trigger the liveness accounting log messages. + wb.g1StartConcMarkCycle(); + while (wb.g1InConcurrentMark()) { + Thread.sleep(100); + } + } + +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/gc/g1/TestStringSymbolTableStats.java --- a/hotspot/test/gc/g1/TestStringSymbolTableStats.java Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/test/gc/g1/TestStringSymbolTableStats.java Wed Jul 05 21:35:27 2017 +0200 @@ -39,7 +39,7 @@ ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC", "-XX:+UnlockExperimentalVMOptions", - "-Xlog:gc+stringdedup=trace", + "-Xlog:gc+stringtable=trace", SystemGCTest.class.getName()); OutputAnalyzer output = new OutputAnalyzer(pb.start()); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/gc/g1/ihop/TestIHOPErgo.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/gc/g1/ihop/TestIHOPErgo.java Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,228 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + + /* + * @test TestIHOPErgo + * @bug 8148397 + * @summary Test checks that behavior of Adaptive and Static IHOP at concurrent cycle initiation + * @requires vm.gc=="G1" | vm.gc=="null" + * @requires vm.opt.FlightRecorder != true + * @requires vm.opt.ExplicitGCInvokesConcurrent != true + * @library /testlibrary /test/lib / + * @modules java.management + * @build gc.g1.ihop.TestIHOPErgo + * gc.g1.ihop.lib.IhopUtils + * @run driver/timeout=480 gc.g1.ihop.TestIHOPErgo + */ +package gc.g1.ihop; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.LinkedList; +import java.util.List; + +import jdk.test.lib.OutputAnalyzer; +import jdk.test.lib.ProcessTools; + +import gc.g1.ihop.lib.IhopUtils; + +/** + * The test starts the AppIHOP multiple times varying settings of MaxHeapSize. + * The test parses GC log from AppIHOP to check: + * - occupancy is not less than threshold for Adaptive and Static IHOP at + * concurrent cycle initiation + * - Adaptive IHOP prediction was started during AppIHOP executing + * - log contains ergonomic messages in log + */ +public class TestIHOPErgo { + + // Common GC tune and logging options for test. + private final static String[] COMMON_OPTIONS = { + "-XX:+UnlockExperimentalVMOptions", + "-XX:G1MixedGCLiveThresholdPercent=100", + "-XX:G1HeapWastePercent=0", + "-XX:MaxGCPauseMillis=30000", + "-XX:G1MixedGCCountTarget=1", + "-XX:+UseG1GC", + "-XX:G1HeapRegionSize=1m", + "-XX:+G1UseAdaptiveIHOP", + "-Xlog:gc+ihop=debug,gc+ihop+ergo=debug,gc+ergo=debug", + "-XX:+AlwaysTenure", + "-XX:G1AdaptiveIHOPNumInitialSamples=1", + "-XX:InitiatingHeapOccupancyPercent=30" + }; + + public static void main(String[] args) throws Throwable { + + // heap size MB, sleep time for allocator, true/false for adaptive/static + runTest(64, 0, false); + runTest(64, 100, false); + runTest(128, 100, false); + runTest(256, 50, false); + runTest(512, 30, false); + runTest(64, 50, true); + runTest(128, 200, true); + runTest(256, 100, true); + runTest(512, 50, true); + } + + /** + * Runs AppIHOP in separate VM and checks GC log. + * + * @param heapSize heap size + * @param sleepTime sleep time between memory allocations. 
+ * @param isIhopAdaptive true forAdaptive IHOP, false for Static + * + * @throws Throwable + */ + private static void runTest(int heapSize, int sleepTime, boolean isIhopAdaptive) throws Throwable { + System.out.println("IHOP test:"); + System.out.println(" MaxHeapSize : " + heapSize); + + List options = new ArrayList<>(); + Collections.addAll(options, + "-Dheap.size=" + heapSize, + "-Dsleep.time=" + sleepTime, + "-XX:MaxHeapSize=" + heapSize + "M", + "-XX:NewSize=" + heapSize / 8 + "M", + "-XX:MaxNewSize=" + heapSize / 8 + "M", + "-XX:InitialHeapSize=" + heapSize + "M", + "-XX:" + (isIhopAdaptive ? "+" : "-") + "G1UseAdaptiveIHOP" + ); + + Collections.addAll(options, COMMON_OPTIONS); + options.add(AppIHOP.class.getName()); + OutputAnalyzer out = executeTest(options); + + // Checks that log contains message which indicates that IHOP prediction is active + if (isIhopAdaptive) { + IhopUtils.checkAdaptiveIHOPWasActivated(out); + } + // Checks that log contains messages which indicates that VM initiates/checks heap occupancy + // and tries to start concurrent cycle. + IhopUtils.checkErgoMessagesExist(out); + + // Checks threshold and occupancy values + IhopUtils.checkIhopLogValues(out); + } + + private static OutputAnalyzer executeTest(List options) throws Throwable, RuntimeException { + OutputAnalyzer out; + out = ProcessTools.executeTestJvm(options.toArray(new String[options.size()])); + if (out.getExitValue() != 0) { + System.out.println(out.getOutput()); + throw new RuntimeException("AppIHOP failed with exit code" + out.getExitValue()); + } + return out; + } + + /** + * The AppIHOP fills 60% of heap and allocates and frees 30% of existing + * heap 'iterations' times to achieve IHOP activation. To be executed in + * separate VM. Expected properties: + * heap.size - heap size which is used to calculate amount of memory + * to be allocated and freed + * sleep.time - short pause between filling each MB + */ + public static class AppIHOP { + + public final static LinkedList GARBAGE = new LinkedList<>(); + + private final int ITERATIONS = 10; + private final int OBJECT_SIZE = 100000; + // 60% of the heap will be filled before test cycles. + // 30% of the heap will be filled and freed during test cycle. + private final long HEAP_PREALLOC_PCT = 60; + private final long HEAP_ALLOC_PCT = 30; + private final long HEAP_SIZE; + // Amount of memory to be allocated before iterations start + private final long HEAP_PREALLOC_SIZE; + // Amount of memory to be allocated and freed during iterations + private final long HEAP_ALLOC_SIZE; + private final int SLEEP_TIME; + + public static void main(String[] args) throws InterruptedException { + new AppIHOP().start(); + } + + AppIHOP() { + HEAP_SIZE = Integer.getInteger("heap.size") * 1024 * 1024; + SLEEP_TIME = Integer.getInteger("sleep.time"); + + HEAP_PREALLOC_SIZE = HEAP_SIZE * HEAP_PREALLOC_PCT / 100; + HEAP_ALLOC_SIZE = HEAP_SIZE * HEAP_ALLOC_PCT / 100; + } + + public void start() throws InterruptedException { + fill(HEAP_PREALLOC_SIZE); + fillAndFree(HEAP_ALLOC_SIZE, ITERATIONS); + } + + /** + * Fills allocationSize bytes of garbage. + * + * @param allocationSize amount of garbage + */ + private void fill(long allocationSize) { + long allocated = 0; + while (allocated < allocationSize) { + GARBAGE.addFirst(new byte[OBJECT_SIZE]); + allocated += OBJECT_SIZE; + } + } + + /** + * Allocates allocationSize bytes of garbage. Performs a short pauses + * during allocation. Frees allocated garbage. 
+ * + * @param allocationSize amount of garbage per iteration + * @param iterations iteration count + * + * @throws InterruptedException + */ + private void fillAndFree(long allocationSize, int iterations) throws InterruptedException { + + for (int i = 0; i < iterations; ++i) { + System.out.println("Iteration:" + i); + long allocated = 0; + long counter = 0; + while (allocated < allocationSize) { + GARBAGE.addFirst(new byte[OBJECT_SIZE]); + allocated += OBJECT_SIZE; + counter += OBJECT_SIZE; + if (counter > 1024 * 1024) { + counter = 0; + if (SLEEP_TIME != 0) { + Thread.sleep(SLEEP_TIME); + } + } + } + long removed = 0; + while (removed < allocationSize) { + GARBAGE.removeLast(); + removed += OBJECT_SIZE; + } + } + } + } +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/gc/g1/ihop/TestIHOPStatic.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/gc/g1/ihop/TestIHOPStatic.java Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,199 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + + /* + * @test TestIHOPStatic + * @bug 8148397 + * @summary Test checks concurrent cycle initiation which depends on IHOP value. + * @requires vm.gc=="G1" | vm.gc=="null" + * @requires vm.opt.FlightRecorder != true + * @requires vm.opt.ExplicitGCInvokesConcurrent != true + * @library /testlibrary / + * @modules java.management + * @build gc.g1.ihop.TestIHOPStatic + * gc.g1.ihop.lib.IhopUtils + * @run driver/timeout=240 gc.g1.ihop.TestIHOPStatic + */ +package gc.g1.ihop; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import jdk.test.lib.OutputAnalyzer; +import jdk.test.lib.ProcessTools; +import jdk.test.lib.Utils; + +import gc.g1.ihop.lib.IhopUtils; + +/** + * The test starts the AppIHOP multiple times varying setting of MaxHeapSize, + * IHOP and amount of memory to allocate. Then the test parses the GC log from + * the app to check that Concurrent Mark Cycle was initiated only if needed + * and at the right moment, defined by IHOP setting. 
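 *
 * As a rough illustration of the arithmetic involved (using the first pair of test
 * cases in main below, and approximate numbers): with -XX:MaxHeapSize=64M and
 * -XX:InitiatingHeapOccupancyPercent=30 the static threshold is about
 * 0.30 * 64M = 19.2M of occupancy, so filling 35% of the heap (about 22.4M) is
 * expected to produce the concurrent cycle initiation message, while filling only
 * 25% (about 16M) is not.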
+ */ +public class TestIHOPStatic { + + final static long YOUNG_SIZE = 8 * 1024 * 1024; + + private final static String[] COMMON_OPTIONS = { + "-XX:+UseG1GC", + "-XX:G1HeapRegionSize=1m", + "-XX:-G1UseAdaptiveIHOP", + "-XX:NewSize=" + YOUNG_SIZE, + "-XX:MaxNewSize=" + YOUNG_SIZE, + "-Xlog:gc+ihop+ergo=debug,gc*=debug" + }; + + public static void main(String[] args) throws Throwable { + + // Test case: + // IHOP value, heap occupancy, heap size, expectation of message + // Test cases for occupancy is greater than IHOP + runTest(30, 35, 64, true); + runTest(50, 55, 256, true); + runTest(60, 65, 64, true); + runTest(70, 75, 512, true); + + // Test cases for big difference between occupancy and IHOP + runTest(30, 50, 256, true); + runTest(30, 70, 512, true); + runTest(50, 70, 256, true); + + // Test cases for occupancy is less than IHOP + runTest(30, 25, 64, false); + runTest(50, 45, 256, false); + runTest(70, 65, 64, false); + runTest(70, 65, 512, false); + + // Test cases for big difference between occupancy and IHOP + runTest(50, 30, 300, false); + runTest(70, 50, 160, false); + + // Cases for 0 and 100 IHOP. + runTest(0, 50, 256, true); + runTest(0, 95, 512, true); + runTest(100, 20, 64, false); + runTest(100, 100, 512, false); + } + + /** + * Runs the test case. + * + * @param ihop IHOP value + * @param pctToFill heap percentage to be filled + * @param heapSize heap size for test + * @param expectInitiationMessage + * true - concurrent cycle initiation message is expected + * false - message is not expected + * + * @throws Throwable + */ + private static void runTest(int ihop, long pctToFill, long heapSize, boolean expectInitiationMessage) throws Throwable { + System.out.println(""); + System.out.println("IHOP test:"); + System.out.println(" InitiatingHeapOccupancyPercent : " + ihop); + System.out.println(" Part of heap to fill (percentage) : " + pctToFill); + System.out.println(" MaxHeapSize : " + heapSize); + System.out.println(" Expect for concurrent cycle initiation message : " + expectInitiationMessage); + List options = new ArrayList<>(); + Collections.addAll(options, Utils.getTestJavaOpts()); + Collections.addAll(options, + "-XX:InitiatingHeapOccupancyPercent=" + ihop, + "-Dmemory.fill=" + (heapSize * 1024 * 1024 * pctToFill / 100), + "-XX:MaxHeapSize=" + heapSize + "M", + "-XX:InitialHeapSize=" + heapSize + "M" + ); + Collections.addAll(options, COMMON_OPTIONS); + options.add(AppIHOP.class.getName()); + + OutputAnalyzer out = ProcessTools.executeTestJvm(options.toArray(new String[options.size()])); + + if (out.getExitValue() != 0) { + System.out.println(out.getOutput()); + throw new RuntimeException("IhopTest failed with exit code " + out.getExitValue()); + } + + checkResult(out, expectInitiationMessage); + } + + /** + * Checks execution results to ensure that concurrent cycle was initiated or + * was not. + * + * @param out + * @param expectInitiationMessage true - test expects for concurrent cycle initiation. + * false - test does not expect for concurrent cycle initiation + */ + private static void checkResult(OutputAnalyzer out, boolean expectInitiationMessage) { + // Find expected messages + List logItems = IhopUtils.getErgoInitiationMessages(out); + + // Concurrent cycle was not initiated but was expected. 
+ if (logItems.isEmpty() && expectInitiationMessage) { + System.out.println(out.getOutput()); + throw new RuntimeException("Concurrent cycle was not initiated."); + } + IhopUtils.checkIhopLogValues(out); + } + + static class AppIHOP { + + /** + * Simple class which fills part of memory and initiates GC. + * To be executed in separate VM. + * Expect next VM properties to be set: + * memory.fill - amount of garbage to be created. + */ + private static final long MEMORY_TO_FILL = Integer.getInteger("memory.fill"); + private final static int CHUNK_SIZE = 10000; + + public final static ArrayList STORAGE = new ArrayList<>(); + + public static void main(String[] args) throws InterruptedException { + + // Calculate part of heap to be filled to achieve expected occupancy. + System.out.println("Mem to fill:" + MEMORY_TO_FILL); + if (MEMORY_TO_FILL <= 0) { + throw new RuntimeException("Wrong memory size: " + MEMORY_TO_FILL); + } + try { + createGarbage(MEMORY_TO_FILL); + } catch (OutOfMemoryError oome) { + return; + } + // Concurrent cycle initiation should start at end of Young GC cycle. + // Will fill entire young gen with garbage to guarantee that Young GC was initiated. + try { + createGarbage(TestIHOPStatic.YOUNG_SIZE); + } catch (OutOfMemoryError oome) { + } + } + + private static void createGarbage(long memToFill) { + for (long i = 0; i < memToFill / CHUNK_SIZE; i++) { + STORAGE.add(new byte[CHUNK_SIZE]); + } + } + } +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/gc/g1/ihop/lib/IhopUtils.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/gc/g1/ihop/lib/IhopUtils.java Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,144 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package gc.g1.ihop.lib; + +import java.util.List; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import jdk.test.lib.OutputAnalyzer; + + +/** + * Utility class to extract IHOP related information from the GC log. + * The class provides a number of static method to be used from tests. 
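 *
 * A minimal usage sketch (mirroring the calls made by TestIHOPErgo above; the
 * OutputAnalyzer is assumed to wrap the GC log of the child test VM):
 * <pre>
 *   OutputAnalyzer out = ProcessTools.executeTestJvm(options.toArray(new String[options.size()]));
 *   IhopUtils.checkErgoMessagesExist(out);        // the expected IHOP ergonomics messages are present
 *   IhopUtils.checkIhopLogValues(out);            // occupancy is not less than threshold at each initiation
 *   IhopUtils.checkAdaptiveIHOPWasActivated(out); // only for runs with adaptive IHOP enabled
 * </pre>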
+ */ +public class IhopUtils { + + // Examples of GC log for IHOP: + // [0.402s][debug][gc,ergo,ihop] GC(9) Do not request concurrent cycle initiation (still doing mixed collections) occupancy: 66060288B allocation request: 0B threshold: 59230757B (88.26) source: end of GC + // [0.466s][debug][gc,ergo,ihop] GC(18) Request concurrent cycle initiation (occupancy higher than threshold) occupancy: 52428800B allocation request: 0B threshold: 0B (0.00) source: end of GC + + /** + * Patterns used for extracting occupancy and threshold from the GC log. + */ + private final static Pattern OCCUPANCY = Pattern.compile("occupancy: (\\d+)B"); + private final static Pattern THRESHOLD = Pattern.compile("threshold: (\\d+)B"); + + /** + * Messages related to concurrent cycle initiation. + */ + private final static String CYCLE_INITIATION_MESSAGE = "Request concurrent cycle initiation (occupancy higher than threshold)"; + private final static String CYCLE_INITIATION_MESSAGE_FALSE = "Do not request concurrent cycle initiation (still doing mixed collections)"; + private final static String ADAPTIVE_IHOP_PREDICTION_ACTIVE_MESSAGE = "prediction active: true"; + + /** + * Finds log lines which contain any of the given strings. + * + * @param outputAnalyzer OutputAnalyzer whose log lines are searched for IHOP messages + * @param stringsToFind Strings to look for in the OutputAnalyzer content + * @return List of strings which were matched. + */ + private static List<String> findInLog(OutputAnalyzer outputAnalyzer, String... stringsToFind) { + return outputAnalyzer.asLines().stream() + .filter(string -> { + return Stream.of(stringsToFind) + .filter(find -> string.contains(find)) + .findAny() + .isPresent(); + }) + .collect(Collectors.toList()); + } + + /** + * Checks that memory occupancy is greater than or equal to the threshold. + * This method searches for occupancy and threshold in the GC log entries corresponding to Conc Mark Cycle initiation + * and compares their values. If no CMC initiation happens, it does nothing. + * @param outputAnalyzer OutputAnalyzer which contains GC log to be checked + * @throws RuntimeException If the check fails + */ + public static void checkIhopLogValues(OutputAnalyzer outputAnalyzer) { + // Concurrent cycle was initiated but was not expected. + // Check that occupancy is not less than the threshold. + List<String> logItems = IhopUtils.getErgoMessages(outputAnalyzer); + logItems.stream() + .forEach(item -> { + long occupancy = IhopUtils.getLongByPattern(item, IhopUtils.OCCUPANCY); + long threshold = IhopUtils.getLongByPattern(item, IhopUtils.THRESHOLD); + if (occupancy < threshold) { + System.out.println(outputAnalyzer.getOutput()); + throw new RuntimeException("Concurrent cycle initiation is unexpected. Occupancy (" + occupancy + ") is less than threshold (" + threshold + ")"); + } + System.out.printf("Concurrent cycle was initiated with occupancy = %d and threshold = %d%n", occupancy, threshold); + }); + } + + private static Long getLongByPattern(String line, Pattern pattern) { + Matcher number = pattern.matcher(line); + if (number.find()) { + return Long.parseLong(number.group(1)); + } + System.out.println(line); + throw new RuntimeException("Cannot find Long in string."); + } + + /** + * Finds concurrent cycle initiation messages. + * @param outputAnalyzer OutputAnalyzer + * @return List with messages which were found.
+ */ + public static List<String> getErgoInitiationMessages(OutputAnalyzer outputAnalyzer) { + return IhopUtils.findInLog(outputAnalyzer, CYCLE_INITIATION_MESSAGE); + } + + /** + * Gets IHOP ergo messages from GC log. + * @param outputAnalyzer + * @return List with found messages + */ + private static List<String> getErgoMessages(OutputAnalyzer outputAnalyzer) { + return IhopUtils.findInLog(outputAnalyzer, CYCLE_INITIATION_MESSAGE, CYCLE_INITIATION_MESSAGE_FALSE); + } + + /** + * Checks that GC log contains expected ergonomic messages. + * @param outputAnalyzer OutputAnalyzer with GC log for checking + * @throws RuntimeException If no IHOP ergo messages were found + */ + public static void checkErgoMessagesExist(OutputAnalyzer outputAnalyzer) { + String output = outputAnalyzer.getOutput(); + if (!(output.contains(CYCLE_INITIATION_MESSAGE) || output.contains(CYCLE_INITIATION_MESSAGE_FALSE))) { + throw new RuntimeException("Cannot find expected IHOP ergonomics messages"); + } + } + + /** + * Checks that adaptive IHOP was activated. + * @param outputAnalyzer OutputAnalyzer with GC log for checking + * @throws RuntimeException If IHOP message was not found. + */ + public static void checkAdaptiveIHOPWasActivated(OutputAnalyzer outputAnalyzer) { + outputAnalyzer.shouldContain(ADAPTIVE_IHOP_PREDICTION_ACTIVE_MESSAGE); + } +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/gc/g1/plab/TestPLABPromotion.java --- a/hotspot/test/gc/g1/plab/TestPLABPromotion.java Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/test/gc/g1/plab/TestPLABPromotion.java Wed Jul 05 21:35:27 2017 +0200 @@ -41,17 +41,16 @@ package gc.g1.plab; import java.util.List; -import java.util.Map; import java.util.Arrays; import java.io.PrintStream; import gc.g1.plab.lib.AppPLABPromotion; import gc.g1.plab.lib.LogParser; import gc.g1.plab.lib.PLABUtils; +import gc.g1.plab.lib.PlabInfo; import jdk.test.lib.OutputAnalyzer; import jdk.test.lib.ProcessTools; -import jdk.test.lib.Platform; /** * Test checks PLAB promotion of different size objects.
@@ -63,6 +62,12 @@ // GC ID with old PLAB statistics private final static long GC_ID_OLD_STATS = 2l; + private final static String PLAB_USED_FIELD_NAME = "used"; + private final static String PLAB_DIRECT_ALLOCATED_FIELD_NAME = "direct allocated"; + private final static List FIELDS_TO_EXTRACT = Arrays.asList(PLAB_USED_FIELD_NAME, PLAB_DIRECT_ALLOCATED_FIELD_NAME); + + private static String output; + // Allowable difference for memory consumption (percentage) private final static long MEM_DIFFERENCE_PCT = 5; @@ -120,11 +125,12 @@ System.out.println(out.getOutput()); throw new RuntimeException("Expect exit code 0."); } - checkResults(out.getOutput(), testCase); + output = out.getOutput(); + checkResults(testCase); } } - private static void checkResults(String output, TestCase testCase) { + private static void checkResults(TestCase testCase) { long plabAllocatedSurvivor; long directAllocatedSurvivor; long plabAllocatedOld; @@ -132,65 +138,89 @@ long memAllocated = testCase.getMemToFill(); LogParser logParser = new LogParser(output); - Map survivorStats = getPlabStats(logParser, LogParser.ReportType.SURVIVOR_STATS, GC_ID_SURVIVOR_STATS); - Map oldStats = getPlabStats(logParser, LogParser.ReportType.OLD_STATS, GC_ID_OLD_STATS); + PlabInfo survivorPlabInfo = logParser.getSpecifiedStats(GC_ID_SURVIVOR_STATS, LogParser.ReportType.SURVIVOR_STATS, FIELDS_TO_EXTRACT); + PlabInfo oldPlabInfo = logParser.getSpecifiedStats(GC_ID_OLD_STATS, LogParser.ReportType.OLD_STATS, FIELDS_TO_EXTRACT); - plabAllocatedSurvivor = survivorStats.get("used"); - directAllocatedSurvivor = survivorStats.get("direct allocated"); - plabAllocatedOld = oldStats.get("used"); - directAllocatedOld = oldStats.get("direct allocated"); + checkFields(survivorPlabInfo); + checkFields(oldPlabInfo); + + plabAllocatedSurvivor = survivorPlabInfo.get(PLAB_USED_FIELD_NAME); + directAllocatedSurvivor = survivorPlabInfo.get(PLAB_DIRECT_ALLOCATED_FIELD_NAME); + plabAllocatedOld = oldPlabInfo.get(PLAB_USED_FIELD_NAME); + directAllocatedOld = oldPlabInfo.get(PLAB_DIRECT_ALLOCATED_FIELD_NAME); System.out.printf("Survivor PLAB allocated:%17d Direct allocated: %17d Mem consumed:%17d%n", plabAllocatedSurvivor, directAllocatedSurvivor, memAllocated); System.out.printf("Old PLAB allocated:%17d Direct allocated: %17d Mem consumed:%17d%n", plabAllocatedOld, directAllocatedOld, memAllocated); // Unreachable objects case if (testCase.isDeadObjectCase()) { - // No dead objects should be promoted - if (!(checkRatio(plabAllocatedSurvivor, memAllocated) && checkRatio(directAllocatedSurvivor, memAllocated))) { - System.out.println(output); - throw new RuntimeException("Unreachable objects should not be allocated using PLAB or direct allocated to Survivor"); - } - if (!(checkRatio(plabAllocatedOld, memAllocated) && checkRatio(directAllocatedOld, memAllocated))) { - System.out.println(output); - throw new RuntimeException("Unreachable objects should not be allocated using PLAB or direct allocated to Old"); - } + checkDeadObjectsPromotion(plabAllocatedSurvivor, directAllocatedSurvivor, memAllocated); + checkDeadObjectsPromotion(plabAllocatedOld, directAllocatedOld, memAllocated); + } else { // Live objects case if (testCase.isPromotedByPLAB()) { - // All live small objects should be promoted using PLAB - if (!checkDifferenceRatio(plabAllocatedSurvivor, memAllocated)) { - System.out.println(output); - throw new RuntimeException("Expect that Survivor PLAB allocation are similar to all mem consumed"); - } - if (!checkDifferenceRatio(plabAllocatedOld, memAllocated)) { 
- System.out.println(output); - throw new RuntimeException("Expect that Old PLAB allocation are similar to all mem consumed"); - } + checkLiveObjectsPromotion(plabAllocatedSurvivor, memAllocated, "Expect that Survivor PLAB allocation are similar to all mem consumed"); + checkLiveObjectsPromotion(plabAllocatedOld, memAllocated, "Expect that Old PLAB allocation are similar to all mem consumed"); } else { // All big objects should be directly allocated - if (!checkDifferenceRatio(directAllocatedSurvivor, memAllocated)) { - System.out.println(output); - throw new RuntimeException("Test fails. Expect that Survivor direct allocation are similar to all mem consumed"); - } - if (!checkDifferenceRatio(directAllocatedOld, memAllocated)) { - System.out.println(output); - throw new RuntimeException("Test fails. Expect that Old direct allocation are similar to all mem consumed"); - } + checkLiveObjectsPromotion(directAllocatedSurvivor, memAllocated, "Expect that Survivor direct allocation are similar to all mem consumed"); + checkLiveObjectsPromotion(directAllocatedOld, memAllocated, "Expect that Old direct allocation are similar to all mem consumed"); } - // All promoted objects size should be similar to all consumed memory - if (!checkDifferenceRatio(plabAllocatedSurvivor + directAllocatedSurvivor, memAllocated)) { - System.out.println(output); - throw new RuntimeException("Test fails. Expect that Survivor gen total allocation are similar to all mem consumed"); - } - if (!checkDifferenceRatio(plabAllocatedOld + directAllocatedOld, memAllocated)) { - System.out.println(output); - throw new RuntimeException("Test fails. Expect that Old gen total allocation are similar to all mem consumed"); - } + checkTotalPromotion(plabAllocatedSurvivor, directAllocatedSurvivor, memAllocated, "Expect that Survivor gen total allocation are similar to all mem consumed"); + checkTotalPromotion(plabAllocatedOld, directAllocatedOld, memAllocated, "Expect that Old gen total allocation are similar to all mem consumed"); } System.out.println("Test passed!"); } + private static void checkTotalPromotion(long plabAllocatedSurvivor, long directAllocatedSurvivor, long memAllocated, String exceptionMessage) { + // All promoted objects size should be similar to all consumed memory + if (!checkDifferenceRatio(plabAllocatedSurvivor + directAllocatedSurvivor, memAllocated)) { + System.out.println(output); + throw new RuntimeException(exceptionMessage); + } + } + + /** + * Checks that live objects were promoted as expected. + * @param plabAllocated + * @param totalMemAllocated + * @param exceptionMessage + */ + private static void checkLiveObjectsPromotion(long plabAllocated, long totalMemAllocated, String exceptionMessage) { + // All live small objects should be promoted using PLAB + if (!checkDifferenceRatio(plabAllocated, totalMemAllocated)) { + System.out.println(output); + throw new RuntimeException(exceptionMessage); + } + } + + /** + * Checks that dead objects are not promoted. 
+ * @param plabPromoted promoted by PLAB + * @param directlyPromoted + * @param memoryAllocated total memory allocated + */ + private static void checkDeadObjectsPromotion(long plabPromoted, long directlyPromoted, long memoryAllocated) { + // No dead objects should be promoted + if (!(checkRatio(plabPromoted, memoryAllocated) && checkRatio(directlyPromoted, memoryAllocated))) { + System.out.println(output); + throw new RuntimeException("Unreachable objects should not be allocated using PLAB or directly allocated to Survivor/Old"); + } + } + + /** + * Checks that PLAB statistics contains expected fields. + * @param info + */ + private static void checkFields(PlabInfo info) { + if (!info.checkFields(FIELDS_TO_EXTRACT)) { + System.out.println(output); + throw new RuntimeException("PLAB log does not contain expected fields"); + } + } + /** * Returns true if checkedValue is less than MEM_DIFFERENCE_PCT percent of controlValue. * @@ -215,14 +245,6 @@ return (Math.abs(checkedValue - controlValue) / controlValue) * 100L < MEM_DIFFERENCE_PCT; } - private static Map getPlabStats(LogParser logParser, LogParser.ReportType type, long gc_id) { - - Map survivorStats = logParser.getEntries() - .get(gc_id) - .get(type); - return survivorStats; - } - /** * Description of one test case. */ diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/gc/g1/plab/TestPLABResize.java --- a/hotspot/test/gc/g1/plab/TestPLABResize.java Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/test/gc/g1/plab/TestPLABResize.java Wed Jul 05 21:35:27 2017 +0200 @@ -35,23 +35,21 @@ * gc.g1.plab.lib.MemoryConsumer * gc.g1.plab.lib.PLABUtils * gc.g1.plab.lib.AppPLABResize - * @ignore 8150183 * @run main ClassFileInstaller sun.hotspot.WhiteBox * sun.hotspot.WhiteBox$WhiteBoxPermission * @run main gc.g1.plab.TestPLABResize */ package gc.g1.plab; -import java.util.ArrayList; import java.util.Arrays; import java.util.List; -import java.util.Map; import java.util.stream.Collectors; import java.io.PrintStream; import gc.g1.plab.lib.LogParser; import gc.g1.plab.lib.PLABUtils; import gc.g1.plab.lib.AppPLABResize; +import gc.g1.plab.lib.PlabReport; import jdk.test.lib.OutputAnalyzer; import jdk.test.lib.ProcessTools; @@ -75,6 +73,8 @@ private static final int ITERATIONS_MEDIUM = 5; private static final int ITERATIONS_HIGH = 8; + private static final String PLAB_SIZE_FIELD_NAME = "actual"; + private final static TestCase[] TEST_CASES = { new TestCase(WASTE_PCT_SMALL, OBJECT_SIZE_SMALL, GC_NUM_SMALL, ITERATIONS_MEDIUM), new TestCase(WASTE_PCT_SMALL, OBJECT_SIZE_MEDIUM, GC_NUM_HIGH, ITERATIONS_SMALL), @@ -110,41 +110,33 @@ */ private static void checkResults(String output, TestCase testCase) { final LogParser log = new LogParser(output); - final Map>> entries = log.getEntries(); + final PlabReport report = log.getEntries(); - final ArrayList plabSizes = entries.entrySet() - .stream() - .map(item -> { - return item.getValue() - .get(LogParser.ReportType.SURVIVOR_STATS) - .get("actual"); - }) - .collect(Collectors.toCollection(ArrayList::new)); + final List plabSizes = report.entryStream() + .map(item -> item.getValue() + .get(LogParser.ReportType.SURVIVOR_STATS) + .get(PLAB_SIZE_FIELD_NAME) + ) + .collect(Collectors.toList()); // Check that desired plab size was changed during iterations. - // It should decrease during first half of iterations - // and increase after. 
- List decreasedPlabs = plabSizes.subList(testCase.getIterations(), testCase.getIterations() * 2); - List increasedPlabs = plabSizes.subList(testCase.getIterations() * 2, testCase.getIterations() * 3); + // The test case does 3 rounds of allocations. The second round of N allocations and GC's + // has a decreasing size of allocations so that iterations N to 2*N -1 will be of decreasing size. + // The third round with iterations 2*N to 3*N -1 has increasing sizes of allocation. + long startDesiredPLABSize = plabSizes.get(testCase.getIterations()); + long endDesiredPLABSize = plabSizes.get(testCase.getIterations() * 2 - 1); - Long prev = decreasedPlabs.get(0); - for (int index = 1; index < decreasedPlabs.size(); ++index) { - Long current = decreasedPlabs.get(index); - if (prev < current) { - System.out.println(output); - throw new RuntimeException("Test failed! Expect that previous PLAB size should be greater than current. Prev.size: " + prev + " Current size:" + current); - } - prev = current; + if (startDesiredPLABSize < endDesiredPLABSize) { + System.out.println(output); + throw new RuntimeException("Test failed! Expect that initial PLAB size should be greater than checked. Initial size: " + startDesiredPLABSize + " Checked size:" + endDesiredPLABSize); } - prev = increasedPlabs.get(0); - for (int index = 1; index < increasedPlabs.size(); ++index) { - Long current = increasedPlabs.get(index); - if (prev > current) { - System.out.println(output); - throw new RuntimeException("Test failed! Expect that previous PLAB size should be less than current. Prev.size: " + prev + " Current size:" + current); - } - prev = current; + startDesiredPLABSize = plabSizes.get(testCase.getIterations() * 2); + endDesiredPLABSize = plabSizes.get(testCase.getIterations() * 3 - 1); + + if (startDesiredPLABSize > endDesiredPLABSize) { + System.out.println(output); + throw new RuntimeException("Test failed! Expect that initial PLAB size should be less than checked. Initial size: " + startDesiredPLABSize + " Checked size:" + endDesiredPLABSize); } System.out.println("Test passed!"); @@ -195,7 +187,6 @@ return Arrays.asList("-XX:ParallelGCThreads=" + parGCThreads, "-XX:ParallelGCBufferWastePct=" + wastePct, "-XX:+ResizePLAB", - "-Dthreads=" + parGCThreads, "-Dchunk.size=" + chunkSize, "-Diterations=" + iterations, "-XX:NewSize=16m", diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/gc/g1/plab/lib/AppPLABResize.java --- a/hotspot/test/gc/g1/plab/lib/AppPLABResize.java Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/test/gc/g1/plab/lib/AppPLABResize.java Wed Jul 05 21:35:27 2017 +0200 @@ -38,7 +38,6 @@ * Expects the following properties to be set: * - iterations - amount of iteration per cycle. * - chunk.size - size of objects to be allocated - * - threads - number of gc threads (-XX:ParallelGCThreads) to calculate PLAB sizes. */ final public class AppPLABResize { @@ -47,7 +46,6 @@ // Defined by properties. private static final int ITERATIONS = Integer.getInteger("iterations"); private static final long CHUNK = Long.getLong("chunk.size"); - private static final int GC_THREADS = Integer.getInteger("threads"); private static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox(); @@ -59,13 +57,13 @@ */ public static void main(String[] args) { - if (ITERATIONS == 0 || CHUNK == 0 || GC_THREADS == 0) { + if (ITERATIONS == 0 || CHUNK == 0) { throw new IllegalArgumentException("Properties should be set"); } long wordSize = Platform.is32bit() ? 4l : 8l; // PLAB size is shared between threads. 
- long initialMemorySize = wordSize * GC_THREADS * MEM_ALLOC_WORDS; + long initialMemorySize = wordSize * MEM_ALLOC_WORDS; // Expect changing memory to half during all iterations. long memChangeStep = initialMemorySize / 2 / ITERATIONS; diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/gc/g1/plab/lib/LogParser.java --- a/hotspot/test/gc/g1/plab/lib/LogParser.java Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/test/gc/g1/plab/lib/LogParser.java Wed Jul 05 21:35:27 2017 +0200 @@ -22,13 +22,15 @@ */ package gc.g1.plab.lib; -import java.util.EnumMap; +import java.util.Arrays; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Scanner; import java.util.regex.Matcher; import java.util.regex.Pattern; +import java.util.stream.Collectors; /** * LogParser class parses VM output to get PLAB and ConsumptionStats values. @@ -44,9 +46,6 @@ */ final public class LogParser { - // Name for GC ID field in report. - public final static String GC_ID = "gc_id"; - /** * Type of parsed log element. */ @@ -57,7 +56,8 @@ private final String log; - private final Map>> reportHolder; + // Contains Map of PLAB statistics for given log. + private final PlabReport report; // GC ID private static final Pattern GC_ID_PATTERN = Pattern.compile("\\[gc,plab\\s*\\] GC\\((\\d+)\\)"); @@ -65,7 +65,7 @@ private static final Pattern PAIRS_PATTERN = Pattern.compile("\\w* \\w+:\\s+\\d+"); /** - * Construct LogParser Object + * Construct LogParser object, parse log file with PLAB statistics and store it into report. * * @param log - VM Output */ @@ -74,71 +74,123 @@ throw new IllegalArgumentException("Parameter log should not be null."); } this.log = log; - reportHolder = parseLines(); + report = parseLines(); } /** - * @return log which is being processed + * @return log which was processed */ public String getLog() { return log; } /** - * Returns list of log entries. + * Returns the GC log entries for Survivor and Old stats. + * The entries are represented as a map of gcID to the StatMap. * - * @return list of Pair with ReportType and Map of parameters/values. + * @return The log entries for the Survivor and Old stats. */ - public Map>> getEntries() { - return reportHolder; + public PlabReport getEntries() { + return report; } - private Map>> parseLines() throws NumberFormatException { + private PlabReport parseLines() throws NumberFormatException { Scanner lineScanner = new Scanner(log); - Map>> allocationStatistics = new HashMap<>(); + PlabReport plabReport = new PlabReport(); Optional gc_id; while (lineScanner.hasNextLine()) { String line = lineScanner.nextLine(); - gc_id = getGcId(line); - if ( gc_id.isPresent() ) { + gc_id = getGcId(line, GC_ID_PATTERN); + if (gc_id.isPresent()) { Matcher matcher = PAIRS_PATTERN.matcher(line); if (matcher.find()) { - Map> oneReportItem; - ReportType reportType; - - if (!allocationStatistics.containsKey(gc_id.get())) { - allocationStatistics.put(gc_id.get(), new EnumMap<>(ReportType.class)); + if (!plabReport.containsKey(gc_id.get())) { + plabReport.put(gc_id.get(), new PlabGCStatistics()); } + ReportType reportType = line.contains("Young") ? 
ReportType.SURVIVOR_STATS : ReportType.OLD_STATS; - if ( line.contains("Young") ) { - reportType = ReportType.SURVIVOR_STATS; - } else { - reportType = ReportType.OLD_STATS; - } - - oneReportItem = allocationStatistics.get(gc_id.get()); - if (!oneReportItem.containsKey(reportType)) { - oneReportItem.put(reportType,new HashMap()); + PlabGCStatistics gcStat = plabReport.get(gc_id.get()); + if (!gcStat.containsKey(reportType)) { + gcStat.put(reportType, new PlabInfo()); } // Extract all pairs from log. - Map plabStats = oneReportItem.get(reportType); + PlabInfo plabInfo = gcStat.get(reportType); do { String pair = matcher.group(); String[] nameValue = pair.replaceAll(": ", ":").split(":"); - plabStats.put(nameValue[0].trim(), Long.parseLong(nameValue[1])); + plabInfo.put(nameValue[0].trim(), Long.parseLong(nameValue[1])); } while (matcher.find()); } } } - return allocationStatistics; + return plabReport; } - private Optional getGcId(String line) { - Matcher number = GC_ID_PATTERN.matcher(line); + private static Optional getGcId(String line, Pattern pattern) { + Matcher number = pattern.matcher(line); if (number.find()) { return Optional.of(Long.parseLong(number.group(1))); } return Optional.empty(); } + + /** + * Extracts GC ID from log. + * + * @param line - one line of log. + * @return GC ID + */ + public static Long getGcIdFromLine(String line, Pattern pattern) { + Optional gcId = getGcId(line, pattern); + if (!gcId.isPresent()) { + System.out.println(line); + throw new RuntimeException("Cannot find GC ID in log."); + } + return gcId.get(); + } + + /** + * Returns Map which contains specified statistics for specified gc ids. + * @param specifiedGcId gc id to get + * @param type PLAB type + * @param fieldsName name of fields in PlabStatistics + * @return + **/ + public Map getSpecifiedStats(List specifiedGcId, LogParser.ReportType type, List fieldsName) { + return getSpecifiedStats(specifiedGcId, type, fieldsName, true); + } + + /** + * Returns PlabStatistics for specified GC ID. + * @param specifiedGcId + * @param type type of statistics + * @param fieldsName name of fields in PlabStatistics + * @return + **/ + public PlabInfo getSpecifiedStats(long specifiedGcId, LogParser.ReportType type, List fieldsName) { + return getSpecifiedStats(Arrays.asList(specifiedGcId), type, fieldsName, true).get(specifiedGcId); + } + + /** + * Returns Map which contains specified statistics. Filters out specified gc ids. + * @param specifiedGcIdForExclude + * @param type + * @param fieldsName + * @return + **/ + public Map getExcludedSpecifiedStats(List specifiedGcIdForExclude, LogParser.ReportType type, List fieldsName) { + return getSpecifiedStats(specifiedGcIdForExclude, type, fieldsName, false); + } + + private Map getSpecifiedStats(List gcIds, LogParser.ReportType type, List fieldNames, boolean extractId) { + return new HashMap<>( + getEntries().entryStream() + .filter(gcLogItem -> extractId == gcIds.contains(gcLogItem.getKey())) + .collect(Collectors.toMap(gcLogItem -> gcLogItem.getKey(), + gcLogItem -> gcLogItem.getValue().get(type).filter(fieldNames) + ) + ) + ); + } } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/gc/g1/plab/lib/PlabGCStatistics.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/gc/g1/plab/lib/PlabGCStatistics.java Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package gc.g1.plab.lib; + +import java.util.EnumMap; +import java.util.Map; + +import gc.g1.plab.lib.LogParser.ReportType; + +/** + * Class that represents PLAB statistics for a single GC. + * It includes both Survivor and Old PLAB statistics. + */ +public class PlabGCStatistics { + + private final Map plabGCStatistics; + + public PlabGCStatistics() { + plabGCStatistics = new EnumMap<>(ReportType.class); + } + + /** + * Checks if the statistics contains the requested type. + * @param reportType + * @returns true, if contains, false otherwise + */ + public boolean containsKey(ReportType reportType) { + return plabGCStatistics.containsKey(reportType); + } + + /** + * Put pair of ReportType and PlabInfo to statistics. + * @param reportType + * @param plabInfo + */ + public void put(ReportType reportType, PlabInfo plabInfo) { + plabGCStatistics.put(reportType, plabInfo); + } + + /** + * Returns PlabInfo of specified type + * @param reportType + * @return + */ + public PlabInfo get(ReportType reportType) { + return plabGCStatistics.get(reportType); + } +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/gc/g1/plab/lib/PlabInfo.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/gc/g1/plab/lib/PlabInfo.java Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,105 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ +package gc.g1.plab.lib; + +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +public class PlabInfo { + + private final Map plabInfo; + + public PlabInfo() { + plabInfo = new HashMap<>(); + } + + private PlabInfo(Map map) { + plabInfo = new HashMap<>(map); + } + + /** + * Add key and value to underlying Map. + * @param key PLAB info field name + * @param value PLAB info value for field + */ + public void put(String key, long value) { + plabInfo.put(key, value); + } + + /** + * Get stream of Map.Entry representing underlying Map with PLAB information. + */ + public Stream> entryStream() { + return plabInfo.entrySet().stream(); + } + + /** + * Returns the PlabInfo narrowed for the given fields only + * @param fields + * @return PlabInfo + */ + public PlabInfo filter(List fields) { + return new PlabInfo(entryStream() + .filter(field -> fields.contains(field.getKey())) + .collect(Collectors.toMap( + item -> item.getKey(), + item -> item.getValue()) + ) + ); + } + + /** + * Checks if statistic contains expected fields. + * @param fields fields which should be in statistic + * @return true if all fields are in statistic, false otherwise + */ + public boolean checkFields(List fields) { + for (String key : fields) { + if (!plabInfo.containsKey(key)) { + return false; + } + } + return true; + } + + /** + * Return a collection of the values. + * @return collection of values + */ + public Collection values() { + return plabInfo.values(); + } + + /** + * Get value for specified field. + * @param field + * @return long value which is contained in specified field + */ + public long get(String field) { + return plabInfo.get(field); + } +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/gc/g1/plab/lib/PlabReport.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/gc/g1/plab/lib/PlabReport.java Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package gc.g1.plab.lib; + +import java.util.HashMap; +import java.util.Map; +import java.util.stream.Stream; + +/** + * Class contains representation of GC PLAB log. + */ +public class PlabReport { + + private final Map report = new HashMap<>(); + + public PlabReport() { + } + + /** + * Checks if underlying Map contains requested GC ID. 
+ */ + public boolean containsKey(Long gcId) { + return report.containsKey(gcId); + } + + /** + * Puts GC ID and PlabGCStatistics to underlying Map. + */ + public void put(Long gcId, PlabGCStatistics plabStat) { + report.put(gcId, plabStat); + } + + /** + * Returns PlabGCStatistics for specified GC ID. + */ + public PlabGCStatistics get(Long gcId) { + return report.get(gcId); + } + + /** + * Returns Stream of Map.Entry of underlying Map. + */ + public Stream> entryStream() { + return report.entrySet().stream(); + } +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/gc/metaspace/TestMetaspacePerfCounters.java --- a/hotspot/test/gc/metaspace/TestMetaspacePerfCounters.java Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/test/gc/metaspace/TestMetaspacePerfCounters.java Wed Jul 05 21:35:27 2017 +0200 @@ -37,7 +37,6 @@ * java.compiler * java.management * jdk.jvmstat/sun.jvmstat.monitor - * @ignore 8151460 * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:-UseCompressedClassPointers -XX:+UsePerfData -XX:+UseSerialGC TestMetaspacePerfCounters * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:-UseCompressedClassPointers -XX:+UsePerfData -XX:+UseParallelGC -XX:+UseParallelOldGC TestMetaspacePerfCounters * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:-UseCompressedClassPointers -XX:+UsePerfData -XX:+UseG1GC TestMetaspacePerfCounters @@ -85,6 +84,9 @@ } private static void checkUsedIncreasesWhenLoadingClass(String ns) throws Exception { + // Need to ensure that used is up to date and that all unreachable + // classes are unloaded before doing this check. + System.gc(); long before = getUsed(ns); fooClass = compileAndLoad("Foo", "public class Foo { }"); System.gc(); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/gc/metaspace/TestPerfCountersAndMemoryPools.java --- a/hotspot/test/gc/metaspace/TestPerfCountersAndMemoryPools.java Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/test/gc/metaspace/TestPerfCountersAndMemoryPools.java Wed Jul 05 21:35:27 2017 +0200 @@ -36,15 +36,14 @@ * @modules java.base/jdk.internal.misc * java.management * jdk.jvmstat/sun.jvmstat.monitor - * @ignore 8151460 - * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:-UseCompressedKlassPointers -XX:+UseSerialGC -XX:+UsePerfData -Xint TestPerfCountersAndMemoryPools - * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedKlassPointers -XX:+UseSerialGC -XX:+UsePerfData -Xint TestPerfCountersAndMemoryPools + * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:-UseCompressedClassPointers -XX:+UseSerialGC -XX:+UsePerfData -Xint TestPerfCountersAndMemoryPools + * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedClassPointers -XX:+UseSerialGC -XX:+UsePerfData -Xint TestPerfCountersAndMemoryPools */ public class TestPerfCountersAndMemoryPools { public static void main(String[] args) throws Exception { checkMemoryUsage("Metaspace", "sun.gc.metaspace"); - if (InputArguments.contains("-XX:+UseCompressedKlassPointers") && Platform.is64bit()) { + if (InputArguments.contains("-XX:+UseCompressedClassPointers") && Platform.is64bit()) { checkMemoryUsage("Compressed Class Space", "sun.gc.compressedclassspace"); } } @@ -72,13 +71,17 @@ pool.getUsage().getInit(); pool.getUsage().getUsed(); pool.getUsage().getCommitted(); - assertEQ(1L, 1L); + assertEQ(1L, 1L, "Make assert load"); // Must do a GC to update performance 
counters System.gc(); - assertEQ(getMinCapacity(perfNS), pool.getUsage().getInit()); - assertEQ(getUsed(perfNS), pool.getUsage().getUsed()); - assertEQ(getCapacity(perfNS), pool.getUsage().getCommitted()); + assertEQ(getMinCapacity(perfNS), pool.getUsage().getInit(), "MinCapacity out of sync"); + + // Adding a second GC due to metadata allocations caused by getting the + // initial size from the pool. This is needed when running with -Xcomp. + System.gc(); + assertEQ(getUsed(perfNS), pool.getUsage().getUsed(), "Used out of sync"); + assertEQ(getCapacity(perfNS), pool.getUsage().getCommitted(), "Committed out of sync"); } private static long getMinCapacity(String ns) throws Exception { diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/gc/survivorAlignment/TestPromotionLABLargeSurvivorAlignment.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/gc/survivorAlignment/TestPromotionLABLargeSurvivorAlignment.java Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/** + * @test + * @bug 8060463 + * @summary Verify that objects promoted from eden space to survivor space + * with large values for SurvivorAlignmentInBytes succeed. 
+ * @requires vm.opt.ExplicitGCInvokesConcurrent != true + * @run main/othervm -Xmx128m + * -XX:+UnlockExperimentalVMOptions + * -XX:SurvivorAlignmentInBytes=8 -XX:SurvivorRatio=1 + * -XX:-ExplicitGCInvokesConcurrent -XX:-ResizePLAB + * TestPromotionLABLargeSurvivorAlignment + * @run main/othervm -Xmx128m + * -XX:+UnlockExperimentalVMOptions + * -XX:SurvivorAlignmentInBytes=16 -XX:SurvivorRatio=1 + * -XX:-ExplicitGCInvokesConcurrent -XX:-ResizePLAB + * TestPromotionLABLargeSurvivorAlignment + * @run main/othervm -Xmx128m + * -XX:+UnlockExperimentalVMOptions + * -XX:SurvivorAlignmentInBytes=512 -XX:SurvivorRatio=1 + * -XX:-ExplicitGCInvokesConcurrent -XX:-ResizePLAB + * TestPromotionLABLargeSurvivorAlignment + * @run main/othervm -Xmx128m + * -XX:+UnlockExperimentalVMOptions + * -XX:SurvivorAlignmentInBytes=1k -XX:SurvivorRatio=1 + * -XX:-ExplicitGCInvokesConcurrent -XX:-ResizePLAB + * TestPromotionLABLargeSurvivorAlignment + * @run main/othervm -Xmx128m + * -XX:+UnlockExperimentalVMOptions + * -XX:SurvivorAlignmentInBytes=4k -XX:SurvivorRatio=1 + * -XX:-ExplicitGCInvokesConcurrent -XX:-ResizePLAB + * TestPromotionLABLargeSurvivorAlignment + * @run main/othervm -Xmx128m + * -XX:+UnlockExperimentalVMOptions + * -XX:SurvivorAlignmentInBytes=16k -XX:SurvivorRatio=1 + * -XX:-ExplicitGCInvokesConcurrent -XX:-ResizePLAB + * TestPromotionLABLargeSurvivorAlignment + */ +public class TestPromotionLABLargeSurvivorAlignment { + public static void main(String args[]) { + Object garbage[] = new Object[1000000]; + for (int i = 0; i < garbage.length; i++) { + garbage[i] = new byte[0]; + } + for (int i = 0; i < 2; i++) { + System.gc(); + } + } +} + diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/runtime/CommandLine/OptionsValidation/TestOptionsWithRanges.java --- a/hotspot/test/runtime/CommandLine/OptionsValidation/TestOptionsWithRanges.java Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/test/runtime/CommandLine/OptionsValidation/TestOptionsWithRanges.java Wed Jul 05 21:35:27 2017 +0200 @@ -90,6 +90,13 @@ excludeTestMaxRange("CICompilerCount"); /* + * JDK-8153340 + * Temporary exclude AllocatePrefetchDistance option from testing + */ + excludeTestRange("AllocatePrefetchDistance"); + + + /* * JDK-8136766 * Temporarily remove ThreadStackSize from testing because Windows can set it to 0 * (for default OS size) but other platforms insist it must be greater than 0 @@ -97,15 +104,6 @@ excludeTestRange("ThreadStackSize"); /* - * JDK-8143958 - * Temporarily exclude testing of max range for Shared* flags - */ - excludeTestMaxRange("SharedReadWriteSize"); - excludeTestMaxRange("SharedReadOnlySize"); - excludeTestMaxRange("SharedMiscDataSize"); - excludeTestMaxRange("SharedMiscCodeSize"); - - /* * Remove the flag controlling the size of the stack because the * flag has direct influence on the physical memory usage of * the VM. diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/runtime/CommandLine/TraceExceptionsTest.java --- a/hotspot/test/runtime/CommandLine/TraceExceptionsTest.java Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/test/runtime/CommandLine/TraceExceptionsTest.java Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -38,7 +38,7 @@ ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( "-Xlog:exceptions=info", "NoClassFound"); OutputAnalyzer output = new OutputAnalyzer(pb.start()); - output.shouldContain(""); + output.shouldContain(""); output.shouldNotContain(""); output.shouldHaveExitValue(1); } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/runtime/SelectionResolution/AbstractMethodErrorTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/runtime/SelectionResolution/AbstractMethodErrorTest.java Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,875 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +* + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @summary Test of method selection and resolution cases that + * generate AbstractMethodErrorTest + * @modules java.base/jdk.internal.org.objectweb.asm + * @library /runtime/SelectionResolution/classes + * @build selectionresolution.* + * @run main/othervm/timeout=300 -XX:+IgnoreUnrecognizedVMOptions -XX:-VerifyDependencies AbstractMethodErrorTest + */ + +import java.util.Arrays; +import java.util.Collection; +import java.util.EnumSet; +import selectionresolution.ClassData; +import selectionresolution.MethodData; +import selectionresolution.Result; +import selectionresolution.SelectionResolutionTest; +import selectionresolution.SelectionResolutionTestCase; +import selectionresolution.Template; + +public class AbstractMethodErrorTest extends SelectionResolutionTest { + + private static final SelectionResolutionTestCase.Builder initBuilder = + new SelectionResolutionTestCase.Builder(); + + static { + initBuilder.setResult(Result.AME); + } + + private static final Collection testgroups = + Arrays.asList( + /* invokevirtual tests */ + /* Group 63: callsite = methodref = expected */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC, + MethodData.Access.PACKAGE, + MethodData.Access.PROTECTED, + MethodData.Access.PRIVATE), + EnumSet.of(MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.SAME)), + Template.MethodrefEqualsExpected, + Template.ReabstractExpectedClass, + Template.CallsiteEqualsMethodref, + Template.MethodrefSelectionResolvedIsClass), + /* Group 64: callsite = methodref, methodref != expected, + * expected is class, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + 
Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC, + MethodData.Access.PROTECTED), + EnumSet.of(MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.SAME)), + Template.MethodrefNotEqualsExpectedClass, + Template.ReabstractExpectedClass, + Template.CallsiteEqualsMethodref, + Template.MethodrefSelectionResolvedIsClass), + /* Group 65: callsite = methodref = resolved, possibly + * skip different package in selection. + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PACKAGE), + EnumSet.of(MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.SAME)), + Template.MethodrefNotEqualsExpectedClass, + Template.ReabstractExpectedClass, + Template.CallsiteEqualsMethodref, + Template.MethodrefSelectionPackageSkipNoOverride), + /* Group 66: callsite = methodref, methodref != expected, + * expected is interface, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.SAME)), + Template.MethodrefNotEqualsExpectedIface, + Template.ReabstractExpectedIface, + Template.CallsiteEqualsMethodref, + Template.MethodrefSelectionResolvedIsIface), + /* Group 67: callsite :> methodref, methodref = expected, + * expected is class, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC, + MethodData.Access.PROTECTED), + EnumSet.of(MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.SAME)), + Template.MethodrefEqualsExpected, + Template.ReabstractExpectedClass, + Template.CallsiteSubclassMethodref, + Template.MethodrefSelectionResolvedIsClass), + /* Group 68: callsite :> methodref, methodref = expected, + * possibly skip different package in selection. 
+ */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PACKAGE), + EnumSet.of(MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.SAME)), + Template.MethodrefEqualsExpected, + Template.ReabstractExpectedClass, + Template.CallsiteSubclassMethodref, + Template.MethodrefSelectionPackageSkipNoOverride), + /* Group 69: callsite :> methodref, methodref != expected, + * expected is class, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC, + MethodData.Access.PACKAGE, + MethodData.Access.PROTECTED), + EnumSet.of(MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.SAME)), + Template.MethodrefNotEqualsExpectedClass, + Template.ReabstractExpectedClass, + Template.CallsiteSubclassMethodref, + Template.MethodrefSelectionResolvedIsClass), + /* Group 70: callsite :> methodref, methodref != expected, + * possibly skip different package in selection + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PACKAGE), + EnumSet.of(MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.SAME)), + Template.MethodrefNotEqualsExpectedClass, + Template.ReabstractExpectedClass, + Template.CallsiteSubclassMethodref, + Template.MethodrefSelectionPackageSkipNoOverride), + /* Group 71: callsite :> methodref, methodref != expected, + * expected is interface, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.SAME)), + Template.MethodrefNotEqualsExpectedIface, + Template.ReabstractExpectedIface, + Template.CallsiteSubclassMethodref, + Template.MethodrefSelectionResolvedIsIface), + /* Group 72: callsite unrelated to methodref, methodref = expected, + * expected is class, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.SAME)), + Template.MethodrefEqualsExpected, + Template.ReabstractExpectedClass, + Template.CallsiteUnrelatedToMethodref, + Template.MethodrefSelectionResolvedIsClass), + /* Group 73: callsite unrelated to methodref, methodref = expected, + * expected is class, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PACKAGE), + EnumSet.of(MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.SAME)), + Template.MethodrefEqualsExpected, + Template.ReabstractExpectedClass, + Template.CallsiteUnrelatedToMethodref, + Template.MethodrefSelectionPackageSkipNoOverride), + /* Group 74: callsite unrelated to methodref, 
methodref != expected, + * expected is class, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.SAME)), + Template.MethodrefNotEqualsExpectedClass, + Template.ReabstractExpectedClass, + Template.CallsiteUnrelatedToMethodref, + Template.MethodrefSelectionResolvedIsClass), + /* Group 75: callsite unrelated to methodref, methodref != expected, + * expected is class, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PACKAGE), + EnumSet.of(MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.SAME)), + Template.MethodrefNotEqualsExpectedClass, + Template.ReabstractExpectedClass, + Template.CallsiteUnrelatedToMethodref, + Template.MethodrefSelectionPackageSkipNoOverride), + /* Group 76: callsite unrelated to methodref, methodref != expected, + * expected is interface, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.SAME)), + Template.MethodrefNotEqualsExpectedIface, + Template.ReabstractExpectedIface, + Template.CallsiteUnrelatedToMethodref, + Template.MethodrefSelectionResolvedIsIface), + /* Group 77: callsite = methodref, methodref != expected, + * expected is class, expected and callsite not in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.DIFFERENT)), + Template.MethodrefNotEqualsExpectedClass, + Template.ReabstractExpectedClass, + Template.CallsiteEqualsMethodref, + Template.MethodrefSelectionResolvedIsClass), + /* Group 78: callsite = methodref, methodref != expected, + * expected is interface, expected and callsite not in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.DIFFERENT)), + Template.MethodrefNotEqualsExpectedIface, + Template.ReabstractExpectedIface, + Template.CallsiteEqualsMethodref, + Template.MethodrefSelectionResolvedIsIface), + /* Group 79: callsite :> methodref, methodref = expected, + * expected is class, expected and callsite not in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.DIFFERENT)), + Template.MethodrefEqualsExpected, + Template.ReabstractExpectedClass, + Template.CallsiteSubclassMethodref, + 
Template.MethodrefSelectionResolvedIsClass), + + /* Group 80: callsite :> methodref, methodref != expected, + * expected is class, expected and callsite not in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.DIFFERENT)), + Template.MethodrefNotEqualsExpectedClass, + Template.ReabstractExpectedClass, + Template.CallsiteSubclassMethodref, + Template.MethodrefSelectionResolvedIsClass), + /* Group 81: callsite :> methodref, methodref != expected, + * expected is interface, expected and callsite not in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.DIFFERENT)), + Template.MethodrefNotEqualsExpectedIface, + Template.ReabstractExpectedIface, + Template.CallsiteSubclassMethodref, + Template.MethodrefSelectionResolvedIsIface), + /* Group 82: callsite unrelated to methodref, methodref = expected, + * expected is class, expected and callsite not in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.DIFFERENT)), + Template.MethodrefEqualsExpected, + Template.ReabstractExpectedClass, + Template.CallsiteUnrelatedToMethodref, + Template.MethodrefSelectionResolvedIsClass), + /* Group 83: callsite unrelated to methodref, methodref != expected, + * expected is class, expected and callsite not in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.DIFFERENT)), + Template.MethodrefNotEqualsExpectedClass, + Template.ReabstractExpectedClass, + Template.CallsiteUnrelatedToMethodref, + Template.MethodrefSelectionResolvedIsClass), + /* Group 84: callsite unrelated to methodref, methodref != expected, + * expected is interface, expected and callsite not in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.DIFFERENT)), + Template.MethodrefNotEqualsExpectedIface, + Template.ReabstractExpectedIface, + Template.CallsiteUnrelatedToMethodref, + Template.MethodrefSelectionResolvedIsIface), + + /* Reabstraction during selection */ + /* Group 85: callsite = methodref = expected */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PROTECTED, + MethodData.Access.PACKAGE, + MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.SAME)), + 
Template.MethodrefEqualsExpected, + Template.CallsiteEqualsMethodref, + Template.ReabstractMethodrefResolvedClass), + /* Group 86: callsite = methodref, methodref != expected, + * expected is class, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC, + MethodData.Access.PACKAGE, + MethodData.Access.PROTECTED), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.SAME)), + Template.MethodrefNotEqualsExpectedClass, + Template.CallsiteEqualsMethodref, + Template.ReabstractMethodrefResolvedClass), + /* Group 87: callsite = methodref, methodref != expected, + * expected is interface, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.SAME)), + Template.MethodrefNotEqualsExpectedIface, + Template.CallsiteEqualsMethodref, + Template.ReabstractMethodrefResolvedIface), + /* Group 88: callsite :> methodref, methodref = expected, + * expected is class, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC, + MethodData.Access.PACKAGE, + MethodData.Access.PROTECTED), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.SAME)), + Template.MethodrefEqualsExpected, + Template.CallsiteSubclassMethodref, + Template.ReabstractMethodrefResolvedClass), + /* Group 89: callsite :> methodref, methodref != expected, + * expected is class, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC, + MethodData.Access.PACKAGE, + MethodData.Access.PROTECTED), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.SAME)), + Template.MethodrefNotEqualsExpectedClass, + Template.CallsiteSubclassMethodref, + Template.ReabstractMethodrefResolvedClass), + /* Group 90: callsite :> methodref, methodref != expected, + * expected is interface, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.SAME)), + Template.MethodrefNotEqualsExpectedIface, + Template.CallsiteSubclassMethodref, + Template.ReabstractMethodrefResolvedIface), + /* Group 91: callsite unrelated to methodref, methodref = expected, + * expected is class, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC, + MethodData.Access.PACKAGE), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.SAME)), + 
Template.MethodrefEqualsExpected, + Template.CallsiteUnrelatedToMethodref, + Template.ReabstractMethodrefResolvedClass), + /* Group 92: callsite unrelated to methodref, methodref != expected, + * expected is class, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC, + MethodData.Access.PACKAGE), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.SAME)), + Template.MethodrefNotEqualsExpectedClass, + Template.CallsiteUnrelatedToMethodref, + Template.ReabstractMethodrefResolvedClass), + /* Group 93: callsite unrelated to methodref, methodref != expected, + * expected is interface, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.SAME)), + Template.MethodrefNotEqualsExpectedIface, + Template.CallsiteUnrelatedToMethodref, + Template.ReabstractMethodrefResolvedIface), + /* Group 94: callsite = methodref, methodref != expected, + * expected is class, expected and callsite not in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.DIFFERENT)), + Template.MethodrefNotEqualsExpectedClass, + Template.CallsiteEqualsMethodref, + Template.ReabstractMethodrefResolvedClass), + /* Group 95: callsite = methodref, methodref != expected, + * expected is interface, expected and callsite not in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.DIFFERENT)), + Template.MethodrefNotEqualsExpectedIface, + Template.CallsiteEqualsMethodref, + Template.ReabstractMethodrefResolvedIface), + /* Group 96: callsite :> methodref, methodref = expected, + * expected is class, expected and callsite not in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.DIFFERENT)), + Template.MethodrefEqualsExpected, + Template.CallsiteSubclassMethodref, + Template.ReabstractMethodrefResolvedClass), + + /* Group 97: callsite :> methodref, methodref != expected, + * expected is class, expected and callsite not in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.DIFFERENT)), + Template.MethodrefNotEqualsExpectedClass, + Template.CallsiteSubclassMethodref, + Template.ReabstractMethodrefResolvedClass), + 
/* Group 98: callsite :> methodref, methodref != expected, + * expected is interface, expected and callsite not in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.DIFFERENT)), + Template.MethodrefNotEqualsExpectedIface, + Template.CallsiteSubclassMethodref, + Template.ReabstractMethodrefResolvedIface), + /* Group 99: callsite unrelated to methodref, methodref = expected, + * expected is class, expected and callsite not in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.DIFFERENT)), + Template.MethodrefEqualsExpected, + Template.CallsiteUnrelatedToMethodref, + Template.ReabstractMethodrefResolvedClass), + /* Group 100: callsite unrelated to methodref, methodref != expected, + * expected is class, expected and callsite not in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.DIFFERENT)), + Template.MethodrefNotEqualsExpectedClass, + Template.CallsiteUnrelatedToMethodref, + Template.ReabstractMethodrefResolvedClass), + /* Group 101: callsite unrelated to methodref, methodref != expected, + * expected is interface, expected and callsite not in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.DIFFERENT)), + Template.MethodrefNotEqualsExpectedIface, + Template.CallsiteUnrelatedToMethodref, + Template.ReabstractMethodrefResolvedIface), + + /* invokeinterface */ + /* Group 102: callsite = methodref = expected */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEINTERFACE), + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.SAME)), + Template.MethodrefEqualsExpected, + Template.ReabstractExpectedIface, + Template.CallsiteEqualsMethodref, + Template.IfaceMethodrefSelection), + /* Group 103: callsite = methodref, methodref != expected, + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEINTERFACE), + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.SAME, + ClassData.Package.DIFFERENT)), + Template.IfaceMethodrefNotEqualsExpected, + Template.ReabstractExpectedIface, + Template.CallsiteEqualsMethodref, + Template.IfaceMethodrefSelection), + /* Group 104: callsite :> methodref, methodref = expected, + */ + new TestGroup.Simple(initBuilder, + 
Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEINTERFACE), + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.SAME, + ClassData.Package.DIFFERENT)), + Template.MethodrefEqualsExpected, + Template.ReabstractExpectedIface, + Template.CallsiteSubclassMethodref, + Template.IfaceMethodrefSelection), + /* Group 105: callsite :> methodref, methodref != expected, + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEINTERFACE), + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.SAME, + ClassData.Package.DIFFERENT)), + Template.IfaceMethodrefNotEqualsExpected, + Template.ReabstractExpectedIface, + Template.CallsiteSubclassMethodref, + Template.IfaceMethodrefSelection), + /* Group 106: callsite unrelated to methodref, methodref = expected, + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEINTERFACE), + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.SAME, + ClassData.Package.DIFFERENT)), + Template.MethodrefEqualsExpected, + Template.ReabstractExpectedIface, + Template.CallsiteUnrelatedToMethodref, + Template.IfaceMethodrefSelection), + /* Group 107: callsite unrelated to methodref, methodref != expected, + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEINTERFACE), + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.SAME, + ClassData.Package.DIFFERENT)), + Template.IfaceMethodrefNotEqualsExpected, + Template.ReabstractExpectedIface, + Template.CallsiteUnrelatedToMethodref, + Template.IfaceMethodrefSelection), + + /* Reabstraction during selection */ + /* Group 108: callsite = methodref = expected */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEINTERFACE), + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.SAME)), + Template.MethodrefEqualsExpected, + Template.CallsiteEqualsMethodref, + Template.ReabstractIfaceMethodrefResolved), + /* Group 109: callsite = methodref, methodref != expected, + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEINTERFACE), + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.SAME, + ClassData.Package.DIFFERENT)), + Template.IfaceMethodrefNotEqualsExpected, + Template.CallsiteEqualsMethodref, + Template.ReabstractIfaceMethodrefResolved), + /* Group 110: callsite :> methodref, methodref = expected, + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEINTERFACE), + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.SAME, + 
ClassData.Package.DIFFERENT)), + Template.MethodrefEqualsExpected, + Template.CallsiteSubclassMethodref, + Template.ReabstractIfaceMethodrefResolved), + /* Group 111: callsite :> methodref, methodref != expected, + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEINTERFACE), + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.SAME, + ClassData.Package.DIFFERENT)), + Template.IfaceMethodrefNotEqualsExpected, + Template.CallsiteSubclassMethodref, + Template.ReabstractIfaceMethodrefResolved), + /* Group 112: callsite unrelated to methodref, methodref = expected, + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEINTERFACE), + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.SAME, + ClassData.Package.DIFFERENT)), + Template.MethodrefEqualsExpected, + Template.CallsiteUnrelatedToMethodref, + Template.ReabstractIfaceMethodrefResolved), + /* Group 113: callsite unrelated to methodref, methodref != expected, + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEINTERFACE), + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.SAME, + ClassData.Package.DIFFERENT)), + Template.IfaceMethodrefNotEqualsExpected, + Template.CallsiteUnrelatedToMethodref, + Template.ReabstractIfaceMethodrefResolved), + + /* invokespecial tests */ + /* Group 114: callsite = methodref = expected */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKESPECIAL), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC, + MethodData.Access.PACKAGE, + MethodData.Access.PROTECTED, + MethodData.Access.PRIVATE), + EnumSet.of(MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.SAME)), + Template.MethodrefEqualsExpected, + Template.ReabstractExpectedClass, + Template.CallsiteEqualsMethodref, + Template.ObjectrefExactSubclassOfCallsite), + /* Group 115: callsite = methodref, methodref != expected, + * expected is class, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKESPECIAL), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC, + MethodData.Access.PACKAGE, + MethodData.Access.PROTECTED), + EnumSet.of(MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.SAME)), + Template.MethodrefNotEqualsExpectedClass, + Template.ReabstractExpectedClass, + Template.CallsiteEqualsMethodref, + Template.ObjectrefExactSubclassOfCallsite), + /* Group 116: callsite = methodref, methodref != expected, + * expected is interface, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKESPECIAL), + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.SAME)), + Template.MethodrefNotEqualsExpectedIface, + Template.ReabstractExpectedIface, + 
Template.CallsiteEqualsMethodref, + Template.ObjectrefExactSubclassOfCallsite), + /* Group 117: callsite :> methodref, methodref = expected, + * expected is class, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKESPECIAL), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC, + MethodData.Access.PACKAGE, + MethodData.Access.PROTECTED), + EnumSet.of(MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.SAME)), + Template.MethodrefEqualsExpected, + Template.ReabstractExpectedClass, + Template.CallsiteSubclassMethodref, + Template.ObjectrefExactSubclassOfCallsite), + /* Group 118: callsite :> methodref, methodref != expected, + * expected is class, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKESPECIAL), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC, + MethodData.Access.PACKAGE, + MethodData.Access.PROTECTED), + EnumSet.of(MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.SAME)), + Template.MethodrefNotEqualsExpectedClass, + Template.ReabstractExpectedClass, + Template.CallsiteSubclassMethodref, + Template.ObjectrefExactSubclassOfCallsite), + /* Group 119: callsite :> methodref, methodref != expected, + * expected is interface, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKESPECIAL), + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.SAME)), + Template.MethodrefNotEqualsExpectedIface, + Template.ReabstractExpectedIface, + Template.CallsiteSubclassMethodref, + Template.ObjectrefExactSubclassOfCallsite), + /* Group 120: callsite = methodref, methodref != expected, + * expected is class, expected and callsite not in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKESPECIAL), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.DIFFERENT)), + Template.MethodrefNotEqualsExpectedClass, + Template.ReabstractExpectedClass, + Template.CallsiteEqualsMethodref, + Template.ObjectrefExactSubclassOfCallsite), + /* Group 121: callsite = methodref, methodref != expected, + * expected is interface, expected and callsite not in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKESPECIAL), + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.DIFFERENT)), + Template.MethodrefNotEqualsExpectedIface, + Template.ReabstractExpectedIface, + Template.CallsiteEqualsMethodref, + Template.ObjectrefExactSubclassOfCallsite), + /* Group 122: callsite :> methodref, methodref = expected, + * expected is class, expected and callsite not in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKESPECIAL), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC), + 
EnumSet.of(MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.DIFFERENT)), + Template.MethodrefEqualsExpected, + Template.ReabstractExpectedClass, + Template.CallsiteSubclassMethodref, + Template.ObjectrefExactSubclassOfCallsite), + + /* Group 123: callsite :> methodref, methodref != expected, + * expected is class, expected and callsite not in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKESPECIAL), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.DIFFERENT)), + Template.MethodrefNotEqualsExpectedClass, + Template.ReabstractExpectedClass, + Template.CallsiteSubclassMethodref, + Template.ObjectrefExactSubclassOfCallsite), + /* Group 124: callsite :> methodref, methodref != expected, + * expected is interface, expected and callsite not in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKESPECIAL), + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.DIFFERENT)), + Template.MethodrefNotEqualsExpectedIface, + Template.ReabstractExpectedIface, + Template.CallsiteSubclassMethodref, + Template.ObjectrefExactSubclassOfCallsite) + ); + + private AbstractMethodErrorTest() { + super(testgroups); + } + + public static void main(final String... args) { + new AbstractMethodErrorTest().run(); + } +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/runtime/SelectionResolution/IllegalAccessErrorTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/runtime/SelectionResolution/IllegalAccessErrorTest.java Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,517 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test + * @summary Test of method selection and resolution cases that + * generate IllegalAccessError + * @modules java.base/jdk.internal.org.objectweb.asm + * @library /runtime/SelectionResolution/classes + * @build selectionresolution.* + * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-VerifyDependencies IllegalAccessErrorTest + */ + +import java.util.Arrays; +import java.util.Collection; +import java.util.EnumSet; +import selectionresolution.ClassData; +import selectionresolution.MethodData; +import selectionresolution.Result; +import selectionresolution.SelectionResolutionTest; +import selectionresolution.SelectionResolutionTestCase; +import selectionresolution.Template; + +public class IllegalAccessErrorTest extends SelectionResolutionTest { + + private static final SelectionResolutionTestCase.Builder initBuilder = + new SelectionResolutionTestCase.Builder(); + + static { + initBuilder.setResult(Result.IAE); + } + + private static final Collection testgroups = + Arrays.asList( + /* invokestatic tests */ + /* Group 125: callsite = methodref, methodref != + * expected, expected is class + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKESTATIC), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PRIVATE), + EnumSet.of(MethodData.Context.STATIC), + EnumSet.of(ClassData.Package.SAME)), + Template.MethodrefNotEqualsExpectedClass, + Template.CallsiteEqualsMethodref, + Template.TrivialObjectref), + /* Group 126: callsite :> methodref, methodref = expected, + * expected is class, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKESTATIC), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PRIVATE), + EnumSet.of(MethodData.Context.STATIC), + EnumSet.of(ClassData.Package.SAME)), + Template.MethodrefEqualsExpected, + Template.CallsiteSubclassMethodref, + Template.TrivialObjectref), + /* Group 127: callsite :> methodref, methodref != expected, + * expected is class, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKESTATIC), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PRIVATE), + EnumSet.of(MethodData.Context.STATIC), + EnumSet.of(ClassData.Package.SAME)), + Template.MethodrefNotEqualsExpectedClass, + Template.CallsiteSubclassMethodref, + Template.TrivialObjectref), + /* Group 128: callsite unrelated to methodref, methodref = expected, + * expected is class, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKESTATIC), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PRIVATE), + EnumSet.of(MethodData.Context.STATIC), + EnumSet.of(ClassData.Package.SAME)), + Template.MethodrefEqualsExpected, + Template.CallsiteUnrelatedToMethodref, + Template.TrivialObjectref), + /* Group 129: callsite unrelated to methodref, methodref != expected, + * expected is class, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKESTATIC), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PRIVATE), + 
EnumSet.of(MethodData.Context.STATIC), + EnumSet.of(ClassData.Package.SAME)), + Template.MethodrefNotEqualsExpectedClass, + Template.CallsiteUnrelatedToMethodref, + Template.TrivialObjectref), + /* Group 130: callsite = methodref, methodref != expected, + * expected is class, expected and callsite not in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKESTATIC), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PRIVATE, + MethodData.Access.PACKAGE), + EnumSet.of(MethodData.Context.STATIC), + EnumSet.of(ClassData.Package.DIFFERENT)), + Template.MethodrefNotEqualsExpectedClass, + Template.CallsiteEqualsMethodref, + Template.TrivialObjectref), + /* Group 131: callsite :> methodref, methodref = expected, + * expected is class, expected and callsite not in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKESTATIC), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PRIVATE, + MethodData.Access.PACKAGE), + EnumSet.of(MethodData.Context.STATIC), + EnumSet.of(ClassData.Package.DIFFERENT)), + Template.MethodrefEqualsExpected, + Template.CallsiteSubclassMethodref, + Template.TrivialObjectref), + /* Group 132: callsite :> methodref, methodref != expected, + * expected is class, expected and callsite not in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKESTATIC), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PRIVATE, + MethodData.Access.PACKAGE), + EnumSet.of(MethodData.Context.STATIC), + EnumSet.of(ClassData.Package.DIFFERENT)), + Template.MethodrefNotEqualsExpectedClass, + Template.CallsiteSubclassMethodref, + Template.TrivialObjectref), + /* Group 133: callsite unrelated to methodref, methodref = expected, + * expected is class, expected and callsite not in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKESTATIC), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PRIVATE, + MethodData.Access.PROTECTED, + MethodData.Access.PACKAGE), + EnumSet.of(MethodData.Context.STATIC), + EnumSet.of(ClassData.Package.DIFFERENT)), + Template.MethodrefEqualsExpected, + Template.CallsiteUnrelatedToMethodref, + Template.TrivialObjectref), + /* Group 134: callsite unrelated to methodref, methodref != expected, + * expected is class, expected and callsite not in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKESTATIC), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PRIVATE, + MethodData.Access.PROTECTED, + MethodData.Access.PACKAGE), + EnumSet.of(MethodData.Context.STATIC), + EnumSet.of(ClassData.Package.DIFFERENT)), + Template.MethodrefNotEqualsExpectedClass, + Template.CallsiteUnrelatedToMethodref, + Template.TrivialObjectref), + + /* invokevirtual tests */ + /* Group 135: callsite = methodref, methodref != expected, + * expected is class, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PRIVATE), + 
EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.SAME)), + Template.OverrideAbstractExpectedClass, + Template.MethodrefNotEqualsExpectedClass, + Template.IgnoredAbstract, + Template.CallsiteEqualsMethodref, + Template.MethodrefSelectionResolvedIsClass), + /* Group 136: callsite :> methodref, methodref = expected, + * expected is class, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PRIVATE), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.SAME)), + Template.OverrideAbstractExpectedClass, + Template.MethodrefEqualsExpected, + Template.IgnoredAbstract, + Template.CallsiteSubclassMethodref, + Template.MethodrefSelectionResolvedIsClass), + /* Group 137: callsite :> methodref, methodref != expected, + * expected is class, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PRIVATE), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.SAME)), + Template.OverrideAbstractExpectedClass, + Template.MethodrefNotEqualsExpectedClass, + Template.IgnoredAbstract, + Template.CallsiteSubclassMethodref, + Template.MethodrefSelectionResolvedIsClass), + /* Group 138: callsite unrelated to methodref, methodref = expected, + * expected is class, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PRIVATE), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.SAME)), + Template.OverrideAbstractExpectedClass, + Template.MethodrefEqualsExpected, + Template.IgnoredAbstract, + Template.CallsiteUnrelatedToMethodref, + Template.MethodrefSelectionResolvedIsClass), + /* Group 139: callsite unrelated to methodref, methodref != expected, + * expected is class, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PRIVATE), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.SAME)), + Template.OverrideAbstractExpectedClass, + Template.MethodrefNotEqualsExpectedClass, + Template.IgnoredAbstract, + Template.CallsiteUnrelatedToMethodref, + Template.MethodrefSelectionResolvedIsClass), + /* Group 140: callsite = methodref, methodref != expected, + * expected is class, expected and callsite not in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PACKAGE, + MethodData.Access.PRIVATE), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.DIFFERENT)), + Template.OverrideAbstractExpectedClass, + Template.MethodrefNotEqualsExpectedClass, + Template.IgnoredAbstract, + Template.CallsiteEqualsMethodref, + Template.MethodrefSelectionResolvedIsClass), + /* Group 141: callsite :> methodref, methodref = expected, + * expected is class, 
expected and callsite not in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PACKAGE, + MethodData.Access.PRIVATE), + EnumSet.of(MethodData.Context.INSTANCE, + MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.DIFFERENT)), + Template.OverrideAbstractExpectedClass, + Template.MethodrefEqualsExpected, + Template.IgnoredAbstract, + Template.CallsiteSubclassMethodref, + Template.MethodrefSelectionResolvedIsClass), + /* Group 142: callsite :> methodref, methodref = expected, + * expected is class, expected and callsite not in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PACKAGE, + MethodData.Access.PRIVATE), + EnumSet.of(MethodData.Context.INSTANCE, + MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.DIFFERENT)), + Template.OverrideAbstractExpectedClass, + Template.MethodrefNotEqualsExpectedClass, + Template.IgnoredAbstract, + Template.CallsiteSubclassMethodref, + Template.MethodrefSelectionResolvedIsClass), + /* Group 143: callsite unrelated to methodref, methodref = expected, + * expected is class, expected and callsite not in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PACKAGE, + // protected causes verifier error. + MethodData.Access.PRIVATE), + EnumSet.of(MethodData.Context.INSTANCE, + MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.DIFFERENT)), + Template.OverrideAbstractExpectedClass, + Template.MethodrefEqualsExpected, + Template.IgnoredAbstract, + Template.CallsiteUnrelatedToMethodref, + Template.MethodrefSelectionResolvedIsClass), + /* Group 144: callsite unrelated to methodref, methodref != expected, + * expected is class, expected and callsite not in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PACKAGE, + // protected causes verifier error. 
+ MethodData.Access.PRIVATE), + EnumSet.of(MethodData.Context.INSTANCE, + MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.DIFFERENT)), + Template.OverrideAbstractExpectedClass, + Template.MethodrefNotEqualsExpectedClass, + Template.IgnoredAbstract, + Template.CallsiteUnrelatedToMethodref, + Template.MethodrefSelectionResolvedIsClass), + + /* invokeinterface tests */ + /* Group 145: callsite = methodref = expected */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEINTERFACE), + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.SAME)), + Template.OverrideAbstractExpectedIface, + Template.MethodrefEqualsExpected, + Template.IgnoredAbstract, + Template.CallsiteEqualsMethodref, + Template.IfaceMethodrefSelectionOverrideNonPublic), + /* Group 146: callsite = methodref, methodref != expected */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEINTERFACE), + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.SAME, + ClassData.Package.DIFFERENT)), + Template.OverrideAbstractExpectedIface, + Template.IfaceMethodrefNotEqualsExpected, + Template.IgnoredAbstract, + Template.CallsiteEqualsMethodref, + Template.IfaceMethodrefSelectionOverrideNonPublic), + /* Group 147: callsite :> methodref, methodref = expected */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEINTERFACE), + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.SAME, + ClassData.Package.DIFFERENT)), + Template.OverrideAbstractExpectedIface, + Template.MethodrefEqualsExpected, + Template.IgnoredAbstract, + Template.CallsiteSubclassMethodref, + Template.IfaceMethodrefSelectionOverrideNonPublic), + /* Group 148: callsite :> methodref, methodref != expected */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEINTERFACE), + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.SAME, + ClassData.Package.DIFFERENT)), + Template.OverrideAbstractExpectedIface, + Template.IfaceMethodrefNotEqualsExpected, + Template.IgnoredAbstract, + Template.CallsiteSubclassMethodref, + Template.IfaceMethodrefSelectionOverrideNonPublic), + /* Group 149: callsite unrelated to methodref, methodref = expected */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEINTERFACE), + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.SAME, + ClassData.Package.DIFFERENT)), + Template.OverrideAbstractExpectedIface, + Template.MethodrefEqualsExpected, + Template.IgnoredAbstract, + Template.CallsiteUnrelatedToMethodref, + Template.IfaceMethodrefSelectionOverrideNonPublic), + /* Group 150: callsite unrelated to methodref, methodref != expected */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEINTERFACE), + 
Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.SAME, + ClassData.Package.DIFFERENT)), + Template.OverrideAbstractExpectedIface, + Template.IfaceMethodrefNotEqualsExpected, + Template.IgnoredAbstract, + Template.CallsiteUnrelatedToMethodref, + Template.IfaceMethodrefSelectionOverrideNonPublic), + + /* invokespecial tests */ + /* Group 151: callsite = methodref, methodref != expected, + * expected is class, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKESPECIAL), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PRIVATE), + EnumSet.of(MethodData.Context.INSTANCE, + MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.SAME)), + Template.OverrideAbstractExpectedClass, + Template.MethodrefNotEqualsExpectedClass, + Template.IgnoredAbstract, + Template.CallsiteEqualsMethodref, + Template.ObjectrefAssignableToCallsite), + /* Group 152: callsite :> methodref, methodref = expected, + * expected is class, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKESPECIAL), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PRIVATE), + EnumSet.of(MethodData.Context.INSTANCE, + MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.SAME)), + Template.OverrideAbstractExpectedClass, + Template.MethodrefEqualsExpected, + Template.IgnoredAbstract, + Template.CallsiteSubclassMethodref, + Template.ObjectrefAssignableToCallsite), + /* Group 153: callsite :> methodref, methodref != expected, + * expected is class, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKESPECIAL), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PRIVATE), + EnumSet.of(MethodData.Context.INSTANCE, + MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.SAME)), + Template.OverrideAbstractExpectedClass, + Template.MethodrefNotEqualsExpectedClass, + Template.IgnoredAbstract, + Template.CallsiteSubclassMethodref, + Template.ObjectrefAssignableToCallsite), + /* Group 154: callsite = methodref, methodref != expected, + * expected is class, expected and callsite not in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKESPECIAL), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PACKAGE, + MethodData.Access.PRIVATE), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.DIFFERENT)), + Template.OverrideAbstractExpectedClass, + Template.MethodrefNotEqualsExpectedClass, + Template.IgnoredAbstract, + Template.CallsiteEqualsMethodref, + Template.ObjectrefExactSubclassOfCallsite), + /* Group 155: callsite :> methodref, methodref = expected, + * expected is class, expected and callsite not in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKESPECIAL), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PACKAGE, + MethodData.Access.PRIVATE), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.DIFFERENT)), + 
Template.OverrideAbstractExpectedClass, + Template.MethodrefEqualsExpected, + Template.IgnoredAbstract, + Template.CallsiteSubclassMethodref, + Template.ObjectrefExactSubclassOfCallsite), + /* Group 156: callsite :> methodref, methodref != expected, + * expected is class, expected and callsite not in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKESPECIAL), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PACKAGE, + MethodData.Access.PRIVATE), + EnumSet.of(MethodData.Context.INSTANCE, + MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.DIFFERENT)), + Template.OverrideAbstractExpectedClass, + Template.MethodrefNotEqualsExpectedClass, + Template.IgnoredAbstract, + Template.CallsiteSubclassMethodref, + Template.ObjectrefExactSubclassOfCallsite) + ); + + private IllegalAccessErrorTest() { + super(testgroups); + } + + public static void main(final String... args) { + new IllegalAccessErrorTest().run(); + } +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/runtime/SelectionResolution/InvokeInterfaceICCE.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/runtime/SelectionResolution/InvokeInterfaceICCE.java Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,180 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test + * @summary Test of method selection and resolution cases that + * generate IncompatibleClassChangeError + * @modules java.base/jdk.internal.org.objectweb.asm + * @library /runtime/SelectionResolution/classes + * @build selectionresolution.* + * @run main/othervm/timeout=500 -XX:+IgnoreUnrecognizedVMOptions -XX:-VerifyDependencies InvokeInterfaceICCE + */ + +import java.util.Arrays; +import java.util.Collection; +import java.util.EnumSet; +import selectionresolution.ClassData; +import selectionresolution.MethodData; +import selectionresolution.Result; +import selectionresolution.SelectionResolutionTest; +import selectionresolution.SelectionResolutionTestCase; +import selectionresolution.Template; + +public class InvokeInterfaceICCE extends SelectionResolutionTest { + + private static final SelectionResolutionTestCase.Builder initBuilder = + new SelectionResolutionTestCase.Builder(); + + static { + initBuilder.setResult(Result.ICCE); + } + + private static final Collection testgroups = + Arrays.asList( + /* invokeinterface tests */ + + /* resolved method is static*/ + /* Group 168: methodref = expected */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEINTERFACE), + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.STATIC), + EnumSet.of(ClassData.Package.SAME, + ClassData.Package.DIFFERENT)), + Template.OverrideAbstractExpectedIface, + Template.MethodrefEqualsExpected, + Template.IgnoredAbstract, + Template.AllCallsiteCases, + Template.IfaceMethodrefSelection), + /* Group 169: methodref != expected */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEINTERFACE), + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.STATIC), + EnumSet.of(ClassData.Package.SAME, + ClassData.Package.DIFFERENT)), + Template.OverrideAbstractExpectedIface, + Template.IfaceMethodrefNotEqualsExpected, + Template.IgnoredAbstract, + Template.AllCallsiteCases, + Template.IfaceMethodrefSelection), + + /* methodref is a class */ + /* Group 170: methodref = expected */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEINTERFACE), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC, + MethodData.Access.PACKAGE, + MethodData.Access.PROTECTED, + MethodData.Access.PRIVATE), + EnumSet.of(MethodData.Context.STATIC), + EnumSet.of(ClassData.Package.SAME, + ClassData.Package.DIFFERENT)), + Template.OverrideAbstractExpectedClass, + Template.MethodrefEqualsExpected, + Template.IgnoredAbstract, + Template.AllCallsiteCases, + Template.MethodrefSelectionResolvedIsClass), + /* Group 171: methodref != expected, expected is class */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEINTERFACE), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC, + MethodData.Access.PACKAGE, + MethodData.Access.PROTECTED, + MethodData.Access.PRIVATE), + EnumSet.of(MethodData.Context.STATIC), + EnumSet.of(ClassData.Package.SAME, + ClassData.Package.DIFFERENT)), + Template.OverrideAbstractExpectedClass, + Template.MethodrefNotEqualsExpectedClass, + Template.IgnoredAbstract, + Template.AllCallsiteCases, + Template.MethodrefSelectionResolvedIsClass), + /* Group 172: 
methodref != expected expected is interface */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEINTERFACE), + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC, + MethodData.Access.PRIVATE), + EnumSet.of(MethodData.Context.STATIC), + EnumSet.of(ClassData.Package.SAME, + ClassData.Package.DIFFERENT)), + Template.OverrideAbstractExpectedIface, + Template.MethodrefNotEqualsExpectedIface, + Template.IgnoredAbstract, + Template.AllCallsiteCases, + Template.MethodrefSelectionResolvedIsIface), + /* Group 173: ambiguous resolution */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEINTERFACE), + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.SAME, + ClassData.Package.DIFFERENT)), + Template.OverrideAbstractExpectedIface, + Template.IfaceMethodrefAmbiguous, + Template.IgnoredAbstract, + Template.AllCallsiteCases, + Template.IfaceMethodrefSelectionNoOverride), + /* Group 174: ambiguous selection */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEINTERFACE), + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.SAME, + ClassData.Package.DIFFERENT)), + Template.OverrideAbstractExpectedIface, + Template.IfaceMethodrefNotEqualsExpected, + Template.IgnoredAbstract, + Template.AllCallsiteCases, + Template.IfaceMethodrefAmbiguousResolvedIsIface), + + /* Group 175: private method in interface */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEINTERFACE), + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PRIVATE), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.SAME)), + Template.OverrideAbstractExpectedIface, + Template.MethodrefEqualsExpected, + Template.IgnoredAbstract, + Template.CallsiteEqualsMethodref, + Template.IfaceMethodrefSelection) + ); + + private InvokeInterfaceICCE() { + super(testgroups); + } + + public static void main(final String... args) { + new InvokeInterfaceICCE().run(); + } +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/runtime/SelectionResolution/InvokeInterfaceSuccessTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/runtime/SelectionResolution/InvokeInterfaceSuccessTest.java Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,148 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @summary Test of method selection and resolution cases that + * generate InvokeInterfaceSuccessTest + * @modules java.base/jdk.internal.org.objectweb.asm + * @library /runtime/SelectionResolution/classes + * @build selectionresolution.* + * @run main/othervm/timeout=300 -XX:+IgnoreUnrecognizedVMOptions -XX:-VerifyDependencies InvokeInterfaceSuccessTest + */ + +import java.util.Arrays; +import java.util.Collection; +import java.util.EnumSet; +import selectionresolution.ClassData; +import selectionresolution.MethodData; +import selectionresolution.SelectionResolutionTest; +import selectionresolution.SelectionResolutionTestCase; +import selectionresolution.Template; + +public class InvokeInterfaceSuccessTest extends SelectionResolutionTest { + + private static final SelectionResolutionTestCase.Builder initBuilder = + new SelectionResolutionTestCase.Builder(); + + static { + initBuilder.invoke = SelectionResolutionTestCase.InvokeInstruction.INVOKEINTERFACE; + } + + private static final Collection testgroups = + Arrays.asList( + /* invokeinterface tests */ + + /* Group 40: callsite = methodref = expected */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.INSTANCE, + MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.SAME)), + Template.OverrideAbstractExpectedIface, + Template.MethodrefEqualsExpected, + Template.IgnoredAbstract, + Template.CallsiteEqualsMethodref, + Template.IfaceMethodrefSelection, + Template.SelectionOverrideAbstract), + /* Group 41: callsite = methodref, methodref != expected */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.INSTANCE, + MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.SAME, + ClassData.Package.DIFFERENT)), + Template.OverrideAbstractExpectedIface, + Template.IfaceMethodrefNotEqualsExpected, + Template.IgnoredAbstract, + Template.CallsiteEqualsMethodref, + Template.IfaceMethodrefSelection, + Template.SelectionOverrideAbstract), + /* Group 42: callsite :> methodref, methodref = expected */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.INSTANCE, + MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.SAME, + ClassData.Package.DIFFERENT)), + Template.OverrideAbstractExpectedIface, + Template.MethodrefEqualsExpected, + Template.IgnoredAbstract, + Template.CallsiteSubclassMethodref, + Template.IfaceMethodrefSelection, + Template.SelectionOverrideAbstract), + /* Group 43: callsite :> methodref, methodref != expected */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.INSTANCE, + MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.SAME, + ClassData.Package.DIFFERENT)), + Template.OverrideAbstractExpectedIface, + 
Template.IfaceMethodrefNotEqualsExpected, + Template.IgnoredAbstract, + Template.CallsiteSubclassMethodref, + Template.IfaceMethodrefSelection, + Template.SelectionOverrideAbstract), + /* Group 44: callsite unrelated to methodref, methodref = expected */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.INSTANCE, + MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.SAME, + ClassData.Package.DIFFERENT)), + Template.OverrideAbstractExpectedIface, + Template.MethodrefEqualsExpected, + Template.IgnoredAbstract, + Template.CallsiteUnrelatedToMethodref, + Template.IfaceMethodrefSelection, + Template.SelectionOverrideAbstract), + /* Group 45: callsite unrelated to methodref, methodref != expected */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.INSTANCE, + MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.SAME, + ClassData.Package.DIFFERENT)), + Template.OverrideAbstractExpectedIface, + Template.IfaceMethodrefNotEqualsExpected, + Template.IgnoredAbstract, + Template.CallsiteUnrelatedToMethodref, + Template.IfaceMethodrefSelection, + Template.SelectionOverrideAbstract) + ); + + private InvokeInterfaceSuccessTest() { + super(testgroups); + } + + public static void main(final String... args) { + new InvokeInterfaceSuccessTest().run(); + } +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/runtime/SelectionResolution/InvokeSpecialICCE.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/runtime/SelectionResolution/InvokeSpecialICCE.java Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,126 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test + * @summary Test of method selection and resolution cases that + * generate IncompatibleClassChangeError + * @modules java.base/jdk.internal.org.objectweb.asm + * @library /runtime/SelectionResolution/classes + * @build selectionresolution.* + * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-VerifyDependencies InvokeSpecialICCE + */ + +import java.util.Arrays; +import java.util.Collection; +import java.util.EnumSet; +import selectionresolution.ClassData; +import selectionresolution.MethodData; +import selectionresolution.Result; +import selectionresolution.SelectionResolutionTest; +import selectionresolution.SelectionResolutionTestCase; +import selectionresolution.Template; + +public class InvokeSpecialICCE extends SelectionResolutionTest { + + private static final SelectionResolutionTestCase.Builder initBuilder = + new SelectionResolutionTestCase.Builder(); + + static { + initBuilder.setResult(Result.ICCE); + } + + private static final Collection testgroups = + Arrays.asList( + /* invokespecial tests */ + /* resolved method is static*/ + /* Group 170: methodref = expected */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKESPECIAL), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC, + MethodData.Access.PACKAGE, + MethodData.Access.PROTECTED, + MethodData.Access.PRIVATE), + EnumSet.of(MethodData.Context.STATIC), + EnumSet.of(ClassData.Package.SAME)), + Template.OverrideAbstractExpectedClass, + Template.MethodrefEqualsExpected, + Template.IgnoredAbstract, + Template.InvokespecialCallsiteCases, + Template.ObjectrefAssignableToCallsite), + /* Group 171: methodref != expected, expected is class */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKESPECIAL), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC, + MethodData.Access.PACKAGE, + MethodData.Access.PROTECTED, + MethodData.Access.PRIVATE), + EnumSet.of(MethodData.Context.STATIC), + EnumSet.of(ClassData.Package.SAME, + ClassData.Package.DIFFERENT)), + Template.OverrideAbstractExpectedClass, + Template.MethodrefNotEqualsExpectedClass, + Template.IgnoredAbstract, + Template.InvokespecialCallsiteCases, + Template.ObjectrefAssignableToCallsite), + /* Group 172: methodref != expected, expected is interface */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKESPECIAL), + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC, + MethodData.Access.PRIVATE), + EnumSet.of(MethodData.Context.STATIC), + EnumSet.of(ClassData.Package.SAME, + ClassData.Package.DIFFERENT)), + Template.OverrideAbstractExpectedIface, + Template.MethodrefNotEqualsExpectedIface, + Template.IgnoredAbstract, + Template.InvokespecialCallsiteCases, + Template.ObjectrefAssignableToCallsite), + + /* Group 173: Ambiguous resolution */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKESPECIAL), + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC, + MethodData.Access.PRIVATE), + EnumSet.allOf(MethodData.Context.class), + EnumSet.of(ClassData.Package.SAME, + ClassData.Package.DIFFERENT)), + Template.OverrideAbstractExpectedIface, + Template.MethodrefAmbiguous, + Template.IgnoredAbstract, + Template.InvokespecialCallsiteCases, + 
Template.ObjectrefAssignableToCallsite) + ); + + private InvokeSpecialICCE() { + super(testgroups); + } + + public static void main(final String... args) { + new InvokeSpecialICCE().run(); + } +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/runtime/SelectionResolution/InvokeSpecialSuccessTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/runtime/SelectionResolution/InvokeSpecialSuccessTest.java Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,301 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @summary Test of method selection and resolution cases that + * generate InvokeSpecialSuccessTest + * @modules java.base/jdk.internal.org.objectweb.asm + * @library /runtime/SelectionResolution/classes + * @build selectionresolution.* + * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-VerifyDependencies InvokeSpecialSuccessTest + */ + +import java.util.Arrays; +import java.util.Collection; +import java.util.EnumSet; +import selectionresolution.ClassData; +import selectionresolution.MethodData; +import selectionresolution.SelectionResolutionTest; +import selectionresolution.SelectionResolutionTestCase; +import selectionresolution.Template; + +public class InvokeSpecialSuccessTest extends SelectionResolutionTest { + + private static final SelectionResolutionTestCase.Builder initBuilder = + new SelectionResolutionTestCase.Builder(); + + static { + initBuilder.invoke = SelectionResolutionTestCase.InvokeInstruction.INVOKESPECIAL; + } + + private static final Collection testgroups = + Arrays.asList( + /* Group 46: callsite = methodref = expected */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC, + MethodData.Access.PACKAGE, + MethodData.Access.PROTECTED, + MethodData.Access.PRIVATE), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.SAME)), + Template.OverrideAbstractExpectedClass, + Template.MethodrefEqualsExpected, + Template.IgnoredAbstract, + Template.CallsiteEqualsMethodref, + Template.ObjectrefAssignableToCallsite), + /* Group 47: callsite = methodref, methodref != expected, + * expected is class, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC, + MethodData.Access.PACKAGE, + MethodData.Access.PROTECTED), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.SAME)), + 
Template.OverrideAbstractExpectedClass, + Template.MethodrefNotEqualsExpectedClass, + Template.IgnoredAbstract, + Template.CallsiteEqualsMethodref, + Template.ObjectrefAssignableToCallsite), + /* Group 48: callsite = methodref, methodref != expected, + * expected is interface, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.SAME)), + Template.OverrideAbstractExpectedIface, + Template.MethodrefNotEqualsExpectedIface, + Template.IgnoredAbstract, + Template.CallsiteEqualsMethodref, + Template.ObjectrefAssignableToCallsite), + /* Group 49: callsite :> methodref, methodref = expected, + * expected is class, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC, + MethodData.Access.PACKAGE, + MethodData.Access.PROTECTED), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.SAME)), + Template.OverrideAbstractExpectedClass, + Template.MethodrefEqualsExpected, + Template.IgnoredAbstract, + Template.CallsiteSubclassMethodref, + Template.ObjectrefAssignableToCallsite), + /* Group 50: callsite :> methodref, methodref != expected, + * expected is class, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC, + MethodData.Access.PACKAGE, + MethodData.Access.PROTECTED), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.SAME)), + Template.OverrideAbstractExpectedClass, + Template.MethodrefNotEqualsExpectedClass, + Template.IgnoredAbstract, + Template.CallsiteSubclassMethodref, + Template.ObjectrefAssignableToCallsite), + /* Group 51: callsite :> methodref, methodref != expected, + * expected is interface, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.SAME)), + Template.OverrideAbstractExpectedIface, + Template.MethodrefNotEqualsExpectedIface, + Template.IgnoredAbstract, + Template.CallsiteSubclassMethodref, + Template.ObjectrefAssignableToCallsite), + /* Group 52: callsite = methodref, methodref != expected, + * expected is class, expected and callsite not in the same package + */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC, + MethodData.Access.PROTECTED), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.DIFFERENT)), + Template.OverrideAbstractExpectedClass, + Template.MethodrefNotEqualsExpectedClass, + Template.IgnoredAbstract, + Template.CallsiteEqualsMethodref, + Template.ObjectrefAssignableToCallsite), + /* Group 53: callsite = methodref, methodref != expected, + * expected is interface, expected and callsite not in the same package + */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.DIFFERENT)), + Template.OverrideAbstractExpectedIface, + Template.MethodrefNotEqualsExpectedIface, + Template.IgnoredAbstract, + 
Template.CallsiteEqualsMethodref, + Template.ObjectrefAssignableToCallsite), + /* Group 54: callsite :> methodref, methodref = expected, + * expected is class, expected and callsite not in the same package + */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC, + MethodData.Access.PROTECTED), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.DIFFERENT)), + Template.OverrideAbstractExpectedClass, + Template.MethodrefEqualsExpected, + Template.IgnoredAbstract, + Template.CallsiteSubclassMethodref, + Template.ObjectrefAssignableToCallsite), + + /* Group 55: callsite :> methodref, methodref != expected, + * expected is class, expected and callsite not in the same package + */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC, + MethodData.Access.PROTECTED), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.DIFFERENT)), + Template.OverrideAbstractExpectedClass, + Template.MethodrefNotEqualsExpectedClass, + Template.IgnoredAbstract, + Template.CallsiteSubclassMethodref, + Template.ObjectrefAssignableToCallsite), + /* Group 56: callsite :> methodref, methodref != expected, + * expected is interface, expected and callsite not in the same package + */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.DIFFERENT)), + Template.OverrideAbstractExpectedIface, + Template.MethodrefNotEqualsExpectedIface, + Template.IgnoredAbstract, + Template.CallsiteSubclassMethodref, + Template.ObjectrefAssignableToCallsite), + + /* Funny cases */ + /* Group 57: callsite = methodref, methodref = + * expected expected is interface, expected and + * callsite in a different package + */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PROTECTED), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.DIFFERENT)), + Template.OverrideAbstractExpectedClass, + Template.MethodrefEqualsExpected, + Template.IgnoredAbstract, + Template.CallsiteEqualsMethodref, + Template.ObjectrefEqualsOrExactSubclassOfCallsite), + /* Group 58: callsite = methodref, methodref \!= + * expected expected is interface, expected and + * callsite in a different package */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PROTECTED), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.DIFFERENT)), + Template.MethodrefNotEqualsExpectedClass, + Template.IgnoredAbstract, + Template.CallsiteEqualsMethodref, + Template.ObjectrefEqualsOrExactSubclassOfCallsite), + /* Group 59: callsite subclass methodref, methodref = + * expected expected is interface, expected and + * callsite in a different package */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PROTECTED), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.DIFFERENT)), + Template.OverrideAbstractExpectedClass, + Template.MethodrefEqualsExpected, + Template.IgnoredAbstract, + Template.CallsiteSubclassMethodref, + Template.ObjectrefEqualsOrExactSubclassOfCallsite), + /* Group 60: callsite subclass methodref, methodref + * \!= expected expected is interface, 
expected and + * callsite in a different package */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PROTECTED), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.DIFFERENT)), + Template.OverrideAbstractExpectedClass, + Template.MethodrefNotEqualsExpectedClass, + Template.IgnoredAbstract, + Template.CallsiteSubclassMethodref, + Template.ObjectrefEqualsOrExactSubclassOfCallsite), + + /* Methodref is an interface */ + /* Group 61: callsite :> methodref, methodref = expected, + * expected is interface, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.SAME, + ClassData.Package.DIFFERENT)), + Template.OverrideAbstractExpectedIface, + Template.MethodrefEqualsExpected, + Template.IgnoredAbstract, + Template.CallsiteSubclassMethodref, + Template.ObjectrefAssignableToCallsite), + /* Group 62: callsite :> methodref, methodref != expected, + * expected is interface, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.SAME, + ClassData.Package.DIFFERENT)), + Template.OverrideAbstractExpectedIface, + Template.IfaceMethodrefNotEqualsExpected, + Template.IgnoredAbstract, + Template.CallsiteSubclassMethodref, + Template.ObjectrefAssignableToCallsite) + ); + + private InvokeSpecialSuccessTest() { + super(testgroups); + } + + public static void main(final String... args) { + new InvokeSpecialSuccessTest().run(); + } +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/runtime/SelectionResolution/InvokeStaticICCE.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/runtime/SelectionResolution/InvokeStaticICCE.java Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,126 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test + * @summary Test of invokestatic method selection and resolution cases that + * generate IncompatibleClassChangeError + * @modules java.base/jdk.internal.org.objectweb.asm + * @library /runtime/SelectionResolution/classes + * @build selectionresolution.* + * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-VerifyDependencies InvokeStaticICCE + */ + +import java.util.Arrays; +import java.util.Collection; +import java.util.EnumSet; +import selectionresolution.ClassData; +import selectionresolution.MethodData; +import selectionresolution.Result; +import selectionresolution.SelectionResolutionTest; +import selectionresolution.SelectionResolutionTestCase; +import selectionresolution.Template; + +public class InvokeStaticICCE extends SelectionResolutionTest { + + private static final SelectionResolutionTestCase.Builder initBuilder = + new SelectionResolutionTestCase.Builder(); + + static { + initBuilder.setResult(Result.ICCE); + } + + private static final Collection testgroups = + Arrays.asList( + /* invokestatic tests */ + /* Group 157: methodref = expected */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKESTATIC), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC, + MethodData.Access.PACKAGE, + MethodData.Access.PROTECTED, + MethodData.Access.PRIVATE), + EnumSet.of(MethodData.Context.INSTANCE, + MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.SAME, + ClassData.Package.DIFFERENT)), + Template.OverrideAbstractExpectedClass, + Template.MethodrefEqualsExpected, + Template.AllCallsiteCases, + Template.TrivialObjectref), + /* Group 158: methodref = expected, expected is interface */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKESTATIC), + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC, + MethodData.Access.PRIVATE), + EnumSet.of(MethodData.Context.INSTANCE, + MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.SAME, + ClassData.Package.DIFFERENT)), + Template.OverrideAbstractExpectedIface, + Template.MethodrefEqualsExpected, + Template.AllCallsiteCases, + Template.TrivialObjectref), + /* Group 159: methodref != expected, expected is class + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKESTATIC), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC, + MethodData.Access.PACKAGE, + MethodData.Access.PROTECTED, + MethodData.Access.PRIVATE), + EnumSet.of(MethodData.Context.INSTANCE, + MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.SAME, + ClassData.Package.DIFFERENT)), + Template.OverrideAbstractExpectedClass, + Template.MethodrefNotEqualsExpectedClass, + Template.AllCallsiteCases, + Template.TrivialObjectref), + /* Group 160: methodref = expected, expected is interface */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKESTATIC), + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC, + MethodData.Access.PRIVATE), + EnumSet.of(MethodData.Context.INSTANCE, + MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.SAME, + ClassData.Package.DIFFERENT)), + Template.OverrideAbstractExpectedIface, + Template.MethodrefNotEqualsExpectedIface, + Template.AllCallsiteCases, + Template.TrivialObjectref) + ); + + private 
InvokeStaticICCE() { + super(testgroups); + } + + public static void main(final String... args) { + new InvokeStaticICCE().run(); + } +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/runtime/SelectionResolution/InvokeStaticSuccessTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/runtime/SelectionResolution/InvokeStaticSuccessTest.java Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,243 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @summary Test of method selection and resolution cases that + * generate InvokeStaticSuccessTest + * @modules java.base/jdk.internal.org.objectweb.asm + * @library /runtime/SelectionResolution/classes + * @build selectionresolution.* + * @run main InvokeStaticSuccessTest + */ + +import java.util.Arrays; +import java.util.Collection; +import java.util.EnumSet; +import selectionresolution.ClassData; +import selectionresolution.MethodData; +import selectionresolution.SelectionResolutionTest; +import selectionresolution.SelectionResolutionTestCase; +import selectionresolution.Template; + +public class InvokeStaticSuccessTest extends SelectionResolutionTest { + + private static final SelectionResolutionTestCase.Builder initBuilder = + new SelectionResolutionTestCase.Builder(); + + static { + initBuilder.invoke = SelectionResolutionTestCase.InvokeInstruction.INVOKESTATIC; + } + + private static final Collection testgroups = + Arrays.asList( + /* invokestatic tests */ + /* Group 1: callsite = methodref = expected */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC, + MethodData.Access.PACKAGE, + MethodData.Access.PROTECTED, + MethodData.Access.PRIVATE), + EnumSet.of(MethodData.Context.STATIC), + EnumSet.of(ClassData.Package.SAME)), + Template.MethodrefEqualsExpected, + Template.CallsiteEqualsMethodref, + Template.TrivialObjectref), + /* Group 2: callsite = methodref, methodref != expected, + * expected is class, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC, + MethodData.Access.PACKAGE, + MethodData.Access.PROTECTED), + EnumSet.of(MethodData.Context.STATIC), + EnumSet.of(ClassData.Package.SAME)), + Template.MethodrefNotEqualsExpectedClass, + Template.CallsiteEqualsMethodref, + Template.TrivialObjectref), + /* Group 3: callsite :> methodref, methodref = expected, + * expected is class, expected and callsite in 
the same package + */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC, + MethodData.Access.PACKAGE, + MethodData.Access.PROTECTED), + EnumSet.of(MethodData.Context.STATIC), + EnumSet.of(ClassData.Package.SAME)), + Template.MethodrefEqualsExpected, + Template.CallsiteSubclassMethodref, + Template.TrivialObjectref), + /* Group 4: callsite :> methodref, methodref = expected, + * expected is interface, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.STATIC), + EnumSet.of(ClassData.Package.SAME)), + Template.MethodrefEqualsExpected, + Template.CallsiteSubclassMethodref, + Template.TrivialObjectref), + /* Group 5: callsite :> methodref, methodref != expected, + * expected is class, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC, + MethodData.Access.PACKAGE, + MethodData.Access.PROTECTED), + EnumSet.of(MethodData.Context.STATIC), + EnumSet.of(ClassData.Package.SAME)), + Template.MethodrefNotEqualsExpectedClass, + Template.CallsiteSubclassMethodref, + Template.TrivialObjectref), + /* Group 6: callsite unrelated to methodref, methodref = expected, + * expected is class, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC, + MethodData.Access.PACKAGE, + MethodData.Access.PROTECTED), + EnumSet.of(MethodData.Context.STATIC), + EnumSet.of(ClassData.Package.SAME)), + Template.MethodrefEqualsExpected, + Template.CallsiteUnrelatedToMethodref, + Template.TrivialObjectref), + /* Group 7: callsite unrelated to methodref, methodref = expected, + * expected is interface, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.STATIC), + EnumSet.of(ClassData.Package.SAME)), + Template.MethodrefEqualsExpected, + Template.CallsiteUnrelatedToMethodref, + Template.TrivialObjectref), + /* Group 8: callsite unrelated to methodref, methodref != expected, + * expected is class, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC, + MethodData.Access.PACKAGE, + MethodData.Access.PROTECTED), + EnumSet.of(MethodData.Context.STATIC), + EnumSet.of(ClassData.Package.SAME)), + Template.MethodrefNotEqualsExpectedClass, + Template.CallsiteUnrelatedToMethodref, + Template.TrivialObjectref), + /* Group 9: callsite = methodref, methodref != expected, + * expected is class, expected and callsite not in the same package + */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC, + MethodData.Access.PROTECTED), + EnumSet.of(MethodData.Context.STATIC), + EnumSet.of(ClassData.Package.DIFFERENT)), + Template.MethodrefNotEqualsExpectedClass, + Template.CallsiteEqualsMethodref, + Template.TrivialObjectref), + /* Group 10: callsite :> methodref, methodref = expected, + * expected is class, expected and callsite not in the same package + */ + new TestGroup.Simple(initBuilder, + 
Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC, + MethodData.Access.PROTECTED), + EnumSet.of(MethodData.Context.STATIC), + EnumSet.of(ClassData.Package.DIFFERENT)), + Template.MethodrefEqualsExpected, + Template.CallsiteSubclassMethodref, + Template.TrivialObjectref), + /* Group 11: callsite :> methodref, methodref = expected, + * expected is interface, expected and callsite not in the same package + */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.STATIC), + EnumSet.of(ClassData.Package.DIFFERENT)), + Template.MethodrefEqualsExpected, + Template.CallsiteSubclassMethodref, + Template.TrivialObjectref), + /* Group 12: callsite :> methodref, methodref != expected, + * expected is class, expected and callsite not in the same package + */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC, + MethodData.Access.PROTECTED), + EnumSet.of(MethodData.Context.STATIC), + EnumSet.of(ClassData.Package.DIFFERENT)), + Template.MethodrefNotEqualsExpectedClass, + Template.CallsiteSubclassMethodref, + Template.TrivialObjectref), + /* Group 13: callsite unrelated to methodref, methodref = expected, + * expected is class, expected and callsite not in the same package + */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.STATIC), + EnumSet.of(ClassData.Package.DIFFERENT)), + Template.MethodrefEqualsExpected, + Template.CallsiteUnrelatedToMethodref, + Template.TrivialObjectref), + /* Group 14: callsite unrelated to methodref, methodref = expected, + * expected is interface, expected and callsite not in the same package + */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.STATIC), + EnumSet.of(ClassData.Package.DIFFERENT)), + Template.MethodrefEqualsExpected, + Template.CallsiteUnrelatedToMethodref, + Template.TrivialObjectref), + /* Group 15: callsite unrelated to methodref, methodref != expected, + * expected is class, expected and callsite not in the same package + */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.STATIC), + EnumSet.of(ClassData.Package.DIFFERENT)), + Template.MethodrefNotEqualsExpectedClass, + Template.CallsiteUnrelatedToMethodref, + Template.TrivialObjectref) + ); + + private InvokeStaticSuccessTest() { + super(testgroups); + } + + public static void main(final String... args) { + new InvokeStaticSuccessTest().run(); + } +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/runtime/SelectionResolution/InvokeVirtualICCE.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/runtime/SelectionResolution/InvokeVirtualICCE.java Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,174 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @summary Test of method selection and resolution cases that + * generate IncompatibleClassChangeError + * @modules java.base/jdk.internal.org.objectweb.asm + * @library /runtime/SelectionResolution/classes + * @build selectionresolution.* + * @run main/othervm/timeout=1200 -XX:+IgnoreUnrecognizedVMOptions -XX:-VerifyDependencies InvokeVirtualICCE + */ + +import java.util.Arrays; +import java.util.Collection; +import java.util.EnumSet; +import selectionresolution.ClassData; +import selectionresolution.MethodData; +import selectionresolution.Result; +import selectionresolution.SelectionResolutionTest; +import selectionresolution.SelectionResolutionTestCase; +import selectionresolution.Template; + +public class InvokeVirtualICCE extends SelectionResolutionTest { + + private static final SelectionResolutionTestCase.Builder initBuilder = + new SelectionResolutionTestCase.Builder(); + + static { + initBuilder.setResult(Result.ICCE); + } + + private static final Collection testgroups = + Arrays.asList( + /* invokevirtual tests */ + + /* resolved method is static*/ + /* Group 161: callsite = methodref = expected */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC, + MethodData.Access.PACKAGE, + MethodData.Access.PRIVATE), + EnumSet.of(MethodData.Context.STATIC), + EnumSet.of(ClassData.Package.SAME, + ClassData.Package.DIFFERENT)), + Template.OverrideAbstractExpectedClass, + Template.MethodrefEqualsExpected, + Template.IgnoredAbstract, + Template.AllCallsiteCases, + Template.MethodrefSelectionResolvedIsClass), + /* Group 162: callsite = methodref, methodref != expected, + * expected is class, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC, + MethodData.Access.PACKAGE, + MethodData.Access.PRIVATE), + EnumSet.of(MethodData.Context.STATIC), + EnumSet.of(ClassData.Package.SAME, + ClassData.Package.DIFFERENT)), + Template.OverrideAbstractExpectedClass, + Template.MethodrefNotEqualsExpectedClass, + Template.IgnoredAbstract, + Template.AllCallsiteCases, + Template.MethodrefSelectionResolvedIsClass), + /* Group 163: callsite = methodref, methodref != expected, + * expected is interface, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC, + MethodData.Access.PRIVATE), + 
EnumSet.of(MethodData.Context.STATIC), + EnumSet.of(ClassData.Package.SAME, + ClassData.Package.DIFFERENT)), + Template.OverrideAbstractExpectedIface, + Template.MethodrefNotEqualsExpectedIface, + Template.IgnoredAbstract, + Template.AllCallsiteCases, + Template.MethodrefSelectionResolvedIsIface), + + /* methodref is an interface */ + /* Group 164: callsite = methodref = expected */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC, + MethodData.Access.PRIVATE), + EnumSet.allOf(MethodData.Context.class), + EnumSet.of(ClassData.Package.SAME, + ClassData.Package.DIFFERENT)), + Template.OverrideAbstractExpectedIface, + Template.MethodrefEqualsExpected, + Template.IgnoredAbstract, + Template.AllCallsiteCases, + Template.IfaceMethodrefSelection), + /* Group 165: callsite = methodref, methodref != expected, + * expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC, + MethodData.Access.PRIVATE), + EnumSet.allOf(MethodData.Context.class), + EnumSet.of(ClassData.Package.SAME, + ClassData.Package.DIFFERENT)), + Template.OverrideAbstractExpectedIface, + Template.IfaceMethodrefNotEqualsExpected, + Template.IgnoredAbstract, + Template.AllCallsiteCases, + Template.IfaceMethodrefSelection), + + /* Group 166: Ambiguous resolution tests */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.allOf(MethodData.Context.class), + EnumSet.of(ClassData.Package.SAME, + ClassData.Package.DIFFERENT)), + Template.OverrideAbstractExpectedIface, + Template.MethodrefAmbiguous, + Template.IgnoredAbstract, + Template.AllCallsiteCases, + Template.MethodrefSelectionResolvedIsIfaceNoOverride), + /* Group 167: ambiguous selection */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.SAME, + ClassData.Package.DIFFERENT)), + Template.OverrideAbstractExpectedIface, + Template.MethodrefNotEqualsExpectedIface, + Template.IgnoredAbstract, + Template.AllCallsiteCases, + Template.MethodrefAmbiguousResolvedIsIface) + ); + + private InvokeVirtualICCE() { + super(testgroups); + } + + public static void main(final String... args) { + new InvokeVirtualICCE().run(); + } +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/runtime/SelectionResolution/InvokeVirtualSuccessTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/runtime/SelectionResolution/InvokeVirtualSuccessTest.java Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,429 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @summary Test of method selection and resolution cases that + * generate InvokeVirtualSuccessTest + * @modules java.base/jdk.internal.org.objectweb.asm + * @library /runtime/SelectionResolution/classes + * @build selectionresolution.* + * @run main/othervm/timeout=400 -XX:+IgnoreUnrecognizedVMOptions -XX:-VerifyDependencies InvokeVirtualSuccessTest + */ + +import java.util.Arrays; +import java.util.Collection; +import java.util.EnumSet; +import selectionresolution.ClassData; +import selectionresolution.MethodData; +import selectionresolution.SelectionResolutionTest; +import selectionresolution.SelectionResolutionTestCase; +import selectionresolution.Template; + +public class InvokeVirtualSuccessTest extends SelectionResolutionTest { + + private static final SelectionResolutionTestCase.Builder initBuilder = + new SelectionResolutionTestCase.Builder(); + + static { + initBuilder.invoke = SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL; + } + + private static final Collection testgroups = + Arrays.asList( + /* invokevirtual tests */ + /* Group 16: callsite = methodref = expected, no override */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PRIVATE), + EnumSet.of(MethodData.Context.INSTANCE), + EnumSet.of(ClassData.Package.SAME)), + Template.OverrideAbstractExpectedClass, + Template.MethodrefEqualsExpected, + Template.IgnoredAbstract, + Template.CallsiteEqualsMethodref, + Template.MethodrefSelectionResolvedIsClassNoOverride), + /* Group 17: callsite = methodref = expected, override allowed */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PROTECTED, + MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.INSTANCE, + MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.SAME)), + Template.OverrideAbstractExpectedClass, + Template.MethodrefEqualsExpected, + Template.IgnoredAbstract, + Template.CallsiteEqualsMethodref, + Template.MethodrefSelectionResolvedIsClass, + Template.SelectionOverrideAbstract), + /* Group 18: callsite = methodref = resolved, possibly + * skip different package in selection. 
+ */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PACKAGE), + EnumSet.of(MethodData.Context.INSTANCE, + MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.SAME)), + Template.OverrideAbstractExpectedClass, + Template.MethodrefEqualsExpected, + Template.IgnoredAbstract, + Template.CallsiteEqualsMethodref, + Template.MethodrefSelectionPackageSkip, + Template.SelectionOverrideAbstract), + /* Group 19: callsite = methodref, methodref != expected, + * expected is class, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC, + MethodData.Access.PROTECTED), + EnumSet.of(MethodData.Context.INSTANCE, + MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.SAME)), + Template.OverrideAbstractExpectedClass, + Template.MethodrefNotEqualsExpectedClass, + Template.IgnoredAbstract, + Template.CallsiteEqualsMethodref, + Template.MethodrefSelectionResolvedIsClass, + Template.SelectionOverrideAbstract), + /* Group 20: callsite = methodref, methodref \!= + * expected, possibly skip different package in + * selection. + */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PACKAGE), + EnumSet.of(MethodData.Context.INSTANCE, + MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.SAME)), + Template.OverrideAbstractExpectedClass, + Template.MethodrefNotEqualsExpectedClass, + Template.IgnoredAbstract, + Template.CallsiteEqualsMethodref, + Template.MethodrefSelectionPackageSkip, + Template.SelectionOverrideAbstract), + /* Group 21: callsite = methodref, methodref != expected, + * expected is interface, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.INSTANCE, + MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.SAME)), + Template.OverrideAbstractExpectedIface, + Template.MethodrefNotEqualsExpectedIface, + Template.IgnoredAbstract, + Template.CallsiteEqualsMethodref, + Template.MethodrefSelectionResolvedIsIface, + Template.SelectionOverrideAbstract), + /* Group 22: callsite :> methodref, methodref = expected, + * expected is class, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC, + MethodData.Access.PROTECTED), + EnumSet.of(MethodData.Context.INSTANCE, + MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.SAME)), + Template.OverrideAbstractExpectedClass, + Template.MethodrefEqualsExpected, + Template.IgnoredAbstract, + Template.CallsiteSubclassMethodref, + Template.MethodrefSelectionResolvedIsClass, + Template.SelectionOverrideAbstract), + /* Group 23: callsite :>, methodref = expected, + * possibly skip different package in selection + */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PACKAGE), + EnumSet.of(MethodData.Context.INSTANCE, + MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.SAME)), + Template.OverrideAbstractExpectedClass, + Template.MethodrefEqualsExpected, + Template.IgnoredAbstract, + Template.CallsiteSubclassMethodref, + Template.MethodrefSelectionPackageSkip, + Template.SelectionOverrideAbstract), + /* Group 24: 
callsite :> methodref, methodref != expected, + * expected is class, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC, + MethodData.Access.PROTECTED), + EnumSet.of(MethodData.Context.INSTANCE, + MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.SAME)), + Template.OverrideAbstractExpectedClass, + Template.MethodrefNotEqualsExpectedClass, + Template.IgnoredAbstract, + Template.CallsiteSubclassMethodref, + Template.MethodrefSelectionResolvedIsClass, + Template.SelectionOverrideAbstract), + /* Group 25: callsite :>, methodref = expected, + * possibly skip different package in selection + */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PACKAGE), + EnumSet.of(MethodData.Context.INSTANCE, + MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.SAME)), + Template.OverrideAbstractExpectedClass, + Template.MethodrefNotEqualsExpectedClass, + Template.IgnoredAbstract, + Template.CallsiteSubclassMethodref, + Template.MethodrefSelectionPackageSkip, + Template.SelectionOverrideAbstract), + /* Group 26: callsite :> methodref, methodref != expected, + * expected is interface, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.INSTANCE, + MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.SAME)), + Template.OverrideAbstractExpectedIface, + Template.MethodrefNotEqualsExpectedIface, + Template.IgnoredAbstract, + Template.CallsiteSubclassMethodref, + Template.MethodrefSelectionResolvedIsIface, + Template.SelectionOverrideAbstract), + /* Group 27: callsite unrelated to methodref, methodref = expected, + * expected is class, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC, + MethodData.Access.PROTECTED), + EnumSet.of(MethodData.Context.INSTANCE, + MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.SAME)), + Template.OverrideAbstractExpectedClass, + Template.MethodrefEqualsExpected, + Template.IgnoredAbstract, + Template.CallsiteUnrelatedToMethodref, + Template.MethodrefSelectionResolvedIsClass, + Template.SelectionOverrideAbstract), + /* Group 28: callsite unrelated to methodref, + * methodref = expected, possibly skip different + * package in selection + */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PACKAGE), + EnumSet.of(MethodData.Context.INSTANCE, + MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.SAME)), + Template.OverrideAbstractExpectedClass, + Template.MethodrefEqualsExpected, + Template.IgnoredAbstract, + Template.CallsiteUnrelatedToMethodref, + Template.MethodrefSelectionPackageSkip, + Template.SelectionOverrideAbstract), + /* Group 29: callsite unrelated to methodref, methodref != expected, + * expected is class, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC, + MethodData.Access.PROTECTED), + EnumSet.of(MethodData.Context.INSTANCE, + MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.SAME)), + Template.OverrideAbstractExpectedClass, + 
Template.MethodrefNotEqualsExpectedClass, + Template.IgnoredAbstract, + Template.CallsiteUnrelatedToMethodref, + Template.MethodrefSelectionResolvedIsClass, + Template.SelectionOverrideAbstract), + /* Group 30: callsite unrelated to methodref, + * methodref \!= expected, possibly skip different + * package in selection + */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PACKAGE), + EnumSet.of(MethodData.Context.INSTANCE, + MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.SAME)), + Template.OverrideAbstractExpectedClass, + Template.MethodrefNotEqualsExpectedClass, + Template.IgnoredAbstract, + Template.CallsiteUnrelatedToMethodref, + Template.MethodrefSelectionPackageSkip, + Template.SelectionOverrideAbstract), + /* Group 31: callsite unrelated to methodref, methodref != expected, + * expected is interface, expected and callsite in the same package + */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.INSTANCE, + MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.SAME)), + Template.OverrideAbstractExpectedIface, + Template.MethodrefNotEqualsExpectedIface, + Template.IgnoredAbstract, + Template.CallsiteUnrelatedToMethodref, + Template.MethodrefSelectionResolvedIsIface, + Template.SelectionOverrideAbstract), + /* Group 32: callsite = methodref, methodref != expected, + * expected is class, expected and callsite not in the same package + */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC, + MethodData.Access.PROTECTED), + EnumSet.of(MethodData.Context.INSTANCE, + MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.DIFFERENT)), + Template.OverrideAbstractExpectedClass, + Template.MethodrefNotEqualsExpectedClass, + Template.IgnoredAbstract, + Template.CallsiteEqualsMethodref, + Template.MethodrefSelectionResolvedIsClass, + Template.SelectionOverrideAbstract), + /* Group 33: callsite = methodref, methodref != expected, + * expected is interface, expected and callsite not in the same package + */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.INSTANCE, + MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.DIFFERENT)), + Template.OverrideAbstractExpectedIface, + Template.MethodrefNotEqualsExpectedIface, + Template.IgnoredAbstract, + Template.CallsiteEqualsMethodref, + Template.MethodrefSelectionResolvedIsIface, + Template.SelectionOverrideAbstract), + /* Group 34: callsite :> methodref, methodref = expected, + * expected is class, expected and callsite not in the same package + */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.INSTANCE, + MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.DIFFERENT)), + Template.OverrideAbstractExpectedClass, + Template.MethodrefEqualsExpected, + Template.IgnoredAbstract, + Template.CallsiteSubclassMethodref, + Template.MethodrefSelectionResolvedIsClass, + Template.SelectionOverrideAbstract), + + /* Group 35: callsite :> methodref, methodref != expected, + * expected is class, expected and callsite not in the same package + */ + new TestGroup.Simple(initBuilder, + 
Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.INSTANCE, + MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.DIFFERENT)), + Template.OverrideAbstractExpectedClass, + Template.MethodrefNotEqualsExpectedClass, + Template.IgnoredAbstract, + Template.CallsiteSubclassMethodref, + Template.MethodrefSelectionResolvedIsClass, + Template.SelectionOverrideAbstract), + /* Group 36: callsite :> methodref, methodref != expected, + * expected is interface, expected and callsite not in the same package + */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.INSTANCE, + MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.DIFFERENT)), + Template.OverrideAbstractExpectedIface, + Template.MethodrefNotEqualsExpectedIface, + Template.IgnoredAbstract, + Template.CallsiteSubclassMethodref, + Template.MethodrefSelectionResolvedIsIface, + Template.SelectionOverrideAbstract), + /* Group 37: callsite unrelated to methodref, methodref = expected, + * expected is class, expected and callsite not in the same package + */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.INSTANCE, + MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.DIFFERENT)), + Template.OverrideAbstractExpectedClass, + Template.MethodrefEqualsExpected, + Template.IgnoredAbstract, + Template.CallsiteUnrelatedToMethodref, + Template.MethodrefSelectionResolvedIsClass, + Template.SelectionOverrideAbstract), + /* Group 38: callsite unrelated to methodref, methodref != expected, + * expected is class, expected and callsite not in the same package + */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.CLASS), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.INSTANCE, + MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.DIFFERENT)), + Template.OverrideAbstractExpectedClass, + Template.MethodrefNotEqualsExpectedClass, + Template.IgnoredAbstract, + Template.CallsiteUnrelatedToMethodref, + Template.MethodrefSelectionResolvedIsClass, + Template.SelectionOverrideAbstract), + /* Group 39: callsite unrelated to methodref, methodref != expected, + * expected is interface, expected and callsite not in the same package + */ + new TestGroup.Simple(initBuilder, + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.of(MethodData.Context.INSTANCE, + MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.DIFFERENT)), + Template.OverrideAbstractExpectedIface, + Template.MethodrefNotEqualsExpectedIface, + Template.IgnoredAbstract, + Template.CallsiteUnrelatedToMethodref, + Template.MethodrefSelectionResolvedIsIface, + Template.SelectionOverrideAbstract) + ); + + private InvokeVirtualSuccessTest() { + super(testgroups); + } + + public static void main(final String... args) { + new InvokeVirtualSuccessTest().run(); + } +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/runtime/SelectionResolution/NoSuchMethodErrorTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/runtime/SelectionResolution/NoSuchMethodErrorTest.java Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,450 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. 
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @summary Test of method selection and resolution cases that + * generate NoSuchMethodError + * @modules java.base/jdk.internal.org.objectweb.asm + * @library /runtime/SelectionResolution/classes + * @build selectionresolution.* + * @run main NoSuchMethodErrorTest + */ + +import java.util.Arrays; +import java.util.Collection; +import java.util.EnumSet; +import selectionresolution.ClassData; +import selectionresolution.MethodData; +import selectionresolution.Result; +import selectionresolution.SelectionResolutionTest; +import selectionresolution.SelectionResolutionTestCase; +import selectionresolution.Template; + +public class NoSuchMethodErrorTest extends SelectionResolutionTest { + + private static final SelectionResolutionTestCase.Builder initBuilder = + new SelectionResolutionTestCase.Builder(); + + static { + initBuilder.setResult(Result.NSME); + } + + private static final MethodData concreteMethod = + new MethodData(MethodData.Access.PUBLIC, MethodData.Context.INSTANCE); + + private static final MethodData staticMethod = + new MethodData(MethodData.Access.PUBLIC, MethodData.Context.STATIC); + + private static final MethodData privateMethod = + new MethodData(MethodData.Access.PRIVATE, MethodData.Context.INSTANCE); + + private static final ClassData withDef = + new ClassData(ClassData.Package.SAME, concreteMethod); + + private static final ClassData withStaticDef = + new ClassData(ClassData.Package.SAME, staticMethod); + + private static final ClassData withPrivateDef = + new ClassData(ClassData.Package.SAME, staticMethod); + + private static final Template NoMethodResolutionTemplateClassBottom = + new Template("NoMethodResolutionTemplate", + /* Empty single class + * + * C[]() = mref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final int C = builder.addClass(Template.emptyClass(ClassData.Package.SAME)); + builder.methodref = C; + }, + /* Class bottom, inherit empty class + * + * C2[]() + * C1[C2]() = mref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final int C1 = builder.addClass(Template.emptyClass(ClassData.Package.SAME)); + final int C2 = builder.addClass(Template.emptyClass(ClassData.Package.SAME)); + builder.hier.addInherit(C1, C2); + builder.methodref = C1; + }, + /* Class bottom, inherit empty interface + * + * I[]() + * C[I]() = mref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final int C = builder.addClass(Template.emptyClass(ClassData.Package.SAME)); + final int I = 
builder.addInterface(Template.emptyClass(ClassData.Package.SAME)); + builder.hier.addInherit(C, I); + builder.methodref = C; + }, + /* Class bottom, inherit empty class and interface + * + * C2[](), I[]() + * C1[C2,I]() = mref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final int C1 = builder.addClass(Template.emptyClass(ClassData.Package.SAME)); + final int C2 = builder.addClass(Template.emptyClass(ClassData.Package.SAME)); + final int I = builder.addInterface(Template.emptyClass(ClassData.Package.SAME)); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(C1, I); + builder.methodref = C1; + }, + /* Class bottom, unrelated class defines + * + * C20[](con) + * C1[]() + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final int C = builder.addClass(Template.emptyClass(ClassData.Package.SAME)); + builder.addClass(withDef); + builder.methodref = C; + }, + /* Class bottom, interface defines static + * + * I[](stat) + * C[]() + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final int C = builder.addClass(Template.emptyClass(ClassData.Package.SAME)); + final int I = builder.addInterface(withStaticDef); + builder.hier.addInherit(C, I); + builder.methodref = C; + }, + /* Class bottom, interface defines private + * + * I[](priv) + * C[]() + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final int C = builder.addClass(Template.emptyClass(ClassData.Package.SAME)); + final int I = builder.addInterface(withPrivateDef); + builder.hier.addInherit(C, I); + builder.methodref = C; + }); + + private static final Template NoMethodResolutionTemplateIfaceBottom = + new Template("NoMethodResolutionTemplate", + /* Empty single interface + * + * I[]() = mref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final int I = builder.addInterface(Template.emptyClass(ClassData.Package.SAME)); + builder.methodref = I; + }, + /* Interface bottom, inherit empty interface + * + * I2[]() + * I1[I2]() = mref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final int I1 = builder.addInterface(Template.emptyClass(ClassData.Package.SAME)); + final int I2 = builder.addInterface(Template.emptyClass(ClassData.Package.SAME)); + builder.hier.addInherit(I1, I2); + builder.methodref = I1; + }, + /* Interface bottom, unrelated class defines + * + * C0[](con) + * I[]() = mref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final int I = builder.addInterface(Template.emptyClass(ClassData.Package.SAME)); + builder.addClass(withDef); + builder.methodref = I; + }, + /* Interface bottom, interface defines static + * + * I2[](stat) + * I1[I2]() = mref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final int I1 = builder.addInterface(Template.emptyClass(ClassData.Package.SAME)); + final int I2 = builder.addInterface(withStaticDef); + builder.hier.addInherit(I1, I2); + builder.methodref = I1; + }, + /* Interface bottom, interface defines private + * + * I2[](stat) + * I1[I2]() = mref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final int I1 = builder.addInterface(Template.emptyClass(ClassData.Package.SAME)); + final int I2 = builder.addInterface(withPrivateDef); + builder.hier.addInherit(I1, I2); + builder.methodref = I1; + }); + + private static final Template NoMethodSelectionTemplateClassMethodref = + new Template("NoMethodSelectionTemplate", + /* objectref = methodref + * + * C[]() = mref = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + 
builder.objectref = builder.methodref; + }, + /* Inherit methodref + * + * C2[]() = mref + * C1[C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final int C1 = builder.addClass(Template.emptyClass(ClassData.Package.SAME)); + final int C2 = builder.methodref; + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + }, + /* Inherit methodref and interface + * + * C2[]() = mref, I[]() + * C1[C2,I]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final int C2 = builder.methodref; + final int C1 = builder.addClass(Template.emptyClass(ClassData.Package.SAME)); + final int I = builder.addInterface(Template.emptyClass(ClassData.Package.SAME)); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(C1, I); + builder.objectref = C1; + }, + /* objectref = methodref, unrelated class defines + * + * C0[](def) + * C[]() = mref = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + builder.addClass(withDef); + builder.objectref = builder.methodref; + }, + /* Inherit methodref, unrelated class defines + * + * C0[](def) + * C2[]() = mref + * C1[C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final int C1 = builder.addClass(Template.emptyClass(ClassData.Package.SAME)); + final int C2 = builder.methodref; + builder.addClass(withDef); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + }, + /* Inherit methodref and interface, unrelated class defines. + * + * C0[](def) + * C2[]() = mref, I[]() + * C1[C2,I]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final int C2 = builder.methodref; + final int C1 = builder.addClass(Template.emptyClass(ClassData.Package.SAME)); + final int I = builder.addInterface(Template.emptyClass(ClassData.Package.SAME)); + builder.addClass(withDef); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(C1, I); + builder.objectref = C1; + }, + /* objectref = methodref, unrelated interface defines + * + * I0[](def) + * C[]() = mref = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + builder.addInterface(withDef); + builder.objectref = builder.methodref; + }, + /* Inherit methodref, interface defines static + * + * C2[]() = mref, I0[](stat) + * C1[C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final int C1 = builder.addClass(Template.emptyClass(ClassData.Package.SAME)); + final int C2 = builder.methodref; + final int I0 = builder.addInterface(withStaticDef); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(C1, I0); + builder.objectref = C1; + }, + /* Inherit methodref, interface defines private + * + * C2[]() = mref, I0[](stat) + * C1[C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final int C1 = builder.addClass(Template.emptyClass(ClassData.Package.SAME)); + final int C2 = builder.methodref; + final int I0 = builder.addInterface(withPrivateDef); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(C1, I0); + builder.objectref = C1; + }); + + private static final Template NoMethodSelectionTemplateIfaceMethodref = + new Template("NoMethodSelectionTemplate", + /* Inherit methodref + * + * I[]() = mref + * C[I]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final int C = builder.addClass(Template.emptyClass(ClassData.Package.SAME)); + final int I = builder.methodref; + builder.hier.addInherit(C, I); + builder.objectref = C; + }, + /* Inherit methodref and interface + * + * I1[]() = mref, I2[]() + * 
C[T,I]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final int I1 = builder.methodref; + final int C = builder.addClass(Template.emptyClass(ClassData.Package.SAME)); + final int I2 = builder.addInterface(Template.emptyClass(ClassData.Package.SAME)); + builder.hier.addInherit(C, I1); + builder.hier.addInherit(C, I2); + builder.objectref = C; + }, + /* Inherit methodref, unrelated class defines + * + * C0[](def) + * I[]() = mref + * C[I]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final int C = builder.addClass(Template.emptyClass(ClassData.Package.SAME)); + final int I = builder.methodref; + builder.addClass(withDef); + builder.hier.addInherit(C, I); + builder.objectref = C; + }, + /* Inherit methodref and interface, unrelated class defines + * + * C0[](def) + * I1[]() = mref, I2[]() + * C[I1,I2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final int I1 = builder.methodref; + final int C = builder.addClass(Template.emptyClass(ClassData.Package.SAME)); + final int I2 = builder.addInterface(Template.emptyClass(ClassData.Package.SAME)); + builder.addClass(withDef); + builder.hier.addInherit(C, I1); + builder.hier.addInherit(C, I2); + builder.objectref = C; + }, + /* Inherit methodref, interface defines static + * + * I[]() = mref, I0[](stat) + * C[I,I0]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final int C = builder.addClass(Template.emptyClass(ClassData.Package.SAME)); + final int I = builder.methodref; + final int I0 = builder.addInterface(withStaticDef); + builder.hier.addInherit(C, I); + builder.hier.addInherit(C, I0); + builder.objectref = C; + }, + /* Inherit methodref, unrelated class defines private + * + * I[]() = mref, I0[](priv) + * C[I,I0]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final int C = builder.addClass(Template.emptyClass(ClassData.Package.SAME)); + final int I = builder.methodref; + final int I0 = builder.addInterface(withPrivateDef); + builder.hier.addInherit(C, I); + builder.hier.addInherit(C, I0); + builder.objectref = C; + }); + + private static final Collection testgroups = + Arrays.asList( + /* invokestatic tests */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKESTATIC), + NoMethodResolutionTemplateClassBottom, + Template.AllCallsiteCases, + Template.TrivialObjectref), + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKESTATIC), + NoMethodResolutionTemplateIfaceBottom, + Template.CallsiteNotEqualsMethodref, + Template.TrivialObjectref), + /* invokevirtual tests */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + NoMethodResolutionTemplateClassBottom, + Template.AllCallsiteCases, + NoMethodSelectionTemplateClassMethodref), + /* invokeinterface tests */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEINTERFACE), + NoMethodResolutionTemplateIfaceBottom, + Template.CallsiteNotEqualsMethodref, + NoMethodSelectionTemplateIfaceMethodref), + + /* Hiding of private interface methods */ + /* invokevirtual */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEVIRTUAL), + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PRIVATE), + EnumSet.of(MethodData.Context.INSTANCE, + 
MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.SAME, + ClassData.Package.DIFFERENT)), + Template.MethodrefNotEqualsExpectedIface, + Template.AllCallsiteCases, + Template.TrivialObjectref), + /* invokeinterface */ + new TestGroup.Simple(initBuilder, + Template.SetInvoke(SelectionResolutionTestCase.InvokeInstruction.INVOKEINTERFACE), + Template.ResultCombo(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PRIVATE), + EnumSet.of(MethodData.Context.INSTANCE, + MethodData.Context.ABSTRACT), + EnumSet.of(ClassData.Package.SAME, + ClassData.Package.DIFFERENT)), + Template.IfaceMethodrefNotEqualsExpected, + Template.AllCallsiteCases, + Template.TrivialObjectrefNotEqualMethodref) + ); + + private NoSuchMethodErrorTest() { + super(testgroups); + } + + public static void main(final String... args) { + new NoSuchMethodErrorTest().run(); + } +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/runtime/SelectionResolution/classes/selectionresolution/Builder.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/runtime/SelectionResolution/classes/selectionresolution/Builder.java Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +package selectionresolution; + +import java.util.HashMap; + +abstract class Builder { + protected final SelectionResolutionTestCase testcase; + protected final HierarchyShape hier; + protected final HashMap classdata; + + public Builder(SelectionResolutionTestCase testcase) { + this.testcase = testcase; + this.hier = testcase.hier; + this.classdata = testcase.classdata; + } + + protected String getName(int id) { + StringBuilder name = new StringBuilder(); + + name.append(getPackageName(classdata.get(id).packageId.ordinal())); + + // Name classes C and interfaces I + name.append(getClassName(id)); + + return name.toString(); + } + + protected String getPackageName(int packageId) { + return "P" + packageId + "/"; + } + + protected String getClassName(int id) { + // Name classes C and interfaces I + if (isClass(id)) { + return "C" + id; + } else { + return "I" + id; + } + } + + protected boolean isClass(int id) { + return hier.isClass(id); + } +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/runtime/SelectionResolution/classes/selectionresolution/ByteCodeClassLoader.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/runtime/SelectionResolution/classes/selectionresolution/ByteCodeClassLoader.java Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +package selectionresolution; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; + + +public class ByteCodeClassLoader extends ClassLoader { + ArrayList classes = new ArrayList<>(); + HashMap loadedClasses = new HashMap<>(); + + public void addClasses(ClassConstruct... 
classes) { + this.classes.addAll(Arrays.asList(classes)); + } + + public void loadAll() throws ClassNotFoundException { + for (ClassConstruct clazz : classes) { + findClass(clazz.getDottedName()); + } + } + + + @Override + public Class findClass(String name) throws ClassNotFoundException { + + Class cls = loadedClasses.get(name); + + if (cls != null) { + return cls; + } + + for (ClassConstruct clazz : classes) { + if (clazz.getDottedName().equals(name)) { + return load(clazz); + } + } + + throw new ClassNotFoundException(name); + } + + @Override + public Class loadClass(String name) throws ClassNotFoundException { + try { + return findClass(name); + } catch (ClassNotFoundException e) { + return super.loadClass(name); + } + } + + private Class load(ClassConstruct clazz) { + byte[] bytecode = clazz.generateBytes(); + Class loadedClass = defineClass(clazz.getDottedName(), bytecode, 0, bytecode.length); + loadedClasses.put(clazz.getDottedName(), loadedClass); + return loadedClass; + } +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/runtime/SelectionResolution/classes/selectionresolution/ClassBuilder.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/runtime/SelectionResolution/classes/selectionresolution/ClassBuilder.java Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,241 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +package selectionresolution; + +import java.util.ArrayList; +import java.util.Iterator; + +import static jdk.internal.org.objectweb.asm.Opcodes.ACC_ABSTRACT; +import static jdk.internal.org.objectweb.asm.Opcodes.ACC_PUBLIC; +import static jdk.internal.org.objectweb.asm.Opcodes.ACC_PRIVATE; +import static jdk.internal.org.objectweb.asm.Opcodes.ACC_PROTECTED; +import static jdk.internal.org.objectweb.asm.Opcodes.ACC_STATIC; + +/** + * Constructs classes and interfaces based on the information from a + * DefaultMethodTestCase + * + */ +public class ClassBuilder extends Builder { + private final ArrayList classes; + + // Add a class in every package to be able to instantiate package + // private classes from outside the package + private final Clazz[] helpers = new Clazz[4]; + private ClassConstruct callsiteClass; + + public enum ExecutionMode { DIRECT, INDY, MH_INVOKE_EXACT, MH_INVOKE_GENERIC} + private final ExecutionMode execMode; + + public ClassBuilder(SelectionResolutionTestCase testcase, + ExecutionMode execMode) { + super(testcase); + this.classes = new ArrayList<>(); + this.execMode = execMode; + } + + public ClassConstruct[] build() throws Exception { + buildClassConstructs(); + return classes.toArray(new ClassConstruct[0]); + } + + public ClassConstruct getCallsiteClass() { + return callsiteClass; + } + + private void buildClassConstructs() throws Exception { + TestBuilder tb = new TestBuilder(testcase.methodref, testcase); + + classes.add(new Clazz("Test", ACC_PUBLIC, -1)); + + for (int classId = 0; classId < classdata.size(); classId++) { + ClassConstruct C; + String[] interfaces = getInterfaces(classId); + ClassData data = classdata.get(classId); + + if (isClass(classId)) { + C = new Clazz(getName(classId), + getExtending(classId), + getClassModifiers(data), + classId, + interfaces); + + addHelperMethod(classId); + + } else { + C = new Interface(getName(classId), + getAccessibility(data.access), + classId, interfaces); + } + + // Add a method "m()LTestObject;" if applicable + if (containsMethod(data)) { + // Method will either be abstract or concrete depending on the + // abstract modifier + C.addTestMethod(getMethodModifiers(data)); + } + + if (classId == testcase.callsite) { + // Add test() method + tb.addTest(C, execMode); + callsiteClass = C; + } + + classes.add(C); + } + classes.add(tb.getMainTestClass()); + + } + + private void addHelperMethod(int classId) { + int packageId = classdata.get(classId).packageId.ordinal(); + Clazz C = helpers[packageId]; + if (C == null) { + C = new Clazz(getPackageName(packageId) + "Helper", -1, ACC_PUBLIC); + helpers[packageId] = C; + classes.add(C); + } + + Method m = C.addMethod("get" + getClassName(classId), + "()L" + getName(classId) + ";", + ACC_PUBLIC + ACC_STATIC); + m.makeInstantiateMethod(getName(classId)); + } + + private String[] getInterfaces(int classId) { + ArrayList interfaces = new ArrayList<>(); + + // Figure out if we're extending/implementing an interface + for (final int intf : hier.interfaces()) { + if (hier.inherits(classId, intf)) { + interfaces.add(getName(intf)); + } + } + return interfaces.toArray(new String[0]); + } + + private String getExtending(int classId) { + int extending = -1; + + // See if we're extending another class + for (final int extendsClass : hier.classes()) { + if (hier.inherits(classId, extendsClass)) { + // Sanity check that we haven't already found an extending class + if (extending != -1) { + throw new RuntimeException("Multiple extending classes"); + } + extending = extendsClass; + 
} + } + + return extending == -1 ? null : getName(extending); + } + + /** + * Returns modifiers for a Class + * @param cd ClassData for the Class + * @return ASM modifiers for a Class + */ + private int getClassModifiers(ClassData cd) { + // For Classes we only care about accessibility (public, private etc) + return getAccessibility(cd.access) | getAbstraction(cd.abstraction); + } + + /** + * Returns modifiers for Method type + * @param cd ClassData for the Class or Interface where the Method resides + * @return ASM modifiers for the Method + */ + private int getMethodModifiers(ClassData cd) { + int mod = 0; + + // For methods we want everything + mod += getAccessibility(cd.methoddata.access); + mod += getAbstraction(cd.methoddata.context); + mod += getContext(cd.methoddata.context); + mod += getExtensibility(); + return mod; + } + + + /** + * Convert ClassData access type to ASM + * @param access + * @return ASM version of accessibility (public / private / protected) + */ + private int getAccessibility(MethodData.Access access) { + switch(access) { + case PACKAGE: + //TODO: Do I need to set this or will this be the default? + return 0; + case PRIVATE: + return ACC_PRIVATE; + case PROTECTED: + return ACC_PROTECTED; + case PUBLIC: + return ACC_PUBLIC; + default: + throw new RuntimeException("Illegal accessibility modifier: " + access); + } + } + + /** + * Convert ClassData abstraction type to ASM + * @param abstraction + * @return ASM version of abstraction (abstract / non-abstract) + */ + private int getAbstraction(MethodData.Context context) { + return context == MethodData.Context.ABSTRACT ? ACC_ABSTRACT : 0; + } + + /** + * Convert ClassData context type to ASM + * @param context + * @return ASM version of context (static / non-static) + */ + private int getContext(MethodData.Context context) { + return context == MethodData.Context.STATIC ? ACC_STATIC : 0; + } + + /** + * Convert ClassData extensibility type to ASM + * @param extensibility + * @return ASM version of extensibility (final / non-final) + */ + private int getExtensibility() { + return 0; + } + + /** + * Determine if we need a method at all, abstraction is set to null if this + * Class/Interface should not have a test method + * @param cd + * @return + */ + private boolean containsMethod(ClassData cd) { + return cd.methoddata != null; + } + +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/runtime/SelectionResolution/classes/selectionresolution/ClassConstruct.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/runtime/SelectionResolution/classes/selectionresolution/ClassConstruct.java Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,168 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +package selectionresolution; + +import java.io.File; +import java.io.FileOutputStream; +import jdk.internal.org.objectweb.asm.ClassWriter; +import jdk.internal.org.objectweb.asm.Opcodes; + +public abstract class ClassConstruct { + private final ClassWriter cw; + private final String name; + private final boolean isInterface; + private final int index; + + /** + * Base constructor for building a Class or Interface + * @param name Name of Class/Interface, including package name + * @param extending Name of extending Class if any + * @param access Access for Class/Interface + * @param classFileVersion Class file version + * @param interfaces Interface implemented + */ + public ClassConstruct(String name, + String extending, + int access, + int classFileVersion, + int index, + String... interfaces) { + this.name = name; + isInterface = (access & Opcodes.ACC_INTERFACE) == Opcodes.ACC_INTERFACE; + cw = new ClassWriter(ClassWriter.COMPUTE_FRAMES); + cw.visit(classFileVersion, access, name, null, extending, interfaces == null ? new String[] { } : interfaces); + this.index = index; + } + + /** + * Get full Class/Interface name including package name, as it + * should appear in a classfile. + * + * @return The full Class/Interface name including package name + */ + public String getName() { + return name; + } + + /** + * Get the name of the class, including package as it would appear + * in Java source. + * + * @return The name of the class as it would appear in Java source. + */ + public String getDottedName() { + return name.replace("/", "."); + } + + public String getPackageName() { + final int idx = name.lastIndexOf('/'); + if (idx != -1) { + return name.substring(0, name.indexOf('/')); + } else { + return null; + } + } + + public String getClassName() { + final int idx = name.lastIndexOf('/'); + if (idx != -1) { + return name.substring(name.indexOf('/')); + } else { + return name; + } + } + + /** + * Add a method, no code associated with it yet + * @param name Name of method + * @param descriptor Descriptor for method + * @param access Access for the method + * @return Method object that can be used for constructing a method body + */ + public Method addMethod(String name, + String descriptor, + int access) { + return addMethod(name, descriptor, access, null); + } + + /** + * Add a method, no code associated with it yet + * @param name Name of method + * @param descriptor Descriptor for method + * @param access Access for the method + * @param execMode The execution mode for the method. 
+ * @return Method object that can be used for constructing a method body + */ + public Method addMethod(String name, + String descriptor, + int access, + ClassBuilder.ExecutionMode execMode) { + return new Method(this, cw, name, descriptor, access, execMode); + } + + /** + * Adds a m()LTestObject; method which returns null unless the method is abstract + * @param access Access for the method + */ + public void addTestMethod(int access) { + Method m = new Method(this, cw, Method.defaultMethodName, Method.defaultMethodDescriptor, access, null); + if ((access & Opcodes.ACC_ABSTRACT) != Opcodes.ACC_ABSTRACT) { + m.makeDefaultMethod(); + } + } + + /** + * Construct the class to a byte[] + * @return byte[] with class file + */ + public byte[] generateBytes() { + cw.visitEnd(); + return cw.toByteArray(); + } + + /** + * Write out a class to a file in the specified directory. + * + * @param dir Directory to which to write out the file. + */ + public void writeClass(final File dir) throws Exception { + final String pkgname = getPackageName(); + final File pkgdir = pkgname != null ? new File(dir, getPackageName()) : dir; + pkgdir.mkdirs(); + final File out = new File(pkgdir, getClassName() + ".class"); + out.createNewFile(); + try (final FileOutputStream fos = new FileOutputStream(out)) { + fos.write(generateBytes()); + } + } + + public boolean isInterface() { + return isInterface; + } + + public Integer getIndex() { + return index; + } +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/runtime/SelectionResolution/classes/selectionresolution/ClassData.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/runtime/SelectionResolution/classes/selectionresolution/ClassData.java Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,102 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package selectionresolution; + +/** + * A representation of information about a class. Note that classes + * here define only one method. + */ +public class ClassData { + + public enum Package { + /** + * Same package as the callsite. + */ + SAME, + /** + * Different package from the callsite. + */ + DIFFERENT, + /** + * Same as DIFFERENT, and also implies that the class access + * is package-private. + */ + INACCESSIBLE, + /** + * Different from everything else. Used in selection only, to + * test skipping package-private definitions. + */ + OTHER, + /** + * Placeholder, used solely by the template dumper for + * printing out the effects of templates. Don't use for + * anything else. 
+ */ + PLACEHOLDER; + } + + /** + * The package ID for the class. + */ + public final Package packageId; + + /** + * The method data for the method definition. If there is no + * method definition, this will be null. + */ + public final MethodData methoddata; + + /** + * The class access. Note that this is controlled by the packageId. + */ + public final MethodData.Access access; + + // This is a hardwired value necessary for ClassBuilder + public final MethodData.Context abstraction = MethodData.Context.INSTANCE; + + public ClassData(final Package packageId, + final MethodData methoddata) { + this.packageId = packageId; + this.methoddata = methoddata; + + if (packageId == Package.INACCESSIBLE) + access = MethodData.Access.PACKAGE; + else + access = MethodData.Access.PUBLIC; + } + + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(" { "); + + if (methoddata != null) { + sb.append(methoddata); + } + + sb.append(" }\n\n"); + + return sb.toString(); + } + +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/runtime/SelectionResolution/classes/selectionresolution/Clazz.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/runtime/SelectionResolution/classes/selectionresolution/Clazz.java Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +package selectionresolution; + +import static jdk.internal.org.objectweb.asm.Opcodes.ACC_PUBLIC; +import static jdk.internal.org.objectweb.asm.Opcodes.ACC_SUPER; +import static jdk.internal.org.objectweb.asm.Opcodes.V1_8; + + +class Clazz extends ClassConstruct { + + /** + * Construct a Class + * @param name Name of Class + * @param access Access for the Class + */ + public Clazz(String name, int access, int index) { + this(name, null, access, V1_8, index, new String[] { }); + } + + /** + * Construct a Class + * @param name Name of Class + * @param extending Class being extended + * @param access Access for the Class + */ + public Clazz(String name, String extending, int access, int index) { + this(name, extending, access, V1_8, index, new String[] { }); + } + + /** + * Construct a Class + * @param name Name of Class + * @param extending Class being extended + * @param access access for the Class + * @param implementing Interfaces implemented + */ + public Clazz(String name, String extending, int access, int index, String... 
implementing) { + this(name, extending, access, V1_8, index, implementing); + } + + /** + * Construct a Class + * @param name Name of Class + * @param extending Class being extended + * @param access Access for the Class + * @param classFileVersion Class file version + * @param implementing Interfaces implemented + */ + public Clazz(String name, String extending, int access, int classFileVersion, int index, String... implementing) { + super(name, extending == null ? "java/lang/Object" : extending, access + ACC_SUPER, classFileVersion, index, implementing); + // Add the default constructor + addMethod("", "()V", ACC_PUBLIC).makeConstructor(extending); + } +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/runtime/SelectionResolution/classes/selectionresolution/HierarchyShape.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/runtime/SelectionResolution/classes/selectionresolution/HierarchyShape.java Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,224 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +package selectionresolution; + +import java.util.Arrays; +import java.util.Collection; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Set; +import java.util.Map; + +/** + * A representation of a class/interface hierarchy graph (just the + * graph; the class data is represented elsewhere). + */ +public class HierarchyShape { + public static final int OBJECT_CLASS = -1; + + protected int maxId; + + /** + * The names of all the classes. + */ + private final HashSet classes; + + /** + * The names of all the interfaces. + */ + private final HashSet interfaces; + private final HashMap> extensions; + + /** + * Create an empty hierarchy shape. + */ + public HierarchyShape() { + this(0, new HashSet<>(), new HashSet<>(), new HashMap<>()); + } + + private HierarchyShape(final int maxId, + final HashSet classes, + final HashSet interfaces, + final HashMap> extensions) { + this.maxId = maxId; + this.classes = classes; + this.interfaces = interfaces; + this.extensions = extensions; + } + + /** + * Make a copy of this hierarchy shape. + */ + public HierarchyShape copy() { + final HashMap> newextensions = new HashMap<>(); + + for(final Map.Entry> entry : + extensions.entrySet()) { + newextensions.put(entry.getKey(), + (HashSet)entry.getValue().clone()); + } + + return new HierarchyShape(maxId, (HashSet) classes.clone(), + (HashSet) interfaces.clone(), + newextensions); + } + + /** + * Add a class, and return its id. + * + * @return The new class id. 
+ */ + public int addClass() { + final int id = maxId++; + classes.add(id); + return id; + } + + /** + * Add an interface, and return its id. + * + * @return The new interface id. + */ + public int addInterface() { + final int id = maxId++; + interfaces.add(id); + return id; + } + + /** + * Add an inheritance. + * + * @param sub The sub class/interface. + * @param sup The super class/interface + */ + public void addInherit(final int sub, + final int sup) { + HashSet ext = extensions.get(sub); + + if (ext == null) { + ext = new HashSet<>(); + extensions.put(sub, ext); + } + + ext.add(sup); + } + + @Override + public String toString() { + String out = ""; + for(int i = maxId - 1; i >= 0; i--) { + out += i + ": "; + for(int j = 0; j < maxId; j++) { + out += "[" + (inherits(i, j) ? "1" : "0") + "]"; + } + out += "\n"; + } + return out; + } + + /** + * Indicate whether the first class inherits from the second. + * + * @param sub The possible subtype. + * @param sup The possible supertype. + * @return Whether or not {@code sub} inherits from {@code sup}. + */ + public boolean inherits(final int sub, final int sup) { + final Set ext = extensions.get(sub); + if (ext != null) { + return ext.contains(sup); + } else { + return false; + } + } + + /** + * Indicate whether a given type name is a class. + * + * @param id The type in question. + * @return Whether or not the type is a class. + */ + public boolean isClass(final int id) { + if (id == OBJECT_CLASS) { + return true; + } + return classes.contains(id); + } + + /** + * Indicate whether a given type name is an interface. + * + * @param id The type in question. + * @return Whether or not the type is an interface. + */ + public boolean isInterface(final int id) { + if (id == OBJECT_CLASS) { + return false; + } + return interfaces.contains(id); + } + + /** + * Get an iterator over the classes. + * + * @return An iterator over classes. + */ + public Collection classes() { + return classes; + } + + /** + * Get an iterator over the interfaces. + * + * @return An iterator over interfaces. + */ + public Collection interfaces() { + return interfaces; + } + + /** + * Get an iterator over all types. + * + * @return An iterator over all types. + */ + public Collection types() { + final Set combined = new HashSet(classes); + combined.addAll(interfaces); + return combined; + } + + public int numClasses() { + return classes.size(); + } + + public int numInterfaces() { + return interfaces.size(); + } + + public int numTypes() { + return numClasses() + numInterfaces(); + } + +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/runtime/SelectionResolution/classes/selectionresolution/Interface.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/runtime/SelectionResolution/classes/selectionresolution/Interface.java Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +package selectionresolution; + +import static jdk.internal.org.objectweb.asm.Opcodes.ACC_ABSTRACT; +import static jdk.internal.org.objectweb.asm.Opcodes.ACC_INTERFACE; +import static jdk.internal.org.objectweb.asm.Opcodes.V1_8; + +class Interface extends ClassConstruct { + + public Interface(String name, int access, int index) { + this(name, V1_8, access, index, (String)null); + } + + public Interface(String name, int index) { + this(name, V1_8, index, (String)null); + } + + + public Interface(String name, int access, int index, String... extending) { + this(name, V1_8, access, index, extending); + } + + public Interface(String name, int classFileVersion, int access, int index, String... extending) { + super(name, "java/lang/Object", access + ACC_ABSTRACT + ACC_INTERFACE, classFileVersion, index, extending); + } + +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/runtime/SelectionResolution/classes/selectionresolution/Method.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/runtime/SelectionResolution/classes/selectionresolution/Method.java Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,266 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +package selectionresolution; + +import jdk.internal.org.objectweb.asm.ClassVisitor; +import jdk.internal.org.objectweb.asm.Handle; +import jdk.internal.org.objectweb.asm.MethodVisitor; + +import java.lang.invoke.CallSite; +import java.lang.invoke.MethodHandles; +import java.lang.invoke.MethodType; + +import static jdk.internal.org.objectweb.asm.Opcodes.ACC_PUBLIC; +import static jdk.internal.org.objectweb.asm.Opcodes.ACC_STATIC; +import static jdk.internal.org.objectweb.asm.Opcodes.ALOAD; +import static jdk.internal.org.objectweb.asm.Opcodes.ARETURN; +import static jdk.internal.org.objectweb.asm.Opcodes.DUP; +import static jdk.internal.org.objectweb.asm.Opcodes.POP; +import static jdk.internal.org.objectweb.asm.Opcodes.NEW; +import static jdk.internal.org.objectweb.asm.Opcodes.SWAP; +import static jdk.internal.org.objectweb.asm.Opcodes.ASTORE; +import static jdk.internal.org.objectweb.asm.Opcodes.RETURN; +import static jdk.internal.org.objectweb.asm.Opcodes.INVOKESPECIAL; +import static jdk.internal.org.objectweb.asm.Opcodes.INVOKESTATIC; +import static jdk.internal.org.objectweb.asm.Opcodes.INVOKEINTERFACE; +import static jdk.internal.org.objectweb.asm.Opcodes.INVOKEVIRTUAL; +import static jdk.internal.org.objectweb.asm.Opcodes.H_INVOKESPECIAL; +import static jdk.internal.org.objectweb.asm.Opcodes.H_INVOKESTATIC; +import static jdk.internal.org.objectweb.asm.Opcodes.H_INVOKEINTERFACE; +import static jdk.internal.org.objectweb.asm.Opcodes.H_INVOKEVIRTUAL; + +class Method { + public static final String defaultMethodName = "m"; + public static final String defaultMethodDescriptor = "()Ljava/lang/Integer;"; + public static final String methodDescriptorTemplate = "(L%s;)Ljava/lang/Integer;"; + private final ClassConstruct ownerClass; + private final String ownerClassName; + private final ClassVisitor cv; + private final MethodVisitor mv; + private final boolean isInterface; + private final ClassBuilder.ExecutionMode execMode; + + public Method(ClassConstruct ownerClass, ClassVisitor cv, String name, String descriptor, int access, + ClassBuilder.ExecutionMode execMode) { + this.ownerClassName = ownerClass.getName(); + this.ownerClass = ownerClass; + this.isInterface = ownerClass.isInterface(); + this.execMode = execMode; + this.cv = cv; + mv = cv.visitMethod(access, name, descriptor, null, null); + mv.visitCode(); + } + /** + * Add code for the m()Ljava/lang/Integer; method, always returns null + */ + public void makeDefaultMethod() { + mv.visitTypeInsn(NEW, "java/lang/Integer"); + mv.visitInsn(DUP); + mv.visitLdcInsn(ownerClass.getIndex()); + mv.visitMethodInsn(INVOKESPECIAL, "java/lang/Integer", "", "(I)V"); + mv.visitInsn(ARETURN); + mv.visitMaxs(0, 0); + mv.visitEnd(); + } + + public void makePrivateCallMethod(String className) { + makeSuperCallMethod(INVOKESPECIAL, className); + } + + public void makeSuperCallMethod(int invokeInstruction, String className) { + mv.visitVarInsn(ALOAD, 0); + makeCall(invokeInstruction, className); + mv.visitInsn(POP); + done(); + } + + public void defaultInvoke(int instr, String className, String objectRef) { + switch (instr) { + case INVOKEVIRTUAL: + defaultInvokeVirtual(className, objectRef); + break; + case INVOKEINTERFACE: + defaultInvokeInterface(className, objectRef); + break; + case INVOKESTATIC: + defaultInvokeStatic(className); + break; + case INVOKESPECIAL: + defaultInvokeSpecial(className, objectRef); + break; + default: + break; + } + mv.visitInsn(ARETURN); + mv.visitMaxs(0, 0); + mv.visitEnd(); + } + + public void 
defaultInvokeVirtual(String className, String objectRef) { + String objectRefPackageName = objectRef.substring(0, objectRef.lastIndexOf("/")); + makeNewObject(objectRef, objectRefPackageName); + makeCall(INVOKEVIRTUAL, className, false); + } + + public void defaultInvokeInterface(String className, String objectRef) { + String objectRefPackageName = objectRef.substring(0, objectRef.lastIndexOf("/")); + makeNewObject(objectRef, objectRefPackageName); + makeCall(INVOKEINTERFACE, className, true); + } + + public void defaultInvokeSpecial(String className, String objectRef) { + String objectRefPackageName = objectRef.substring(0, objectRef.lastIndexOf("/")); + makeNewObject(objectRef, objectRefPackageName); + makeCall(INVOKESPECIAL, className, false); + } + + public void defaultInvokeStatic(String className) { + makeCall(INVOKESTATIC, className); + } + + private Method makeCall(int invokeInstruction, String className) { + return makeCall(invokeInstruction, className, isInterface); + } + + private Method makeCall(int invokeInstruction, String className, boolean isInterface) { + switch(execMode) { + case DIRECT: { + mv.visitMethodInsn(invokeInstruction, className, defaultMethodName, defaultMethodDescriptor, isInterface); + break; + } + case INDY: { + Handle m = convertToHandle(invokeInstruction, className, defaultMethodName, defaultMethodDescriptor); + Handle bsm = generateBootstrapMethod(m); + mv.visitInvokeDynamicInsn(defaultMethodName, defaultMethodDescriptor, bsm); + break; + } + case MH_INVOKE_EXACT: + case MH_INVOKE_GENERIC: { + String invokerName = execMode == ClassBuilder.ExecutionMode.MH_INVOKE_GENERIC + ? "invoke" : "invokeExact"; + + Handle m = convertToHandle(invokeInstruction, className, defaultMethodName, defaultMethodDescriptor); + mv.visitLdcInsn(m); + mv.visitInsn(SWAP); + mv.visitMethodInsn(INVOKEVIRTUAL, + "java/lang/invoke/MethodHandle", + invokerName, + String.format(methodDescriptorTemplate, className), + false); + break; + } + default: + throw new Error("Unknown execution mode: " + execMode); + + } + return this; + } + + private Handle generateBootstrapMethod(Handle h) { + String bootstrapName = "bootstrapMethod"; + MethodType bootstrapType = MethodType.methodType(CallSite.class, MethodHandles.Lookup.class, String.class, MethodType.class); + + MethodVisitor bmv = cv.visitMethod(ACC_PUBLIC | ACC_STATIC, bootstrapName, bootstrapType.toMethodDescriptorString(), null, null); + bmv.visitCode(); + + String constCallSite = "java/lang/invoke/ConstantCallSite"; + bmv.visitTypeInsn(NEW, constCallSite); + bmv.visitInsn(DUP); + + bmv.visitLdcInsn(h); + + bmv.visitMethodInsn(INVOKESPECIAL, constCallSite, "", "(Ljava/lang/invoke/MethodHandle;)V", false); + bmv.visitInsn(ARETURN); + + bmv.visitMaxs(0,0); + bmv.visitEnd(); + + return new Handle(H_INVOKESTATIC, ownerClassName, bootstrapName, bootstrapType.toMethodDescriptorString()); + } + + + private static Handle convertToHandle(int invokeInstruction, String className, String methodName, String methodDesc) { + int tag; + switch (invokeInstruction) { + case INVOKEVIRTUAL: tag = H_INVOKEVIRTUAL; break; + case INVOKEINTERFACE: tag = H_INVOKEINTERFACE; break; + case INVOKESPECIAL: tag = H_INVOKESPECIAL; break; + case INVOKESTATIC: tag = H_INVOKESTATIC; break; + default: + throw new Error("Unknown invoke instruction: "+invokeInstruction); + } + + return new Handle(tag, className, methodName, methodDesc); + } + + private void makeNewObject(String objectRef, String objectRefPackageName) { + String className = 
objectRef.substring(objectRef.lastIndexOf("/") + 1); + makeStaticCall( objectRefPackageName + "/Helper", + "get" + className, + "()L" + objectRef + ";"); + mv.visitVarInsn(ASTORE, 1); + mv.visitVarInsn(ALOAD, 1); + } + + public void makeTestCall(String className) { + mv.visitTypeInsn(NEW, className); + mv.visitInsn(DUP); + mv.visitMethodInsn(INVOKESPECIAL, className, "", "()V", false); + mv.visitVarInsn(ASTORE, 1); + mv.visitVarInsn(ALOAD, 1); + mv.visitMethodInsn(INVOKEVIRTUAL, className, "test", "()Ljava/lang/Integer;", false); + mv.visitInsn(RETURN); + mv.visitMaxs(2, 2); + mv.visitEnd(); + } + + public Method makeStaticCall(String classname, String method, String descriptor) { + mv.visitMethodInsn(INVOKESTATIC, classname, method, descriptor, isInterface); + return this; + } + + public void makeConstructor(String extending) { + mv.visitVarInsn(ALOAD, 0); + mv.visitMethodInsn(INVOKESPECIAL, extending == null ? "java/lang/Object" : extending, "", "()V", isInterface); + mv.visitInsn(RETURN); + mv.visitMaxs(0, 0); + mv.visitEnd(); + } + + public void makeInstantiateMethod(String className) { + mv.visitTypeInsn(NEW, className); + mv.visitInsn(DUP); + mv.visitMethodInsn(INVOKESPECIAL, className, "", "()V", false); + mv.visitInsn(ARETURN); + mv.visitMaxs(0, 0); + mv.visitEnd(); + } + + public void done() { + mv.visitInsn(RETURN); + mv.visitMaxs(0, 0); + mv.visitEnd(); + } +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/runtime/SelectionResolution/classes/selectionresolution/MethodData.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/runtime/SelectionResolution/classes/selectionresolution/MethodData.java Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +package selectionresolution; + +/** + * A representation of a method definition. + */ +public class MethodData { + + public enum Access { + PUBLIC(1), + PACKAGE(0), + PROTECTED(4), + PRIVATE(2), + /** + * Placeholder, used solely for printing out the effects of + * templates. Don't use. + */ + PLACEHOLDER(-1); + + public final int flag; + + Access(int flag) { + this.flag = flag; + } + } + + public enum Context { + ABSTRACT, + INSTANCE, + STATIC, + /** + * Placeholder, used solely for printing out the effects of + * templates. Don't use. + */ + PLACEHOLDER; + }; + + /** + * Access for the method. + */ + public final Access access; + + /** + * Context (static, instance, abstract) for the method. + */ + public final Context context; + + /** + * Create method data. 
+ */ + public MethodData(final Access access, + final Context context) { + + this.access = access; + this.context = context; + } + + public String toString() { + StringBuilder sb = new StringBuilder(); + switch (access) { + case PUBLIC: sb.append("public"); break; + case PACKAGE: sb.append("package"); break; + case PROTECTED: sb.append("protected"); break; + case PRIVATE: sb.append("private"); break; + case PLACEHOLDER: sb.append(" _"); break; + default: throw new RuntimeException("Impossible case"); + } + + switch (context) { + case STATIC: sb.append(" static"); break; + case INSTANCE: sb.append(" instance"); break; + case ABSTRACT: sb.append(" abstract"); break; + case PLACEHOLDER: sb.append(" _"); break; + default: throw new RuntimeException("Impossible case"); + } + sb.append(" Integer m();"); + + return sb.toString(); + } + +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/runtime/SelectionResolution/classes/selectionresolution/Result.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/runtime/SelectionResolution/classes/selectionresolution/Result.java Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,262 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package selectionresolution; + +import java.util.Arrays; +import java.util.HashSet; + +/** + * Representation of an expected result. + */ +public interface Result { + public static final Result ICCE = new Exception(IncompatibleClassChangeError.class); + public static final Result IAE = new Exception(IllegalAccessError.class); + public static final Result NSME = new Exception(NoSuchMethodError.class); + public static final Result AME = new Exception(AbstractMethodError.class); + + // Factories + + /** + * Create a result that expects the given class. + */ + public static Result is(int id) { + return new Single(id); + } + + /** + * Create a result that expects the given classes. + */ + public static Result is(int... multiple) { + assert multiple.length > 0; + + if (multiple.length == 1) { + return new Single(multiple[0]); + } else { + return new Any(multiple); + } + } + + /** + * Create a result that expects the given exception to be thrown. + */ + public static Result is(Class exType) { + return new Exception(exType); + } + + /** + * Create a result that expects the given exception to be thrown. + */ + public static Result is(Throwable ex) { + return Result.is(ex.getClass()); + } + + public static final Result EMPTY = new Empty(); + + /** + * Create an empty Result. 
+ */ + public static Result empty() { + return EMPTY; + } + + + public boolean complyWith(int i); + public boolean complyWith(Throwable e); + public boolean complyWith(Result r); + + static class Empty implements Result { + @Override + public boolean complyWith(int i) { + return false; + } + + @Override + public boolean complyWith(Throwable e) { + return false; + } + + @Override + public boolean complyWith(Result r) { + return false; + } + } + + static class Single implements Result { + public int id; + + public Single(int id) { + this.id = id; + } + + @Override + public boolean complyWith(int i) { + return id == i; + } + + @Override + public boolean complyWith(Throwable e) { + return false; + } + + @Override + public boolean complyWith(Result r) { + if (r instanceof Single) { + return complyWith(((Single)r).id); + } else if (r instanceof Any) { + return r.complyWith(this); + } + return false; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof Single)) return false; + + Single single = (Single) o; + + return (id == single.id); + } + + @Override + public int hashCode() { + return id; + } + + @Override + public String toString() { + final StringBuffer sb = new StringBuffer("Result=Single{"); + sb.append("id=").append(id); + sb.append('}'); + return sb.toString(); + } + } + + static class Any implements Result { + public int[] ids; + public Any(int[] ids) { + this.ids = ids; + } + + @Override + public boolean complyWith(int i) { + return Arrays.stream(ids) + .anyMatch(j -> j == i); + } + + @Override + public boolean complyWith(Throwable e) { + return false; + } + + @Override + public boolean complyWith(Result r) { + if (r instanceof Single) { + return complyWith(((Single)r).id); + } + if (r instanceof Any) { + return Arrays.equals(ids, ((Any) r).ids); + } + return false; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + Any any = (Any) o; + + return Arrays.equals(ids, any.ids); + } + + @Override + public int hashCode() { + return Arrays.hashCode(ids); + } + + @Override + public String toString() { + final StringBuffer sb = new StringBuffer("Result=Any{"); + sb.append("ids="); + if (ids == null) sb.append("null"); + else { + sb.append('['); + for (int i = 0; i < ids.length; ++i) + sb.append(i == 0 ? 
"" : ", ").append(ids[i]); + sb.append(']'); + } + sb.append('}'); + return sb.toString(); + } + } + + static class Exception implements Result { + public Class exc; + public Exception(Class e) { + this.exc = e; + } + + @Override + public boolean complyWith(int i) { + return false; + } + + @Override + public boolean complyWith(Throwable e) { + return exc.isAssignableFrom(e.getClass()); + } + + @Override + public boolean complyWith(Result r) { + if (r instanceof Exception) { + return exc.isAssignableFrom(((Exception) r).exc); + } + return false; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof Exception)) return false; + + Exception exception = (Exception) o; + + return exc.equals(exception.exc); + } + + @Override + public int hashCode() { + return exc.hashCode(); + } + + @Override + public String toString() { + final StringBuffer sb = new StringBuffer("Result=Exception{"); + sb.append("exc=").append(exc); + sb.append('}'); + return sb.toString(); + } + } +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/runtime/SelectionResolution/classes/selectionresolution/SelectionResolutionTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/runtime/SelectionResolution/classes/selectionresolution/SelectionResolutionTest.java Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,139 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package selectionresolution; + +import java.util.function.Consumer; +import java.util.Collection; +import java.util.LinkedList; +import java.util.List; + +/** + * A master superclass for all selection/resolution tests. Contains a + * couple of standard definitions that make writing these tests + * easier. + */ +public abstract class SelectionResolutionTest { + + /** + * A unified output function, to ensure that all output goes to + * the right string (System.err). + * + * @param str The line to print. + */ + protected void println(final String str) { + System.err.println(str); + } + + /** + * A test group is a generator for a set of tests that should + * share common characteristics. The Simple class provides a + * default implementation that should work for most purposes. + */ + public static interface TestGroup { + /** + * Given an action that runs a given test case, generate and + * run all cases in this test group. + */ + public void runCases(Consumer runner); + + /** + * The basic implementation of TestGroup. 
Produces one case + * for every possible combination of cases from each of its + * templates, by running them in order on an empty + * SelectionResolutionTestCase.Builder. This should be good + * enough for writing most tests. + */ + public static class Simple implements TestGroup { + private final Template[] templates; + private final SelectionResolutionTestCase.Builder initBuilder; + + public Simple(final SelectionResolutionTestCase.Builder initBuilder, + final Template... templates) { + this.templates = templates; + this.initBuilder = initBuilder; + } + + @Override + public void runCases(final Consumer runner) { + Consumer curr = (builder) -> { + runner.accept(builder.build()); + }; + + for(int i = templates.length - 1; i >= 0; i--) { + final Consumer next = curr; + final Template template = templates[i]; + curr = (builder) -> { + template.runCases(next, builder); + }; + } + + curr.accept(initBuilder); + } + } + } + + private final List errs = new LinkedList(); + + private final Collection testGroups; + + private int testcount = 0; + + /** + * Create a test from a set of test groups. Most actual tests can + * just define the test groups and pass them into this + * constructor, then call run. + */ + protected SelectionResolutionTest(final Collection testGroups) { + this.testGroups = testGroups; + } + + /** + * Run all the tests, report errors if they happen. + */ + protected void run() { + testGroups.stream().forEach( + (group) -> { + group.runCases((final SelectionResolutionTestCase testcase) -> { + testcount++; + final String err = testcase.run(); + + if (err != null) { + errs.add(err); + } + }); + }); + + println("Ran " + testcount + " cases"); + + if(!errs.isEmpty()) { + println("Errors occurred in test:"); + for(final String err : errs) { + println(err); + } + throw new RuntimeException("Errors occurred in test"); + } else { + println("All test cases succeeded"); + } + } +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/runtime/SelectionResolution/classes/selectionresolution/SelectionResolutionTestCase.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/runtime/SelectionResolution/classes/selectionresolution/SelectionResolutionTestCase.java Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,452 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package selectionresolution; + +import java.io.File; +import java.io.FileWriter; +import java.util.HashMap; + +/** + * One individual test case. This class also defines a builder, which + * can be used to build up cases. 
+ */ +public class SelectionResolutionTestCase { + + public enum InvokeInstruction { + INVOKESTATIC, + INVOKESPECIAL, + INVOKEINTERFACE, + INVOKEVIRTUAL; + } + + /** + * The class data (includes interface data). + */ + public final HashMap classdata; + /** + * The hierarchy shape. + */ + public final HierarchyShape hier; + /** + * The invoke instruction to use. + */ + public final InvokeInstruction invoke; + /** + * Which class is the methodref (or interface methodref). + */ + public final int methodref; + /** + * Which class is the objectref. + */ + public final int objectref; + /** + * Which class is the callsite (this must be a class, not an interface. + */ + public final int callsite; + /** + * The expected result. + */ + public final Result result; + + private SelectionResolutionTestCase(final HashMap classdata, + final HierarchyShape hier, + final InvokeInstruction invoke, + final int methodref, + final int objectref, + final int callsite, + final int expected) { + this.classdata = classdata; + this.hier = hier; + this.invoke = invoke; + this.methodref = methodref; + this.objectref = objectref; + this.callsite = callsite; + this.result = Result.is(expected); + } + + private SelectionResolutionTestCase(final HashMap classdata, + final HierarchyShape hier, + final InvokeInstruction invoke, + final int methodref, + final int objectref, + final int callsite, + final Result result) { + this.classdata = classdata; + this.hier = hier; + this.invoke = invoke; + this.methodref = methodref; + this.objectref = objectref; + this.callsite = callsite; + this.result = result; + } + + private static int currError = 0; + + private String dumpClasses(final ClassConstruct[] classes) + throws Exception { + final String errorDirName = "error_" + currError++; + final File errorDir = new File(errorDirName); + errorDir.mkdirs(); + for (int i = 0; i < classes.length; i++) { + classes[i].writeClass(errorDir); + } + try (final FileWriter fos = + new FileWriter(new File(errorDir, "description.txt"))) { + fos.write(this.toString()); + } + return errorDirName; + } + + /** + * Run this case, return an error message, or null. + * + * @return An error message, or null if the case succeeded. + */ + public String run() { + /* Uncomment this line to print EVERY case */ + //System.err.println("Running\n" + this); + final ClassBuilder builder = + new ClassBuilder(this, ClassBuilder.ExecutionMode.DIRECT); + try { + final ByteCodeClassLoader bcl = new ByteCodeClassLoader(); + final ClassConstruct[] classes = builder.build(); + + try { + bcl.addClasses(classes); + bcl.loadAll(); + + // Grab the callsite class. + final Class testclass = + bcl.findClass(builder.getCallsiteClass().getDottedName()); + + // Get the 'test' method out of it and call it. The + // return value tess which class that got selected. + final java.lang.reflect.Method method = + testclass.getDeclaredMethod("test"); + final int actual = (Integer) method.invoke(null); + // Check the result. + if (!result.complyWith(actual)) { + final String dump = dumpClasses(classes); + return "Failed:\n" + this + "\nExpected " + result + " got " + actual + "\nClasses written to " + dump; + } + } catch (Throwable t) { + // This catch block is handling exceptions that we + // might expect to see. 
+ final Throwable actual = t.getCause(); + if (actual == null) { + final String dump = dumpClasses(classes); + System.err.println("Unexpected exception in test\n" + this + "\nClasses written to " + dump); + throw t; + } else if (result == null) { + final String dump = dumpClasses(classes); + return "Failed:\n" + this + "\nUnexpected exception " + actual + "\nClasses written to " + dump; + } else if (!result.complyWith(actual)) { + final String dump = dumpClasses(classes); + return "Failed:\n" + this + "\nExpected " + this.result + " got " + actual + "\nClasses written to " + dump; + } + } + } catch(Throwable e) { + throw new RuntimeException(e); + } + return null; + } + + private static void addPackage(final StringBuilder sb, + final ClassData cd) { + switch (cd.packageId) { + case SAME: sb.append("Same."); break; + case DIFFERENT: sb.append("Different."); break; + case OTHER: sb.append("Other."); break; + case PLACEHOLDER: sb.append("_."); break; + default: throw new RuntimeException("Impossible case"); + } + } + + public String toString() { + final StringBuilder sb = new StringBuilder(); + //sb.append("hierarchy:\n" + hier + "\n"); + sb.append("invoke: " + invoke + "\n"); + if (methodref != -1) { + if (hier.isClass(methodref)) { + sb.append("methodref: C" + methodref + "\n"); + } else { + sb.append("methodref: I" + methodref + "\n"); + } + } + if (objectref != -1) { + if (hier.isClass(objectref)) { + sb.append("objectref: C" + objectref + "\n"); + } else { + sb.append("objectref: I" + objectref + "\n"); + } + } + if (callsite != -1) { + if (hier.isClass(callsite)) { + sb.append("callsite: C" + callsite + "\n"); + } else { + sb.append("callsite: I" + callsite + "\n"); + } + } + sb.append("result: " + result + "\n"); + sb.append("classes:\n\n"); + + for(int i = 0; classdata.containsKey(i); i++) { + final ClassData cd = classdata.get(i); + + if (hier.isClass(i)) { + sb.append("class "); + addPackage(sb, cd); + sb.append("C" + i); + } else { + sb.append("interface "); + addPackage(sb, cd); + sb.append("I" + i); + } + + boolean first = true; + for(final int j : hier.classes()) { + if (hier.inherits(i, j)) { + if (first) { + sb.append(" extends C" + j); + } else { + sb.append(", C" + j); + } + } + } + + first = true; + for(final int j : hier.interfaces()) { + if (hier.inherits(i, j)) { + if (first) { + sb.append(" implements I" + j); + } else { + sb.append(", I" + j); + } + } + } + + sb.append(cd); + } + + return sb.toString(); + } + + /** + * A builder, facilitating building up test cases. + */ + public static class Builder { + /** + * A map from class (or interface) id's to ClassDatas + */ + public final HashMap classdata; + /** + * The hierarchy shape. + */ + public final HierarchyShape hier; + /** + * Which invoke instruction to use. + */ + public InvokeInstruction invoke; + /** + * The id of the methodref (or interface methodref). + */ + public int methodref = -1; + /** + * The id of the object ref. Note that for the generator + * framework to work, this must be set to something. If an + * objectref isn't used, just set it to the methodref. + */ + public int objectref = -1; + /** + * The id of the callsite. + */ + public int callsite = -1; + /** + * The id of the expected result. This is used to store the + * expected resolution result. + */ + public int expected; + /** + * The expected result. This needs to be set before the final + * test case is built. + */ + public Result result; + + /** + * Create an empty Builder object. 
+ */ + public Builder() { + classdata = new HashMap<>(); + hier = new HierarchyShape(); + } + + private Builder(final HashMap classdata, + final HierarchyShape hier, + final InvokeInstruction invoke, + final int methodref, + final int objectref, + final int callsite, + final int expected, + final Result result) { + this.classdata = classdata; + this.hier = hier; + this.invoke = invoke; + this.methodref = methodref; + this.objectref = objectref; + this.callsite = callsite; + this.expected = expected; + this.result = result; + } + + private Builder(final Builder other) { + this((HashMap) other.classdata.clone(), + other.hier.copy(), other.invoke, other.methodref, other.objectref, + other.callsite, other.expected, other.result); + } + + public SelectionResolutionTestCase build() { + if (result != null) { + return new SelectionResolutionTestCase(classdata, hier, invoke, + methodref, objectref, + callsite, result); + } else { + return new SelectionResolutionTestCase(classdata, hier, invoke, + methodref, objectref, + callsite, expected); + } + } + + /** + * Set the expected result. + */ + public void setResult(final Result result) { + this.result = result; + } + + /** + * Add a class, and return its id. + * + * @return The new class' id. + */ + public int addClass(final ClassData data) { + final int id = hier.addClass(); + classdata.put(id, data); + return id; + } + + /** + * Add an interface, and return its id. + * + * @return The new class' id. + */ + public int addInterface(final ClassData data) { + final int id = hier.addInterface(); + classdata.put(id, data); + return id; + } + + /** + * Make a copy of this builder. + */ + public Builder copy() { + return new Builder(this); + } + + public String toString() { + final StringBuilder sb = new StringBuilder(); + //sb.append("hierarchy:\n" + hier + "\n"); + sb.append("invoke: " + invoke + "\n"); + if (methodref != -1) { + if (hier.isClass(methodref)) { + sb.append("methodref: C" + methodref + "\n"); + } else { + sb.append("methodref: I" + methodref + "\n"); + } + } + if (objectref != -1) { + if (hier.isClass(objectref)) { + sb.append("objectref: C" + objectref + "\n"); + } else { + sb.append("objectref: I" + objectref + "\n"); + } + } + if (callsite != -1) { + if (hier.isClass(callsite)) { + sb.append("callsite: C" + callsite + "\n"); + } else { + sb.append("callsite: I" + callsite + "\n"); + } + } + if (expected != -1) { + if (hier.isClass(expected)) { + sb.append("expected: C" + expected + "\n"); + } else { + sb.append("expected: I" + expected + "\n"); + } + } + sb.append("result: " + result + "\n"); + sb.append("classes:\n\n"); + + for(int i = 0; classdata.containsKey(i); i++) { + final ClassData cd = classdata.get(i); + + if (hier.isClass(i)) { + sb.append("class "); + addPackage(sb, cd); + sb.append("C" + i); + } else { + sb.append("interface "); + addPackage(sb, cd); + sb.append("I" + i); + } + + boolean first = true; + for(final int j : hier.classes()) { + if (hier.inherits(i, j)) { + if (first) { + sb.append(" extends C" + j); + } else { + sb.append(", C" + j); + } + } + } + + first = true; + for(final int j : hier.interfaces()) { + if (hier.inherits(i, j)) { + if (first) { + sb.append(" implements I" + j); + } else { + sb.append(", I" + j); + } + } + } + + sb.append(cd); + } + + return sb.toString(); + } + } +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/runtime/SelectionResolution/classes/selectionresolution/Template.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ 
b/hotspot/test/runtime/SelectionResolution/classes/selectionresolution/Template.java Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,5005 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package selectionresolution; + +import java.util.function.BiConsumer; +import java.util.function.Consumer; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.EnumSet; +import java.util.HashSet; +import java.util.LinkedList; + +/** + * Templates are sets of transformations that are applied to a + * SelectionResolutionTestCase.Builder as part of building up a test + * case. Templates should contain a collection of different + * transforms, all of which represent an "interesting" case in a + * general category of cases. + * + */ +public class Template { + + public enum Kind { CLASS, INTERFACE; } + + public final Collection> cases; + public final String name; + + /** + * Create a template from a collection of lambdas that modify a Builder. + * + * @param name The name of the template. + * @param cases The cases in the template. + */ + public Template(final String name, + final Collection> cases) { + this.cases = cases; + this.name = name; + } + + /** + * Build a template out of a set of lambdas that modify a Builder. + * + * @param name The name of the template. + * @param cases The cases in the template. + */ + public Template(final String name, + final Consumer... cases) { + this(name, Arrays.asList(cases)); + } + + /** + * Build a template out of a set of lambdas that modify a Builder. + * Also include all cases from another template. + * + * @param name The name of the template. + * @param include Include all cases from this template. + * @param cases The cases in the template. + */ + public Template(final String name, + final Template include, + final Consumer... cases) { + this(name, new LinkedList(include.cases)); + this.cases.addAll(Arrays.asList(cases)); + } + + /** + * Build a template out of a set of lambdas that modify a Builder. + * Also include all cases from another template. + * + * @param name The name of the template. + * @param include Include all cases from this template. + * @param cases The cases in the template. + */ + public Template(final String name, + final Template... others) { + this(name, new LinkedList()); + + for(final Template template : others) { + cases.addAll(template.cases); + } + } + + /** + * Run all cases in the template. 
This will run each action in + * the template and then call the next action on a separate copy + * of the builder parameter. + * + * @param next The next action to perform on the Builder. + * @param builder The Builder to modify. + */ + public void runCases(final Consumer<SelectionResolutionTestCase.Builder> next, + final SelectionResolutionTestCase.Builder builder) { + for(final Consumer<SelectionResolutionTestCase.Builder> thiscase : cases) { + final SelectionResolutionTestCase.Builder localbuilder = builder.copy(); + thiscase.accept(localbuilder); + next.accept(localbuilder); + } + } + + public void printCases(final SelectionResolutionTestCase.Builder builder) { + int i = 1; + System.err.println("Template " + name + ":\n"); + for(final Consumer<SelectionResolutionTestCase.Builder> thiscase : cases) { + final SelectionResolutionTestCase.Builder localbuilder = builder.copy(); + thiscase.accept(localbuilder); + System.err.println("Case " + i++); + System.err.println(localbuilder); + } + } + + /* Create an empty class in the given package */ + public static final ClassData emptyClass(final ClassData.Package pck) { + return new ClassData(pck, null); + } + + /* These are functions that are used to build callsite templates */ + public static void callsiteIsMethodref(final SelectionResolutionTestCase.Builder builder) { + builder.callsite = builder.methodref; + } + + public static void callsiteSubclassMethodref(final SelectionResolutionTestCase.Builder builder) { + final int callsite = + builder.addClass(Template.emptyClass(ClassData.Package.SAME)); + builder.hier.addInherit(callsite, builder.methodref); + builder.callsite = callsite; + } + + public static void callsiteUnrelatedMethodref(final SelectionResolutionTestCase.Builder builder) { + final int callsite = + builder.addClass(Template.emptyClass(ClassData.Package.SAME)); + builder.callsite = callsite; + } + + public static void methodrefIsExpected(final SelectionResolutionTestCase.Builder builder) { + builder.methodref = builder.expected; + } + + public static final Template MethodrefEqualsExpected = + new Template("MethodrefEqualsExpected", + Template::methodrefIsExpected); + + /***************************** + * Set Invoke Template * + *****************************/ + + public static final Template SetInvoke(final SelectionResolutionTestCase.InvokeInstruction invoke) { + return new Template("SetInvoke(" + invoke + ")", + Collections.singleton((builder) -> { + builder.invoke = invoke; + })); + } + + /***************************** + * Result Combo Template * + *****************************/ + public static Template ResultCombo(final EnumSet<Kind> kinds, + final EnumSet<MethodData.Access> accesses, + final EnumSet<MethodData.Context> contexts, + final EnumSet<ClassData.Package> packages) { + final LinkedList<Consumer<SelectionResolutionTestCase.Builder>> cases = + new LinkedList<>(); + + for (final Kind kind : kinds) { + for (final MethodData.Access acc : accesses) { + for (final MethodData.Context ctx : contexts) { + if (!(acc == MethodData.Access.PRIVATE && + ctx == MethodData.Context.ABSTRACT)) { + for (final ClassData.Package pck : packages) { + cases.add((builder) -> { + final MethodData meth = new MethodData(acc, ctx); + final ClassData cls = new ClassData(pck, meth); + switch(kind) { + case CLASS: + builder.expected = builder.addClass(cls); + break; + case INTERFACE: + builder.expected = builder.addInterface(cls); + break; + } + }); + } + } + } + } + } + + return new Template("ResultCombo", cases); + } + + public static Template ResolutionOverride(final EnumSet<Kind> kinds, + final EnumSet<MethodData.Access> accesses, + final EnumSet<MethodData.Context> contexts, + final EnumSet<ClassData.Package> packages) { + final LinkedList<Consumer<SelectionResolutionTestCase.Builder>> cases = + new LinkedList<>(); + + for (final Kind kind : kinds) { + for (final MethodData.Access acc 
: accesses) { + for (final MethodData.Context ctx : contexts) { + if (!(acc == MethodData.Access.PRIVATE && + ctx == MethodData.Context.ABSTRACT)) { + for (final ClassData.Package pck : packages) { + cases.add((builder) -> { + final MethodData meth = new MethodData(acc, ctx); + final ClassData cls = new ClassData(pck, meth); + int override = -1; + switch(kind) { + case CLASS: + override = builder.addClass(cls); + break; + case INTERFACE: + override = builder.addInterface(cls); + break; + } + builder.hier.addInherit(override, builder.expected); + }); + } + } + } + } + } + + return new Template("ResultCombo", cases); + } + + /****************************** + * Resolution Templates * + ******************************/ + + private static MethodData getMethodData(final MethodData.Access acc, + final MethodData.Context ctx) { + if (!(acc == MethodData.Access.PUBLIC || + acc == MethodData.Access.PLACEHOLDER) && + ctx != MethodData.Context.STATIC) { + return null; + } else { + return new MethodData(MethodData.Access.PUBLIC, ctx); + } + } + + public static final Template MethodrefNotEqualsExpectedClass = + new Template("MethodrefNotEqualsExpectedClass", + /* Case 1: Inherit from super. + * + * C2[](res) + * C1[C2]() = mref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final int C2 = builder.expected; + final int C1 = builder.addClass(emptyClass(ClassData.Package.SAME)); + builder.hier.addInherit(C1, C2); + builder.methodref = C1; + }, + /* Case 2: Inherit from super. + * + * C2[](res), I[](def) + * C1[C2,I]() = mref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final MethodData.Context ctx = + builder.classdata.get(builder.expected).methoddata.context; + final MethodData.Access acc = + builder.classdata.get(builder.expected).methoddata.access; + final MethodData mdata = getMethodData(acc, ctx); + final ClassData withDef = new ClassData(pck, mdata); + final int C2 = builder.expected; + final int C1 = builder.addClass(emptyClass(ClassData.Package.SAME)); + final int I = builder.addInterface(withDef); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(C1, I); + builder.methodref = C1; + }, + /* Case 3: Inherit from super's super. + * + * C3[](res) + * C2[](), I[](def) + * C1[C2,I]() = mref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final MethodData.Context ctx = + builder.classdata.get(builder.expected).methoddata.context; + final MethodData.Access acc = + builder.classdata.get(builder.expected).methoddata.access; + final MethodData mdata = getMethodData(acc, ctx); + final ClassData withDef = new ClassData(pck, mdata); + final int C3 = builder.expected; + final int C2 = builder.addClass(emptyClass(pck)); + final int C1 = builder.addClass(emptyClass(ClassData.Package.SAME)); + final int I = builder.addInterface(withDef); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(C1, I); + builder.methodref = C1; + }); + + public static final Template IfaceMethodrefNotEqualsExpected = + new Template("IfaceMethodrefNotEqualsExpected", + /* Case 1: Inherit from super. 
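+ * + * (Editorial note on the notation used in these comments: each line names a + * class C or interface I, the brackets list its direct supertypes, and the + * parentheses describe the method it declares; "res" marks the declaration + * that resolution is expected to find, "def" some other definition, "priv" + * and "stat" private or static declarations that must be passed over, and + * "()" no declaration at all. A "*" stands for whatever an earlier template + * already put there, and "= mref", "= oref" and "= expected" mark the + * builder's methodref, objectref and expected slots.) + * 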
+ * + * I2[](res) + * I1[I2]() = mref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final int I2 = builder.expected; + final int I1 = builder.addInterface(emptyClass(pck)); + builder.hier.addInherit(I1, I2); + builder.methodref = I1; + }, + /* Case 2: Inherit from super, skip private. + * + * I2[](res) + * I2[I3](priv) + * I1[I2]() = mref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final MethodData meth = + new MethodData(MethodData.Access.PRIVATE, + MethodData.Context.INSTANCE); + final ClassData withPrivDef = new ClassData(pck, meth); + final int I3 = builder.expected; + final int I2 = builder.addInterface(withPrivDef); + final int I1 = builder.addInterface(emptyClass(pck)); + builder.hier.addInherit(I1, I2); + builder.hier.addInherit(I2, I3); + builder.methodref = I1; + }, + /* Case 3: Inherit from super, skip static. + * + * I2[](res) + * I2[I3](stat) + * I1[I2]() = mref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final MethodData meth = + new MethodData(MethodData.Access.PUBLIC, + MethodData.Context.STATIC); + final ClassData withStatDef = new ClassData(pck, meth); + final int I3 = builder.expected; + final int I2 = builder.addInterface(withStatDef); + final int I1 = builder.addInterface(emptyClass(pck)); + builder.hier.addInherit(I1, I2); + builder.hier.addInherit(I2, I3); + builder.methodref = I1; + }, + /* Case 4: Maximally-specific. + * + * I3[](def) + * I2[I3](res) + * I1[I2]() = mref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final MethodData.Context ctx = + builder.classdata.get(builder.expected).methoddata.context; + final MethodData.Access acc = + builder.classdata.get(builder.expected).methoddata.access; + final MethodData mdata = getMethodData(acc, ctx); + final ClassData withDef = new ClassData(pck, mdata); + final int I3 = builder.addInterface(withDef); + final int I2 = builder.expected; + final int I1 = builder.addInterface(emptyClass(pck)); + builder.hier.addInherit(I1, I2); + builder.hier.addInherit(I2, I3); + builder.methodref = I1; + }, + /* Case 5: Diamond, expected at top. + * + * I4[](res) + * I2[I4](), I3[I4]() + * I1[I2,I3]() = mref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final int I4 = builder.expected; + final int I3 = builder.addInterface(emptyClass(pck)); + final int I2 = builder.addInterface(emptyClass(pck)); + final int I1 = builder.addInterface(emptyClass(pck)); + builder.hier.addInherit(I1, I2); + builder.hier.addInherit(I1, I3); + builder.hier.addInherit(I2, I4); + builder.hier.addInherit(I3, I4); + builder.methodref = I1; + }, + /* Case 6: Diamond, skip private. 
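+ * + * (Editorial note: private and static interface methods are not inherited, + * so in this case and the next resolution has to look past the declaration + * in I2 and still report the one in I4.) + * 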
+ * + * I4[](res) + * I2[I4](priv), I3[I4]() + * I1[I2,I3]() = mref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final MethodData.Context ctx = + builder.classdata.get(builder.expected).methoddata.context; + final MethodData.Access acc = + builder.classdata.get(builder.expected).methoddata.access; + final MethodData mdata = getMethodData(acc, ctx); + final MethodData meth = + new MethodData(MethodData.Access.PRIVATE, + MethodData.Context.INSTANCE); + final ClassData withPrivDef = new ClassData(pck, meth); + final int I4 = builder.expected; + final int I3 = builder.addInterface(emptyClass(pck)); + final int I2 = builder.addInterface(withPrivDef); + final int I1 = builder.addInterface(emptyClass(pck)); + builder.hier.addInherit(I1, I2); + builder.hier.addInherit(I1, I3); + builder.hier.addInherit(I2, I4); + builder.hier.addInherit(I3, I4); + builder.methodref = I1; + }, + /* Case 7: Diamond, skip static. + * + * I4[](res) + * I2[I4](stat), I3[I4]() + * I1[I2,I3]() = mref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final MethodData.Context ctx = + builder.classdata.get(builder.expected).methoddata.context; + final MethodData.Access acc = + builder.classdata.get(builder.expected).methoddata.access; + final MethodData mdata = getMethodData(acc, ctx); + final MethodData meth = + new MethodData(MethodData.Access.PUBLIC, + MethodData.Context.STATIC); + final ClassData withStatDef = new ClassData(pck, meth); + final int I4 = builder.expected; + final int I3 = builder.addInterface(emptyClass(pck)); + final int I2 = builder.addInterface(withStatDef); + final int I1 = builder.addInterface(emptyClass(pck)); + builder.hier.addInherit(I1, I2); + builder.hier.addInherit(I1, I3); + builder.hier.addInherit(I2, I4); + builder.hier.addInherit(I3, I4); + builder.methodref = I1; + }, + /* Case 8: Diamond, maximally-specific. + * + * I4[](def) + * I2[I4](res), I3[I4]() + * I1[I2,I3]() = mref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final MethodData.Context ctx = + builder.classdata.get(builder.expected).methoddata.context; + final MethodData.Access acc = + builder.classdata.get(builder.expected).methoddata.access; + final MethodData mdata = getMethodData(acc, ctx); + final ClassData withDef = new ClassData(pck, mdata); + final int I4 = builder.addInterface(withDef); + final int I3 = builder.addInterface(emptyClass(pck)); + final int I2 = builder.expected; + final int I1 = builder.addInterface(emptyClass(pck)); + builder.hier.addInherit(I1, I2); + builder.hier.addInherit(I1, I3); + builder.hier.addInherit(I2, I4); + builder.hier.addInherit(I3, I4); + builder.methodref = I1; + }, + /* Case 9: Diamond, maximally-specific, skipping private. 
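+ * + * (Editorial note: I2 redeclares the method it inherits from I4, which makes + * I2 the maximally-specific declaration that resolution should report; the + * private method in I3 is not inherited and must be skipped.) + * 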
+ * + * I4[](def) + * I2[I4](res), I3[I4](priv) + * I1[I2,I3]() = mref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final MethodData.Context ctx = + builder.classdata.get(builder.expected).methoddata.context; + final MethodData.Access acc = + builder.classdata.get(builder.expected).methoddata.access; + final MethodData mdata = getMethodData(acc, ctx); + final ClassData withDef = new ClassData(pck, mdata); + final MethodData meth = + new MethodData(MethodData.Access.PRIVATE, + MethodData.Context.INSTANCE); + final ClassData withPrivDef = new ClassData(pck, meth); + final int I4 = builder.addInterface(withDef); + final int I3 = builder.addInterface(withPrivDef); + final int I2 = builder.expected; + final int I1 = builder.addInterface(emptyClass(pck)); + builder.hier.addInherit(I1, I2); + builder.hier.addInherit(I1, I3); + builder.hier.addInherit(I2, I4); + builder.hier.addInherit(I3, I4); + builder.methodref = I1; + }, + /* Case 10: Diamond, maximally-specific, skipping static. + * + * I4[](def) + * I2[I4](res), I3[I4](stat) + * I1[I2,I3]() = mref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final MethodData.Context ctx = + builder.classdata.get(builder.expected).methoddata.context; + final MethodData.Access acc = + builder.classdata.get(builder.expected).methoddata.access; + final MethodData mdata = getMethodData(acc, ctx); + final ClassData withDef = new ClassData(pck, mdata); + final MethodData meth = + new MethodData(MethodData.Access.PUBLIC, + MethodData.Context.STATIC); + final ClassData withStatDef = new ClassData(pck, meth); + final int I4 = builder.addInterface(withDef); + final int I3 = builder.addInterface(withStatDef); + final int I2 = builder.expected; + final int I1 = builder.addInterface(emptyClass(pck)); + builder.hier.addInherit(I1, I2); + builder.hier.addInherit(I1, I3); + builder.hier.addInherit(I2, I4); + builder.hier.addInherit(I3, I4); + builder.methodref = I1; + }); + + public static final Template MethodrefNotEqualsExpectedIface = + new Template("MethodrefNotEqualsExpectedIface", + /* Case 1: Inherit from superinterface. + * + * I[](res) + * C[I]() = mref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final int I = builder.expected; + final int C = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C, I); + builder.methodref = C; + }, + /* Case 2: Diamond, expected at top. + * + * I3[](res) + * I1[I3](), I2[I3]() + * C[I1,I2]() = mref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final int I3 = builder.expected; + final int I2 = builder.addInterface(emptyClass(pck)); + final int I1 = builder.addInterface(emptyClass(pck)); + final int C = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C, I1); + builder.hier.addInherit(C, I2); + builder.hier.addInherit(I1, I3); + builder.hier.addInherit(I2, I3); + builder.methodref = C; + }, + /* Case 3: Diamond, skipping private. 
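+ * + * (Editorial note: in this template the methodref is a class C that declares + * nothing itself, so resolution only finds the method through C's + * superinterfaces; here the private declaration in I1 must not be the one + * chosen.) + * 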
+ * + * I3[](def) + * I1[I3](priv), I2[I3]() + * C[I1,I2]() = mref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final MethodData.Context ctx = + builder.classdata.get(builder.expected).methoddata.context; + final MethodData.Access acc = + builder.classdata.get(builder.expected).methoddata.access; + final MethodData meth = + new MethodData(MethodData.Access.PRIVATE, + MethodData.Context.INSTANCE); + final ClassData withPrivDef = new ClassData(pck, meth); + final int I3 = builder.expected; + final int I2 = builder.addInterface(emptyClass(pck)); + final int I1 = builder.addInterface(withPrivDef); + final int C = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C, I1); + builder.hier.addInherit(C, I2); + builder.hier.addInherit(I1, I3); + builder.hier.addInherit(I2, I3); + builder.methodref = C; + }, + /* Case 4: Diamond, skipping static. + * + * I3[](def) + * I1[I3](stat), I2[I3]() + * C[I1,I2]() = mref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final MethodData.Context ctx = + builder.classdata.get(builder.expected).methoddata.context; + final MethodData.Access acc = + builder.classdata.get(builder.expected).methoddata.access; + final MethodData meth = + new MethodData(MethodData.Access.PUBLIC, + MethodData.Context.STATIC); + final ClassData withStatDef = new ClassData(pck, meth); + final int I3 = builder.expected; + final int I2 = builder.addInterface(emptyClass(pck)); + final int I1 = builder.addInterface(withStatDef); + final int C = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C, I1); + builder.hier.addInherit(C, I2); + builder.hier.addInherit(I1, I3); + builder.hier.addInherit(I2, I3); + builder.methodref = C; + }, + /* Case 5: Diamond, maximally-specific. + * + * I3[](def) + * I1[I3](res), I2[I3]() + * C[I1,I2]() = mref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final MethodData.Context ctx = + builder.classdata.get(builder.expected).methoddata.context; + final MethodData.Access acc = + builder.classdata.get(builder.expected).methoddata.access; + final MethodData mdata = getMethodData(acc, ctx); + final ClassData withDef = new ClassData(pck, mdata); + final int I3 = builder.addInterface(withDef); + final int I2 = builder.addInterface(emptyClass(pck)); + final int I1 = builder.expected; + final int C = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C, I1); + builder.hier.addInherit(C, I2); + builder.hier.addInherit(I1, I3); + builder.hier.addInherit(I2, I3); + builder.methodref = C; + }, + /* Case 6: Diamond, maximally-specific, skipping private. 
+ * + * I3[](def) + * I1[I3](res), I2[I3](priv) + * C[I1,I2]() = mref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final MethodData.Context ctx = + builder.classdata.get(builder.expected).methoddata.context; + final MethodData.Access acc = + builder.classdata.get(builder.expected).methoddata.access; + final MethodData mdata = getMethodData(acc, ctx); + final ClassData withDef = new ClassData(pck, mdata); + final MethodData meth = + new MethodData(MethodData.Access.PRIVATE, + MethodData.Context.INSTANCE); + final ClassData withPrivDef = new ClassData(pck, meth); + final int I3 = builder.addInterface(withDef); + final int I2 = builder.addInterface(withPrivDef); + final int I1 = builder.expected; + final int C = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C, I1); + builder.hier.addInherit(C, I2); + builder.hier.addInherit(I1, I3); + builder.hier.addInherit(I2, I3); + builder.methodref = C; + }, + /* Case 7: Diamond, maximally-specific, skipping static. + * + * I3[](def) + * I1[I3](res), I2[I3](stat) + * C[I1,I2]() = mref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final MethodData.Context ctx = + builder.classdata.get(builder.expected).methoddata.context; + final MethodData.Access acc = + builder.classdata.get(builder.expected).methoddata.access; + final MethodData mdata = getMethodData(acc, ctx); + final ClassData withDef = new ClassData(pck, mdata); + final MethodData meth = + new MethodData(MethodData.Access.PUBLIC, + MethodData.Context.STATIC); + final ClassData withStatDef = new ClassData(pck, meth); + final int I3 = builder.addInterface(withDef); + final int I2 = builder.addInterface(withStatDef); + final int I1 = builder.expected; + final int C = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C, I1); + builder.hier.addInherit(C, I2); + builder.hier.addInherit(I1, I3); + builder.hier.addInherit(I2, I3); + builder.methodref = C; + }, + /* Case 8: Diamond, with superclass, expected at top. + * + * I2[](res) + * C2[I2](), I1[I2]() + * C1[I1,C2]() = mref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final int I2 = builder.expected; + final int I1 = builder.addInterface(emptyClass(pck)); + final int C2 = builder.addInterface(emptyClass(pck)); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C1, I1); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(I1, I2); + builder.hier.addInherit(C2, I2); + builder.methodref = C1; + }, + /* Case 9: Diamond with superclass, maximally-specific. 
+ * + * I2[](def) + * C2[I2](), I1[I2](res), + * C1[I1,C2]() = mref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final MethodData.Context ctx = + builder.classdata.get(builder.expected).methoddata.context; + final MethodData.Access acc = + builder.classdata.get(builder.expected).methoddata.access; + final MethodData mdata = getMethodData(acc, ctx); + final ClassData withDef = new ClassData(pck, mdata); + final int I2 = builder.addInterface(withDef); + final int C2 = builder.addClass(emptyClass(pck)); + final int I1 = builder.expected; + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C1, I1); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(I1, I2); + builder.hier.addInherit(C2, I2); + builder.methodref = C1; + }, + /* Case 10: Inherit through superclass. + * + * I[](res) + * C2[I]() + * C1[C2]() = mref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final int I = builder.expected; + final int C2 = builder.addInterface(emptyClass(pck)); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C1, I); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(C2, I); + builder.methodref = C1; + }, + /* Case 11: Diamond, inherit through superclass, + * expected at top. + * + * I3[](res) + * I1[I3](), I2[I3]() + * C2[I1,I2]() + * C1[C2]() = mref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final int I3 = builder.expected; + final int I2 = builder.addInterface(emptyClass(pck)); + final int I1 = builder.addInterface(emptyClass(pck)); + final int C2 = builder.addClass(emptyClass(pck)); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C2, I1); + builder.hier.addInherit(C2, I2); + builder.hier.addInherit(I1, I3); + builder.hier.addInherit(I2, I3); + builder.hier.addInherit(C1, C2); + builder.methodref = C1; + }, + /* Case 12: Diamond through superclass, skip private. + * + * I3[](res) + * I1[I3](priv), I2[I3]() + * C2[I1,I2]() + * C1[C2]() = mref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final MethodData.Context ctx = + builder.classdata.get(builder.expected).methoddata.context; + final MethodData.Access acc = + builder.classdata.get(builder.expected).methoddata.access; + final MethodData mdata = getMethodData(acc, ctx); + final ClassData withDef = new ClassData(pck, mdata); + final MethodData meth = + new MethodData(MethodData.Access.PRIVATE, + MethodData.Context.INSTANCE); + final ClassData withPrivDef = new ClassData(pck, meth); + final int I3 = builder.expected; + final int I2 = builder.addInterface(emptyClass(pck)); + final int I1 = builder.addInterface(withPrivDef); + final int C2 = builder.addClass(emptyClass(pck)); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C2, I1); + builder.hier.addInherit(C2, I2); + builder.hier.addInherit(I1, I3); + builder.hier.addInherit(I2, I3); + builder.hier.addInherit(C1, C2); + builder.methodref = C1; + }, + /* Case 13: Diamond through superclass, skip static. 
+ * + * I3[](def) + * I1[I3](stat), I2[I3]() + * C2[I1,I2]() + * C1[C2]() = mref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final MethodData.Context ctx = + builder.classdata.get(builder.expected).methoddata.context; + final MethodData.Access acc = + builder.classdata.get(builder.expected).methoddata.access; + final MethodData mdata = getMethodData(acc, ctx); + final ClassData withDef = new ClassData(pck, mdata); + final MethodData meth = + new MethodData(MethodData.Access.PUBLIC, + MethodData.Context.STATIC); + final ClassData withStatDef = new ClassData(pck, meth); + final int I3 = builder.expected; + final int I2 = builder.addInterface(emptyClass(pck)); + final int I1 = builder.addInterface(withStatDef); + final int C2 = builder.addClass(emptyClass(pck)); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C2, I1); + builder.hier.addInherit(C2, I2); + builder.hier.addInherit(I1, I3); + builder.hier.addInherit(I2, I3); + builder.hier.addInherit(C1, C2); + builder.methodref = C1; + }, + /* Case 14: Diamond through superclass, maximally-specific. + * + * I3[](def) + * I1[I3](res), I2[I3]() + * C2[I1,I2]() + * C1[C2]() = mref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final MethodData.Context ctx = + builder.classdata.get(builder.expected).methoddata.context; + final MethodData.Access acc = + builder.classdata.get(builder.expected).methoddata.access; + final MethodData mdata = getMethodData(acc, ctx); + final ClassData withDef = new ClassData(pck, mdata); + final int I3 = builder.addInterface(withDef); + final int I2 = builder.addInterface(emptyClass(pck)); + final int I1 = builder.expected; + final int C2 = builder.addClass(emptyClass(pck)); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C2, I1); + builder.hier.addInherit(C2, I2); + builder.hier.addInherit(I1, I3); + builder.hier.addInherit(I2, I3); + builder.hier.addInherit(C1, C2); + builder.methodref = C1; + }, + /* Case 15: Diamond through superclass, + * maximally-specific, skip private. + * + * I3[](def) + * I1[I3](res), I2[I3](priv) + * C2[I1,I2]() + * C1[C2]() = mref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final MethodData.Context ctx = + builder.classdata.get(builder.expected).methoddata.context; + final MethodData.Access acc = + builder.classdata.get(builder.expected).methoddata.access; + final MethodData mdata = getMethodData(acc, ctx); + final ClassData withDef = new ClassData(pck, mdata); + final MethodData meth = + new MethodData(MethodData.Access.PRIVATE, + MethodData.Context.INSTANCE); + final ClassData withPrivDef = new ClassData(pck, meth); + final int I3 = builder.addInterface(withDef); + final int I2 = builder.addInterface(withPrivDef); + final int I1 = builder.expected; + final int C2 = builder.addClass(emptyClass(pck)); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C2, I1); + builder.hier.addInherit(C2, I2); + builder.hier.addInherit(I1, I3); + builder.hier.addInherit(I2, I3); + builder.hier.addInherit(C1, C2); + builder.methodref = C1; + }, + /* Case 16: Diamond through superclass, + * maximally-specific, skip static. 
+ * + * I3[](pub) + * I1[I3](res), I2[I3](stat) + * C2[I1,I2]() + * C1[C2]() = mref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final MethodData.Context ctx = + builder.classdata.get(builder.expected).methoddata.context; + final MethodData.Access acc = + builder.classdata.get(builder.expected).methoddata.access; + final MethodData mdata = getMethodData(acc, ctx); + final ClassData withDef = new ClassData(pck, mdata); + final MethodData meth = + new MethodData(MethodData.Access.PUBLIC, + MethodData.Context.STATIC); + final ClassData withStatDef = new ClassData(pck, meth); + final int I3 = builder.addInterface(withDef); + final int I2 = builder.addInterface(withStatDef); + final int I1 = builder.expected; + final int C2 = builder.addClass(emptyClass(pck)); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C2, I1); + builder.hier.addInherit(C2, I2); + builder.hier.addInherit(I1, I3); + builder.hier.addInherit(I2, I3); + builder.hier.addInherit(C1, C2); + builder.methodref = C1; + }, + /* Case 17: Diamond, with superclass, inherit through + * superclass, expected at top. + * + * I2[](res) + * C3[I2](), I1[I2]() + * C2[I1,C3]() + * C1[C2]() = mref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final int I2 = builder.expected; + final int I1 = builder.addInterface(emptyClass(pck)); + final int C3 = builder.addInterface(emptyClass(pck)); + final int C2 = builder.addClass(emptyClass(pck)); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C2, I1); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(I1, I2); + builder.hier.addInherit(C3, I2); + builder.hier.addInherit(C1, C2); + builder.methodref = C1; + }, + /* Case 18: Diamond, with superclass, inherit through + * superclass, maximally-specific. + * + * I2[](def) + * C3[I2](), I1[I2](res), + * C2[I1,C3]() + * C1[I1,C2]() = mref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final MethodData.Context ctx = + builder.classdata.get(builder.expected).methoddata.context; + final MethodData.Access acc = + builder.classdata.get(builder.expected).methoddata.access; + final MethodData mdata = getMethodData(acc, ctx); + final ClassData withDef = new ClassData(pck, mdata); + final int I2 = builder.addInterface(withDef); + final int C3 = builder.addClass(emptyClass(pck)); + final int I1 = builder.expected; + final int C2 = builder.addClass(emptyClass(pck)); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C2, I1); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(I1, I2); + builder.hier.addInherit(C3, I2); + builder.hier.addInherit(C1, C2); + builder.methodref = C1; + }); + + public static final Template IfaceMethodrefAmbiguous = + new Template("IfaceMethodrefAmbiguous", + /* Ambiguous. 
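+ * + * (Editorial note: I2 and I3 both declare the method and neither is more + * specific than the other, so there is no unique maximally-specific + * declaration; resolution is allowed to choose among them arbitrarily.) + * 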
+ * + * I2[](def), I3[](def) + * I1[I2]() = mref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final ClassData expected = + builder.classdata.get(builder.expected); + final int I3 = builder.addInterface(expected); + final int I2 = builder.expected; + final int I1 = builder.addInterface(emptyClass(pck)); + builder.hier.addInherit(I1, I2); + builder.hier.addInherit(I1, I3); + builder.methodref = I1; + }); + + public static final Template MethodrefAmbiguous = + new Template("MethodrefAmbiguous", + /* Ambiguous. + * + * I1[](def), I2[](def) + * C[I2]() = mref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final ClassData expected = + builder.classdata.get(builder.expected); + final int I1 = builder.addInterface(expected); + final int I2 = builder.expected; + final int C = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C, I2); + builder.hier.addInherit(C, I1); + builder.methodref = C; + }); + + /****************************** + * Callsite Templates * + ******************************/ + + public static final Template AllCallsiteCases = + new Template("AllCallsiteCases", + Template::callsiteIsMethodref, + Template::callsiteSubclassMethodref, + Template::callsiteUnrelatedMethodref); + + public static final Template InvokespecialCallsiteCases = + new Template("InvokespecialCallsiteCases", + Template::callsiteIsMethodref, + Template::callsiteSubclassMethodref); + + public static final Template CallsiteEqualsMethodref = + new Template("CallsiteEqualsMethodref", + Template::callsiteIsMethodref); + + public static final Template CallsiteSubclassMethodref = + new Template("CallsiteSubclassMethodref", + Template::callsiteSubclassMethodref); + + public static final Template CallsiteUnrelatedToMethodref = + new Template("CallsiteUnrelatedToMethodref", + Template::callsiteUnrelatedMethodref); + + public static final Template CallsiteNotEqualsMethodref = + new Template("CallsiteNotEqualsMethodref", + Template::callsiteSubclassMethodref, + Template::callsiteUnrelatedMethodref); + + /********************************* + * AbstractMethodError Templates * + *********************************/ + + public static final Template ReabstractExpectedIface = + new Template("ReabstractExpectedIface", + (builder) -> {}, + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData expected = + builder.classdata.get(builder.expected); + final ClassData.Package pck = expected.packageId; + final MethodData.Access acc = + builder.classdata.get(builder.expected).methoddata.access; + final MethodData mdata = + getMethodData(acc, MethodData.Context.STATIC); + final ClassData withDef = new ClassData(pck, mdata); + final int C2 = builder.addInterface(withDef); + final int C1 = builder.expected; + builder.hier.addInherit(C1, C2); + }, + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData expected = + builder.classdata.get(builder.expected); + final ClassData.Package pck = expected.packageId; + final MethodData.Access acc = + builder.classdata.get(builder.expected).methoddata.access; + final MethodData mdata = + getMethodData(acc, MethodData.Context.INSTANCE); + final ClassData withDef = new ClassData(pck, mdata); + final int C2 = builder.addInterface(withDef); + final int C1 = builder.expected; + builder.hier.addInherit(C1, C2); + }); + + public static final Template 
ReabstractExpectedClass = + new Template("ReabstractExpectedClass", + ReabstractExpectedIface, + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData expected = + builder.classdata.get(builder.expected); + final ClassData.Package pck = expected.packageId; + final MethodData.Access acc = + builder.classdata.get(builder.expected).methoddata.access; + final MethodData mdata = + getMethodData(acc, MethodData.Context.STATIC); + final ClassData withDef = new ClassData(pck, mdata); + final int C2 = builder.addClass(withDef); + final int C1 = builder.expected; + builder.hier.addInherit(C1, C2); + }, + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData expected = + builder.classdata.get(builder.expected); + final ClassData.Package pck = expected.packageId; + final MethodData.Access acc = + builder.classdata.get(builder.expected).methoddata.access; + final MethodData mdata = + getMethodData(acc, MethodData.Context.INSTANCE); + final ClassData withDef = new ClassData(pck, mdata); + final int C2 = builder.addClass(withDef); + final int C1 = builder.expected; + builder.hier.addInherit(C1, C2); + }); + + public static final Template ReabstractMethodrefResolvedClass = + new Template("ReabstractMethodrefResolvedClass", + /* Case 1: Objectref overrides. + * + * C2[](*) = mref + * C1[C2](res) = oref = expected + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData.Access acc = + builder.classdata.get(builder.expected).methoddata.access; + final ClassData withDef = + new ClassData(pck, new MethodData(acc, MethodData.Context.ABSTRACT)); + final int C2 = builder.methodref; + final int C1 = builder.addClass(withDef); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + builder.expected = C1; + }, + /* Case 2: Objectref's super overrides. + * + * C3[*](*) = mref + * C2[C3](res) = expected + * C1[C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData.Access acc = + builder.classdata.get(builder.expected).methoddata.access; + final ClassData withDef = + new ClassData(pck, new MethodData(acc, MethodData.Context.ABSTRACT)); + final int C3 = builder.methodref; + final int C2 = builder.addClass(withDef); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + builder.expected = C2; + }, + /* Case 3: Objectref's super overrides, skip private. 
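+ * + * (Editorial note: in these Reabstract templates the declaration marked (res) + * is added as an abstract method, so once selection lands on it the test is + * expected to end in AbstractMethodError; here the private method in C1 must + * not be selected in its place.) + * 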
+ * + * C3[*](*) = mref + * C2[C3](res) = expected + * C1[C2](priv) = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData.Access acc = + builder.classdata.get(builder.expected).methoddata.access; + final ClassData withDef = + new ClassData(pck, new MethodData(acc, MethodData.Context.ABSTRACT)); + final MethodData meth = + new MethodData(MethodData.Access.PRIVATE, + MethodData.Context.INSTANCE); + final ClassData withPrivDef = new ClassData(pck, meth); + final int C3 = builder.methodref; + final int C2 = builder.addClass(withDef); + final int C1 = builder.addClass(withPrivDef); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + builder.expected = C2; + }, + /* Case 4: Objectref's super overrides, skip static. + * + * C3[*](*) = mref + * C2[C3](res) = expected + * C1[C2](stat) = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData.Access acc = + builder.classdata.get(builder.expected).methoddata.access; + final ClassData withDef = + new ClassData(pck, new MethodData(acc, MethodData.Context.ABSTRACT)); + final MethodData meth = + new MethodData(MethodData.Access.PUBLIC, + MethodData.Context.STATIC); + final ClassData withStatDef = new ClassData(pck, meth); + final int C3 = builder.methodref; + final int C2 = builder.addClass(withDef); + final int C1 = builder.addClass(withStatDef); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + builder.expected = C2; + }); + + public static final Template ReabstractMethodrefResolvedIface = + new Template("ReabstractMethodrefResolvedIface", + /* Case 1: Objectref overrides. + * + * C2[](*) = mref + * C1[C2](res) = oref = expected + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData.Access acc = + builder.classdata.get(builder.expected).methoddata.access; + final ClassData withDef = + new ClassData(pck, new MethodData(acc, MethodData.Context.ABSTRACT)); + final int C2 = builder.methodref; + final int C1 = builder.addClass(withDef); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + builder.expected = C1; + }, + /* Case 2: Objectref's super overrides. + * + * C3[](*) = mref + * C2[C3](res) = expected + * C1[C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData.Access acc = + builder.classdata.get(builder.expected).methoddata.access; + final ClassData withDef = + new ClassData(pck, new MethodData(acc, MethodData.Context.ABSTRACT)); + final int C3 = builder.methodref; + final int C2 = builder.addClass(withDef); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + builder.expected = C2; + }, + /* Case 3: Objectref's super overrides, skip private. 
+ * + * C3[*](*) = mref + * C2[C3](res) = expected + * C1[C2](priv) = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData.Access acc = + builder.classdata.get(builder.expected).methoddata.access; + final ClassData withDef = + new ClassData(pck, new MethodData(acc, MethodData.Context.ABSTRACT)); + final MethodData meth = + new MethodData(MethodData.Access.PRIVATE, + MethodData.Context.INSTANCE); + final ClassData withPrivDef = new ClassData(pck, meth); + final int C3 = builder.methodref; + final int C2 = builder.addClass(withDef); + final int C1 = builder.addClass(withPrivDef); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + builder.expected = C2; + }, + /* Case 4: Objectref's super overrides, skip static. + * + * C3[*](*) = mref + * C2[C3](res) = expected + * C1[C2](stat) = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData.Access acc = + builder.classdata.get(builder.expected).methoddata.access; + final ClassData withDef = + new ClassData(pck, new MethodData(acc, MethodData.Context.ABSTRACT)); + final MethodData meth = + new MethodData(MethodData.Access.PUBLIC, + MethodData.Context.STATIC); + final ClassData withStatDef = new ClassData(pck, meth); + final int C3 = builder.methodref; + final int C2 = builder.addClass(withDef); + final int C1 = builder.addClass(withStatDef); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + builder.expected = C2; + }, + /* Case 5: Overlapping with new interface overriding. + * + * I2[*](def) = old expected + * C2[*](*) = mref, I1[I2](res) = expected + * C1[C2,I2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData.Access acc = + builder.classdata.get(builder.expected).methoddata.access; + final ClassData withDef = + new ClassData(pck, new MethodData(acc, MethodData.Context.ABSTRACT)); + final int C2 = builder.methodref; + final int I2 = builder.expected; + final int I1 = builder.addInterface(withDef); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(C1, I1); + builder.hier.addInherit(I1, I2); + builder.objectref = C1; + builder.expected = I1; + }, + /* Case 6: Overlapping with new interface, skip private. 
+ * + * I2[*](def) = old expected + * C2[*](*) = mref, I1[I2](res) = expected + * C1[C2,I2](priv) = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData.Access acc = + builder.classdata.get(builder.expected).methoddata.access; + final ClassData withDef = + new ClassData(pck, new MethodData(acc, MethodData.Context.ABSTRACT)); + final MethodData meth = + new MethodData(MethodData.Access.PRIVATE, + MethodData.Context.INSTANCE); + final ClassData withPrivDef = new ClassData(pck, meth); + final int C2 = builder.methodref; + final int I2 = builder.expected; + final int I1 = builder.addInterface(withDef); + final int C1 = builder.addClass(withPrivDef); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(C1, I1); + builder.hier.addInherit(I1, I2); + builder.objectref = C1; + builder.expected = I1; + }, + /* Case 7: Overlapping with new interface, skip static. + * + * I2[*](def) = old expected + * C2[*](*) = mref, I1[I2](res) = expected + * C1[C2,I2](stat) = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData.Access acc = + builder.classdata.get(builder.expected).methoddata.access; + final ClassData withDef = + new ClassData(pck, new MethodData(acc, MethodData.Context.ABSTRACT)); + final MethodData meth = + new MethodData(MethodData.Access.PUBLIC, + MethodData.Context.STATIC); + final ClassData withStatDef = new ClassData(pck, meth); + final int C2 = builder.methodref; + final int I2 = builder.expected; + final int I1 = builder.addInterface(withDef); + final int C1 = builder.addClass(withStatDef); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(C1, I1); + builder.hier.addInherit(I1, I2); + builder.objectref = C1; + builder.expected = I1; + }, + /* Case 8: Overlap with objectref's super with new + * interface overriding, inherit through class. + * + * I2[*](def) = old expected + * C3[](*) = mref, I1[I2](res) = expected + * C2[C3,I1]() + * C1[C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData.Access acc = + builder.classdata.get(builder.expected).methoddata.access; + final ClassData withDef = + new ClassData(pck, new MethodData(acc, MethodData.Context.ABSTRACT)); + final int C3 = builder.methodref; + final int C2 = builder.addClass(emptyClass(pck)); + final int I2 = builder.expected; + final int I1 = builder.addInterface(withDef); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(C2, I1); + builder.hier.addInherit(I1, I2); + builder.objectref = C1; + builder.expected = I1; + }, + /* Case 9: Overlap with objectref's super with new + * interface double diamond, overriding. 
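+ * + * (Editorial note: the abstract redeclaration in I1 is the most specific of + * the interface declarations, so selection is expected to land on it and + * raise AbstractMethodError.) + * 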
+ * + * I3[*](def) = old expected + * C3[](*) = mref, I2[I3](def) + * C2[C3,I2](), I1[I2](res) = expected + * C1[C2,I1]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData.Access acc = + builder.classdata.get(builder.expected).methoddata.access; + final ClassData withDef = + new ClassData(pck, new MethodData(acc, MethodData.Context.ABSTRACT)); + final int C3 = builder.methodref; + final int C2 = builder.addClass(emptyClass(pck)); + final int I3 = builder.expected; + final int I2 = builder.addInterface(withDef); + final int I1 = builder.addInterface(withDef); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(C1, I1); + builder.hier.addInherit(C2, I2); + builder.hier.addInherit(I1, I2); + builder.hier.addInherit(I2, I3); + builder.objectref = C1; + builder.expected = I1; + }, + /* Case 10: Overlap with objectref's super with new + * interface double diamond, skip private. + * + * I3[*](def) = old expected + * C3[](*) = mref, I2[I3](res) = expected + * C2[C3,I2](), I1[I2](priv) + * C1[C2,I1]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData.Access acc = + builder.classdata.get(builder.expected).methoddata.access; + final ClassData withDef = + new ClassData(pck, new MethodData(acc, MethodData.Context.ABSTRACT)); + final MethodData meth = + new MethodData(MethodData.Access.PRIVATE, + MethodData.Context.INSTANCE); + final ClassData withPrivDef = new ClassData(pck, meth); + final int C3 = builder.methodref; + final int C2 = builder.addClass(emptyClass(pck)); + final int I3 = builder.expected; + final int I2 = builder.addInterface(withDef); + final int I1 = builder.addInterface(withPrivDef); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(C1, I1); + builder.hier.addInherit(C2, I2); + builder.hier.addInherit(I1, I2); + builder.hier.addInherit(I2, I3); + builder.objectref = C1; + builder.expected = I2; + }, + /* Case 11: Overlap with objectref's super with new + * interface double diamond, skip static. 
+ * + * I3[*](def) = old expected + * C3[](*) = mref, I2[I3](res) = expected + * C2[C3,I2](), I1[I2](stat) + * C1[C2,I1]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData.Access acc = + builder.classdata.get(builder.expected).methoddata.access; + final ClassData withDef = + new ClassData(pck, new MethodData(acc, MethodData.Context.ABSTRACT)); + final MethodData meth = + new MethodData(MethodData.Access.PUBLIC, + MethodData.Context.STATIC); + final ClassData withStatDef = new ClassData(pck, meth); + final int C3 = builder.methodref; + final int C2 = builder.addClass(emptyClass(pck)); + final int I3 = builder.expected; + final int I2 = builder.addInterface(withDef); + final int I1 = builder.addInterface(withStatDef); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(C1, I1); + builder.hier.addInherit(C2, I2); + builder.hier.addInherit(I1, I2); + builder.hier.addInherit(I2, I3); + builder.objectref = C1; + builder.expected = I2; + }, + /* Case 12: Objectref's super overrides, skip interface below. + * + * C3[](*) = mref + * C2[C3](res) = expected, I[](def) + * C1[C2,I]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData.Access acc = + builder.classdata.get(builder.expected).methoddata.access; + final ClassData withDef = + new ClassData(pck, new MethodData(acc, MethodData.Context.ABSTRACT)); + final int C3 = builder.methodref; + final int C2 = builder.addClass(withDef); + final int C1 = builder.addClass(emptyClass(pck)); + final int I = builder.addInterface(withDef); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(C1, I); + builder.objectref = C1; + builder.expected = C2; + }, + /* Case 13: Objectref's super overrides, skip interface above. + * + * C3[](*) = mref, I[](def) + * C2[C3,I](res) = expected + * C1[C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData.Access acc = + builder.classdata.get(builder.expected).methoddata.access; + final ClassData withDef = + new ClassData(pck, new MethodData(acc, MethodData.Context.ABSTRACT)); + final int C3 = builder.methodref; + final int C2 = builder.addClass(withDef); + final int C1 = builder.addClass(emptyClass(pck)); + final int I = builder.addInterface(withDef); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(C2, I); + builder.objectref = C1; + builder.expected = C2; + }); + + public static final Template ReabstractIfaceMethodrefResolved = + new Template("ReabstractIfaceMethodrefResolved", + /* Case 1: Objectref overrides. 
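+ * + * (Editorial note: this is the interface-methodref counterpart of the + * template above; the resolved interface method is redeclared as abstract in + * the objectref's hierarchy, so selection is again expected to end in + * AbstractMethodError.) + * 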
+ * + * I[](*) = mref + * C[I](res) = oref = expected + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData.Access acc = + builder.classdata.get(builder.expected).methoddata.access; + final ClassData withDef = + new ClassData(pck, new MethodData(acc, MethodData.Context.ABSTRACT)); + final int I = builder.methodref; + final int C = builder.addClass(withDef); + builder.hier.addInherit(C, I); + builder.objectref = C; + builder.expected = C; + }, + /* Case 2: Diamond, methodref at top, overriding. + * + * I3[](*) = mref + * I1[I3](), I2[I3](res) = expected + * C[I1,I2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData.Access acc = + builder.classdata.get(builder.expected).methoddata.access; + final ClassData withDef = + new ClassData(pck, new MethodData(acc, MethodData.Context.ABSTRACT)); + final int I3 = builder.methodref; + final int I2 = builder.addInterface(withDef); + final int I1 = builder.addInterface(emptyClass(pck)); + final int C = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C, I1); + builder.hier.addInherit(C, I2); + builder.hier.addInherit(I1, I3); + builder.hier.addInherit(I2, I3); + builder.objectref = C; + builder.expected = I2; + }, + /* Case 3: Diamond, methodref at top, skip static. + * + * I3[](*) = mref + * I1[I3](), I2[I3](res) = expected + * C[I1,I2](stat) = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData.Access acc = + builder.classdata.get(builder.expected).methoddata.access; + final ClassData withDef = + new ClassData(pck, new MethodData(acc, MethodData.Context.ABSTRACT)); + final MethodData meth = + new MethodData(MethodData.Access.PUBLIC, + MethodData.Context.STATIC); + final ClassData withStatDef = new ClassData(pck, meth); + final int I3 = builder.methodref; + final int I2 = builder.addInterface(withDef); + final int I1 = builder.addInterface(emptyClass(pck)); + final int C = builder.addClass(withStatDef); + builder.hier.addInherit(C, I1); + builder.hier.addInherit(C, I2); + builder.hier.addInherit(I1, I3); + builder.hier.addInherit(I2, I3); + builder.objectref = C; + builder.expected = I2; + }, + /* Case 4: Diamond, with superclass, methodref at top, + * class overriding. 
+ * + * I2[](*) = mref + * C2[I2](res) = expected, I1[I2]() + * C1[I1,C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData.Access acc = + builder.classdata.get(builder.expected).methoddata.access; + final ClassData withDef = + new ClassData(pck, new MethodData(acc, MethodData.Context.ABSTRACT)); + final int I2 = builder.methodref; + final int I1 = builder.addInterface(emptyClass(pck)); + final int C2 = builder.addClass(withDef); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C1, I1); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(I1, I2); + builder.hier.addInherit(C2, I2); + builder.objectref = C1; + builder.expected = C2; + }, + /* Case 5: Diamond, with superclass, methodref at top, + * class overriding, skip static. + * + * I2[](*) = mref + * C2[I2](res) = expected, I1[I2]() + * C1[I1,C2](stat) = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData.Access acc = + builder.classdata.get(builder.expected).methoddata.access; + final ClassData withDef = + new ClassData(pck, new MethodData(acc, MethodData.Context.ABSTRACT)); + final MethodData meth = + new MethodData(MethodData.Access.PUBLIC, + MethodData.Context.STATIC); + final ClassData withStatDef = new ClassData(pck, meth); + final int I2 = builder.methodref; + final int I1 = builder.addInterface(emptyClass(pck)); + final int C2 = builder.addClass(withDef); + final int C1 = builder.addClass(withStatDef); + builder.hier.addInherit(C1, I1); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(I1, I2); + builder.hier.addInherit(C2, I2); + builder.objectref = C1; + builder.expected = C2; + }, + /* Case 6: Diamond, with superclass, expected at top, + * interface overriding + * + * I2[](*) = mref + * C2[I2](), I1[I2](res) = expected + * C1[I1,C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData.Access acc = + builder.classdata.get(builder.expected).methoddata.access; + final ClassData withDef = + new ClassData(pck, new MethodData(acc, MethodData.Context.ABSTRACT)); + final int I2 = builder.methodref; + final int I1 = builder.addInterface(withDef); + final int C2 = builder.addClass(emptyClass(pck)); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C1, I1); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(I1, I2); + builder.hier.addInherit(C2, I2); + builder.objectref = C1; + builder.expected = I1; + }, + /* Case 7: Diamond, with superclass, expected at top, + * interface skip static + * + * I2[](*) = mref + * C2[I2](), I1[I2](res) = expected + * C1[I1,C2](stat) = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData.Access acc = + builder.classdata.get(builder.expected).methoddata.access; + final ClassData withDef = + new ClassData(pck, new MethodData(acc, 
MethodData.Context.ABSTRACT)); + final MethodData meth = + new MethodData(MethodData.Access.PUBLIC, + MethodData.Context.STATIC); + final ClassData withStatDef = new ClassData(pck, meth); + final int I2 = builder.methodref; + final int I1 = builder.addInterface(withDef); + final int C2 = builder.addClass(emptyClass(pck)); + final int C1 = builder.addClass(withStatDef); + builder.hier.addInherit(C1, I1); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(I1, I2); + builder.hier.addInherit(C2, I2); + builder.objectref = C1; + builder.expected = I1; + }, + /* Case 8: Y, with superclass, overlapping, expected + * at top, class overrides + * + * C2[I2](res) = expected, I1[](*) = mref + * C1[I1,C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData.Access acc = + builder.classdata.get(builder.expected).methoddata.access; + final ClassData withDef = + new ClassData(pck, new MethodData(acc, MethodData.Context.ABSTRACT)); + final int I1 = builder.methodref; + final int C2 = builder.addClass(withDef); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C1, I1); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + builder.expected = C2; + }, + /* Case 9: Diamond, with superclass, overlapping, expected + * at top, class overrides + * + * I2[](def) = old expected + * C2[I2](res) = expected, I1[](*) = mref + * C1[I1,C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData.Access acc = + builder.classdata.get(builder.expected).methoddata.access; + final ClassData withDef = + new ClassData(pck, new MethodData(acc, MethodData.Context.ABSTRACT)); + final int I2 = builder.expected; + final int I1 = builder.methodref; + final int C2 = builder.addClass(withDef); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C1, I1); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(C2, I2); + builder.objectref = C1; + builder.expected = C2; + }, + /* Case 10: Diamond, with superclass, overlapping, expected + * at top, class overrides, skipping static + * + * I2[](def) = old expected + * C2[I2](res) = expected, I1[](*) = mref + * C1[I1,C2](stat) = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData.Access acc = + builder.classdata.get(builder.expected).methoddata.access; + final ClassData withDef = + new ClassData(pck, new MethodData(acc, MethodData.Context.ABSTRACT)); + final MethodData meth = + new MethodData(MethodData.Access.PUBLIC, + MethodData.Context.STATIC); + final ClassData withStatDef = new ClassData(pck, meth); + final int I2 = builder.expected; + final int I1 = builder.methodref; + final int C2 = builder.addClass(withDef); + final int C1 = builder.addClass(withStatDef); + builder.hier.addInherit(C1, I1); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(C2, I2); + builder.objectref = C1; + builder.expected = C2; + }, + /* Case 11: Superclass overrides. 
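+ * (Diagram notation: Name[direct supertypes](declared method); (*) marks the class or interface whose method the methodref resolves to, (res) the definition selection is expected to pick, and (stat)/(priv) static or private definitions that selection must skip.)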
+ * + * I[](*) = mref + * C2[I](res) = expected + * C1[C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final MethodData.Access acc = + builder.classdata.get(builder.expected).methoddata.access; + final ClassData withDef = + new ClassData(pck, new MethodData(acc, MethodData.Context.ABSTRACT)); + final int I = builder.methodref; + final int C2 = builder.addClass(withDef); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C1, I); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(C2, I); + builder.objectref = C1; + }, + /* Case 12: Superclass overrides, skipping static. + * + * I[](*) = mref + * C2[I](res) = expected + * C1[C2](stat) = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final MethodData.Access acc = + builder.classdata.get(builder.expected).methoddata.access; + final ClassData withDef = + new ClassData(pck, new MethodData(acc, MethodData.Context.ABSTRACT)); + final MethodData meth = + new MethodData(MethodData.Access.PUBLIC, + MethodData.Context.STATIC); + final ClassData withStatDef = new ClassData(pck, meth); + final int I = builder.methodref; + final int C2 = builder.addClass(withDef); + final int C1 = builder.addClass(withStatDef); + builder.hier.addInherit(C1, I); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(C2, I); + builder.objectref = C1; + }, + /* Case 13: Double diamond, with superclass, inherit through + * superclass, expected at top. + * + * I3[](def) = old expected + * C3[I3](), I2[*](*) = mref + * C2[I2,C3](), I1[I2](res) = expected + * C1[C2,I1]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData.Access acc = + builder.classdata.get(builder.expected).methoddata.access; + final ClassData withDef = + new ClassData(pck, new MethodData(acc, MethodData.Context.ABSTRACT)); + final int I3 = builder.expected; + final int I2 = builder.methodref; + final int I1 = builder.addInterface(withDef); + final int C3 = builder.addClass(emptyClass(pck)); + final int C2 = builder.addClass(emptyClass(pck)); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C2, I2); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C3, I3); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(I1, I2); + builder.hier.addInherit(C1, I1); + builder.objectref = C1; + builder.expected = I1; + }, + /* Case 14: Double diamond, with superclass, inherit through + * superclass, expected at top. 
+ * + * I3[](def) = mref + * C3[I3](), I2[*](*) = expected + * C2[I2,C3](), I1[I2](priv) + * C1[C2,I1]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData.Access acc = + builder.classdata.get(builder.expected).methoddata.access; + final ClassData withDef = + new ClassData(pck, new MethodData(acc, MethodData.Context.ABSTRACT)); + final MethodData meth = + new MethodData(MethodData.Access.PRIVATE, + MethodData.Context.INSTANCE); + final ClassData withPrivDef = new ClassData(pck, meth); + final int I3 = builder.methodref; + final int I2 = builder.addInterface(withDef); + final int I1 = builder.addInterface(withPrivDef); + final int C3 = builder.addClass(emptyClass(pck)); + final int C2 = builder.addClass(emptyClass(pck)); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C2, I2); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C3, I3); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(I2, I3); + builder.hier.addInherit(I1, I2); + builder.hier.addInherit(C1, I1); + builder.objectref = C1; + builder.expected = I2; + }, + /* Case 15: Double diamond, with superclass, inherit through + * superclass, expected at top. + * + * I3[](def) = mref + * C3[I3](), I2[*](*) = expected + * C2[I2,C3](), I1[I2](stat) + * C1[C2,I1]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData.Access acc = + builder.classdata.get(builder.expected).methoddata.access; + final ClassData withDef = + new ClassData(pck, new MethodData(acc, MethodData.Context.ABSTRACT)); + final MethodData meth = + new MethodData(MethodData.Access.PUBLIC, + MethodData.Context.STATIC); + final ClassData withStatDef = new ClassData(pck, meth); + final int I3 = builder.methodref; + final int I2 = builder.addInterface(withDef); + final int I1 = builder.addInterface(withStatDef); + final int C3 = builder.addClass(emptyClass(pck)); + final int C2 = builder.addClass(emptyClass(pck)); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C2, I2); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C3, I3); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(I2, I3); + builder.hier.addInherit(I1, I2); + builder.hier.addInherit(C1, I1); + builder.objectref = C1; + builder.expected = I2; + }); + + /****************************** + * Abstract Overrides * + ******************************/ + + public static final Template OverrideAbstractExpectedIface = + Template.ResolutionOverride(EnumSet.of(Template.Kind.INTERFACE), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.allOf(MethodData.Context.class), + EnumSet.of(ClassData.Package.SAME)); + + public static final Template OverrideAbstractExpectedClass = + Template.ResolutionOverride(EnumSet.allOf(Template.Kind.class), + EnumSet.of(MethodData.Access.PUBLIC), + EnumSet.allOf(MethodData.Context.class), + EnumSet.of(ClassData.Package.SAME)); + + public static final Template SelectionOverrideAbstract = + new Template("SelectionOverrideAbstract", + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData expected = + builder.classdata.get(builder.expected); + final MethodData olddef = + expected.methoddata; + if 
(MethodData.Context.ABSTRACT == olddef.context) { + final ClassData.Package pck = expected.packageId; + final MethodData.Access acc = olddef.access; + final MethodData mdata = + getMethodData(MethodData.Access.PUBLIC, + MethodData.Context.INSTANCE); + final ClassData withDef = new ClassData(pck, mdata); + final int C2 = builder.objectref; + final int C1 = builder.addClass(withDef); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + builder.expected = C1; + } + }); + + + /****************************** + * Ignored Abstract Templates * + ******************************/ + + public static final Template IgnoredAbstract = + new Template("IgnoredAbstract", + (builder) -> {}, + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData methodref = + builder.classdata.get(builder.methodref); + final ClassData.Package pck = methodref.packageId; + final MethodData mdata = + getMethodData(MethodData.Access.PUBLIC, + MethodData.Context.ABSTRACT); + final ClassData withDef = new ClassData(pck, mdata); + final int C2 = builder.addInterface(withDef); + final int C1 = builder.methodref; + builder.hier.addInherit(C1, C2); + }); + + /****************************** + * Selection Templates * + ******************************/ + + + + public static final Template TrivialObjectref = + new Template("TrivialObjectref", + Collections.singleton((builder) -> { + builder.objectref = builder.methodref; + })); + + public static final Template TrivialObjectrefNotEqualMethodref = + new Template("TrivialObjectrefNotEqualMethodref", + Collections.singleton( + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final ClassData.Package pck = oldexpected.packageId; + final int C2 = builder.methodref; + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + })); + + public static final Template MethodrefSelectionResolvedIsClassNoOverride = + new Template("MethodrefSelectionResolvedIsClassNoOverride", + /* Trivial. + * + * C[](*) = mref = oref + */ + (builder) -> { + builder.objectref = builder.methodref; + }, + /* Case 1: Inherit from super. + * + * C2[](*) = mref + * C1[C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final int C2 = builder.methodref; + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + }, + /* Case 2: Objectref has private def. + * + * C2[](*) = mref + * C1[C2](priv) = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData meth = + new MethodData(MethodData.Access.PRIVATE, + MethodData.Context.INSTANCE); + final ClassData withDef = new ClassData(pck, meth); + final int C2 = builder.methodref; + final int C1 = builder.addClass(withDef); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + }, + /* Case 3: Objectref has static def. 
+ * + * C2[](*) = mref + * C1[C2](stat) = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData meth = + new MethodData(MethodData.Access.PUBLIC, + MethodData.Context.STATIC); + final ClassData withDef = new ClassData(pck, meth); + final int C2 = builder.methodref; + final int C1 = builder.addClass(withDef); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + }, + /* Case 4: Skip inherit from interface. + * + * C2[](*) = mref, I[](def) + * C1[C2,I]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final ClassData.Package pck = oldexpected.packageId; + final MethodData.Context ctx = + builder.classdata.get(builder.expected).methoddata.context; + final MethodData.Access acc = + builder.classdata.get(builder.expected).methoddata.access; + final MethodData mdata = getMethodData(acc, ctx); + final ClassData withDef = new ClassData(pck, mdata); + final int C2 = builder.methodref; + final int C1 = builder.addClass(emptyClass(pck)); + final int I = builder.addInterface(withDef); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(C1, I); + builder.objectref = C1; + }, + /* Case 5: Objectref's super has a private def. + * + * C3[*](*) = mref + * C2[C3](priv) + * C1[C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData meth = + new MethodData(MethodData.Access.PRIVATE, + MethodData.Context.INSTANCE); + final ClassData withDef = + new ClassData(pck, meth); + final int C3 = builder.methodref; + final int C2 = builder.addClass(withDef); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + }, + /* Case 6: Objectref's super has a static def. + * + * C3[*](*) = mref + * C2[C3](stat) + * C1[C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData meth = + new MethodData(MethodData.Access.PUBLIC, + MethodData.Context.STATIC); + final ClassData withDef = + new ClassData(pck, meth); + final int C3 = builder.methodref; + final int C2 = builder.addClass(withDef); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + }); + + public static final Template MethodrefSelectionResolvedIsClassOverride = + new Template("MethodrefSelectionResolvedIsClassOverride", + /* Case 7: Objectref overrides. 
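+ * (Here the objectref class C1 itself redeclares the resolved method, so selection is expected to pick C1's definition rather than the one inherited from C2.)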
+ * + * C2[](*) = mref + * C1[C2](res) = oref = expected + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final ClassData withDef = + new ClassData(pck, oldexpected.methoddata); + final int C2 = builder.methodref; + final int C1 = builder.addClass(withDef); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + builder.expected = C1; + }, + /* Case 8: Objectref's super overrides. + * + * C3[*](*) = mref + * C2[C3](res) + * C1[C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final ClassData withDef = + new ClassData(pck, oldexpected.methoddata); + final int C3 = builder.methodref; + final int C2 = builder.addClass(withDef); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + builder.expected = C2; + }, + /* Case 9: Objectref's super overrides, + * objectref has a private def. + * + * C3[*](*) = mref + * C2[C3](res) + * C1[C2](priv) = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final ClassData withDef = + new ClassData(pck, oldexpected.methoddata); + final MethodData meth = + new MethodData(MethodData.Access.PRIVATE, + MethodData.Context.INSTANCE); + final ClassData withPrivDef = + new ClassData(pck, meth); + final int C3 = builder.methodref; + final int C2 = builder.addClass(withDef); + final int C1 = builder.addClass(withPrivDef); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + builder.expected = C2; + }, + /* Case 10: Objectref's super overrides, + * objectref has a static def. + * + * C3[*](*) = mref + * C2[C3](res) + * C1[C2](stat) = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final ClassData withDef = + new ClassData(pck, oldexpected.methoddata); + final MethodData meth = + new MethodData(MethodData.Access.PUBLIC, + MethodData.Context.STATIC); + final ClassData withPrivDef = + new ClassData(pck, meth); + final int C3 = builder.methodref; + final int C2 = builder.addClass(withDef); + final int C1 = builder.addClass(withPrivDef); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + builder.expected = C2; + }); + + public static final Template MethodrefSelectionResolvedIsClass = + new Template("MethodrefSelectionResolvedIsClass", + MethodrefSelectionResolvedIsClassNoOverride, + MethodrefSelectionResolvedIsClassOverride); + + public static final Template MethodrefSelectionPackageSkipNoOverride = + new Template("MethodrefSelectionPackageSkipNoOverride", + MethodrefSelectionResolvedIsClass, + /* Case 11: Objectref has public def in other package. 
+ * + * C2[](*) = mref + * Other.C1[C2](pub) = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData meth = + new MethodData(MethodData.Access.PUBLIC, + MethodData.Context.INSTANCE); + final ClassData withDef = + new ClassData(ClassData.Package.OTHER, meth); + final int C2 = builder.methodref; + final int C1 = builder.addClass(withDef); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + }, + /* Case 12: Objectref has package private def in other package. + * + * C2[](*) = mref + * Other.C1[C2](pack) = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData meth = + new MethodData(MethodData.Access.PACKAGE, + MethodData.Context.INSTANCE); + final ClassData withDef = + new ClassData(ClassData.Package.OTHER, meth); + final int C2 = builder.methodref; + final int C1 = builder.addClass(withDef); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + }, + /* Case 13: Objectref has protected def in other package. + * + * C2[](*) = mref + * Other.C1[C2](prot) = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData meth = + new MethodData(MethodData.Access.PROTECTED, + MethodData.Context.INSTANCE); + final ClassData withDef = + new ClassData(ClassData.Package.OTHER, meth); + final int C2 = builder.methodref; + final int C1 = builder.addClass(withDef); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + }, + /* Case 14: Objectref's super has a public def in other package. + * + * C3[*](*) = mref + * Other.C2[C3](pub) = expected + * C1[C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData meth = + new MethodData(MethodData.Access.PUBLIC, + MethodData.Context.INSTANCE); + final ClassData withDef = + new ClassData(ClassData.Package.OTHER, meth); + final int C3 = builder.methodref; + final int C2 = builder.addClass(withDef); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + }, + /* Case 15: Objectref's super has a package + * private def in other package. 
+ * + * C3[*](*) = mref + * Other.C2[C3](pack) = expected + * C1[C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData meth = + new MethodData(MethodData.Access.PACKAGE, + MethodData.Context.INSTANCE); + final ClassData withDef = + new ClassData(ClassData.Package.OTHER, meth); + final int C3 = builder.methodref; + final int C2 = builder.addClass(withDef); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + }, + /* Case 16: Objectref's super has a protected def + * in other package. + * + * C3[*](*) = mref + * Other.C2[C3](prot) = expected + * C1[C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData meth = + new MethodData(MethodData.Access.PROTECTED, + MethodData.Context.INSTANCE); + final ClassData withDef = + new ClassData(ClassData.Package.OTHER, meth); + final int C3 = builder.methodref; + final int C2 = builder.addClass(withDef); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + }, + /* Case 18: Objectref has a public def in other + * package, skip private. + * + * C3[*](*) = mref + * C2[C3](priv) + * Other.C1[C2](pub) = oref, expected + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData meth = + new MethodData(MethodData.Access.PUBLIC, + MethodData.Context.INSTANCE); + final ClassData withDef = + new ClassData(ClassData.Package.OTHER, meth); + final MethodData privmeth = + new MethodData(MethodData.Access.PRIVATE, + MethodData.Context.INSTANCE); + final ClassData withPrivDef = + new ClassData(pck, privmeth); + final int C3 = builder.methodref; + final int C2 = builder.addClass(withPrivDef); + final int C1 = builder.addClass(withDef); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + }, + /* Case 19: Objectref has a package private def in other + * package, skip private. + * + * C3[*](*) = mref + * C2[C3](priv) + * Other.C1[C2](pack) = oref, expected + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData meth = + new MethodData(MethodData.Access.PACKAGE, + MethodData.Context.INSTANCE); + final ClassData withDef = + new ClassData(ClassData.Package.OTHER, meth); + final MethodData privmeth = + new MethodData(MethodData.Access.PRIVATE, + MethodData.Context.INSTANCE); + final ClassData withPrivDef = + new ClassData(pck, privmeth); + final int C3 = builder.methodref; + final int C2 = builder.addClass(withPrivDef); + final int C1 = builder.addClass(withDef); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + }, + /* Case 20: Objectref has a protected def in other + * package, skip private. 
+ * + * C3[*](*) = mref + * C2[C3](priv) + * Other.C1[C2](pro) = oref, expected + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData meth = + new MethodData(MethodData.Access.PROTECTED, + MethodData.Context.INSTANCE); + final ClassData withDef = + new ClassData(ClassData.Package.OTHER, meth); + final MethodData privmeth = + new MethodData(MethodData.Access.PRIVATE, + MethodData.Context.INSTANCE); + final ClassData withPrivDef = + new ClassData(pck, privmeth); + final int C3 = builder.methodref; + final int C2 = builder.addClass(withPrivDef); + final int C1 = builder.addClass(withDef); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + }, + /* Case 21: Objectref's super has a public def in other + * package, skip private. + * + * C3[*](*) = mref + * Other.C2[C3](pub) = expected + * C1[C2](priv) = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData meth = + new MethodData(MethodData.Access.PUBLIC, + MethodData.Context.INSTANCE); + final ClassData withDef = + new ClassData(ClassData.Package.OTHER, meth); + final MethodData privmeth = + new MethodData(MethodData.Access.PRIVATE, + MethodData.Context.INSTANCE); + final ClassData withPrivDef = + new ClassData(pck, privmeth); + final int C3 = builder.methodref; + final int C2 = builder.addClass(withDef); + final int C1 = builder.addClass(withPrivDef); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + }, + /* Case 22: Objectref's super has a package private + * def in other package, skip private. + * + * C3[*](*) = mref + * Other.C2[C3](pack) = expected + * C1[C2](priv) = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData meth = + new MethodData(MethodData.Access.PACKAGE, + MethodData.Context.INSTANCE); + final ClassData withDef = + new ClassData(ClassData.Package.OTHER, meth); + final MethodData privmeth = + new MethodData(MethodData.Access.PRIVATE, + MethodData.Context.INSTANCE); + final ClassData withPrivDef = + new ClassData(pck, privmeth); + final int C3 = builder.methodref; + final int C2 = builder.addClass(withDef); + final int C1 = builder.addClass(withPrivDef); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + }, + /* Case 23: Objectref's super has a protected def + * in other package, skip private. 
+ * + * C3[*](*) = mref + * Other.C2[C3](pro) = expected + * C1[C2](priv) = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData meth = + new MethodData(MethodData.Access.PROTECTED, + MethodData.Context.INSTANCE); + final ClassData withDef = + new ClassData(ClassData.Package.OTHER, meth); + final MethodData privmeth = + new MethodData(MethodData.Access.PRIVATE, + MethodData.Context.INSTANCE); + final ClassData withPrivDef = + new ClassData(pck, privmeth); + final int C3 = builder.methodref; + final int C2 = builder.addClass(withDef); + final int C1 = builder.addClass(withPrivDef); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + }); + + public static final Template MethodrefSelectionPackageSkip = + new Template("MethodrefSelectionPackageSkip", + MethodrefSelectionPackageSkipNoOverride, + /* Case 17: Transitive override. + * + * C3[*](*) = mref + * C2[C3](pub) + * Other.C1[C2](pack) = oref, expected + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData meth = + new MethodData(MethodData.Access.PUBLIC, + MethodData.Context.INSTANCE); + final MethodData packmeth = + new MethodData(MethodData.Access.PACKAGE, + MethodData.Context.INSTANCE); + final ClassData withPubDef = new ClassData(pck, meth); + final ClassData withPackDef = + new ClassData(ClassData.Package.OTHER, packmeth); + final int C3 = builder.methodref; + final int C2 = builder.addClass(withPubDef); + final int C1 = builder.addClass(withPackDef); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + builder.expected = C1; + }, + /* Case 24: Transitive override, skip private in between. + * + * C4[*](*) = mref + * C3[C4](pub) + * C2[C3](priv) + * Other.C1[C2](pack) = oref, expected + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData meth = + new MethodData(MethodData.Access.PUBLIC, + MethodData.Context.INSTANCE); + final MethodData packmeth = + new MethodData(MethodData.Access.PACKAGE, + MethodData.Context.INSTANCE); + final ClassData withPubDef = new ClassData(pck, meth); + final ClassData withPackDef = + new ClassData(ClassData.Package.OTHER, packmeth); + final MethodData privmeth = + new MethodData(MethodData.Access.PRIVATE, + MethodData.Context.INSTANCE); + final ClassData withPrivDef = + new ClassData(pck, privmeth); + final int C4 = builder.methodref; + final int C3 = builder.addClass(withPubDef); + final int C2 = builder.addClass(withPrivDef); + final int C1 = builder.addClass(withPackDef); + builder.hier.addInherit(C3, C4); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + builder.expected = C1; + }, + /* Case 25: Transitive override, skip private in between. 
+ * + * C4[*](*) = mref + * C3[C4](pub) + * Other.C2[C3](pack) = expected + * C1[C2](pack) = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData meth = + new MethodData(MethodData.Access.PUBLIC, + MethodData.Context.INSTANCE); + final MethodData packmeth = + new MethodData(MethodData.Access.PACKAGE, + MethodData.Context.INSTANCE); + final ClassData withPubDef = new ClassData(pck, meth); + final ClassData withPackDef = + new ClassData(ClassData.Package.OTHER, packmeth); + final MethodData privmeth = + new MethodData(MethodData.Access.PRIVATE, + MethodData.Context.INSTANCE); + final ClassData withPrivDef = + new ClassData(pck, privmeth); + final int C4 = builder.methodref; + final int C3 = builder.addClass(withPubDef); + final int C2 = builder.addClass(withPackDef); + final int C1 = builder.addClass(withPrivDef); + builder.hier.addInherit(C3, C4); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C1, C2); + builder.objectref = C2; + builder.expected = C2; + }); + + public static final Template MethodrefSelectionResolvedIsIfaceNoOverride = + new Template("MethodrefSelectionResolvedIsIfaceNoOverride", + /* Trivial objectref. + * + * C[](*) = mref = oref + */ + (builder) -> { + builder.objectref = builder.methodref; + }, + /* Case 1: Inherit from super. + * + * C2[](*) = mref + * C1[C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final int C2 = builder.methodref; + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + }, + /* Case 2: Objectref has private def. + * + * C2[](*) = mref + * C1[C2](priv) = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData meth = + new MethodData(MethodData.Access.PRIVATE, + MethodData.Context.INSTANCE); + final ClassData withDef = new ClassData(pck, meth); + final int C2 = builder.methodref; + final int C1 = builder.addClass(withDef); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + }, + /* Case 3: Objectref has static def. + * + * C2[](*) = mref + * C1[C2](stat) = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData meth = + new MethodData(MethodData.Access.PUBLIC, + MethodData.Context.STATIC); + final ClassData withDef = new ClassData(pck, meth); + final int C2 = builder.methodref; + final int C1 = builder.addClass(withDef); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + }, + /* Case 4: Overlapping. 
+ * + * I[*](res) = expected + * C2[*](*) = mref + * C1[C2,I]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final int C2 = builder.methodref; + final int I = builder.expected; + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(C1, I); + builder.objectref = C1; + }, + /* Case 5: Overlapping with new interface. + * + * I2[*](res) = expected + * C2[*](*) = mref, I1[I2]() + * C1[C2,I1]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final int C2 = builder.methodref; + final int I2 = builder.expected; + final int I1 = builder.addInterface(emptyClass(pck)); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(C1, I1); + builder.hier.addInherit(I1, I2); + builder.objectref = C1; + }, + /* Case 6: Overlapping with new interface with private def. + * + * I2[*](res) = expected + * C2[*](*) = mref, I1[I2](priv) + * C1[C2,I1]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final MethodData meth = + new MethodData(MethodData.Access.PRIVATE, + MethodData.Context.INSTANCE); + final ClassData withPrivDef = + new ClassData(pck, meth); + final int C2 = builder.methodref; + final int I2 = builder.expected; + final int I1 = builder.addInterface(withPrivDef); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(C1, I1); + builder.hier.addInherit(I1, I2); + builder.objectref = C1; + }, + /* Case 7: Overlapping with new interface with static def. + * + * I2[*](res) = expected + * C2[*](*) = mref, I1[I2](stat) + * C1[C2,I1]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final MethodData meth = + new MethodData(MethodData.Access.PUBLIC, + MethodData.Context.STATIC); + final ClassData withStatDef = + new ClassData(pck, meth); + final int C2 = builder.methodref; + final int I2 = builder.expected; + final int I1 = builder.addInterface(withStatDef); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(C1, I1); + builder.hier.addInherit(I1, I2); + builder.objectref = C1; + }, + /* Case 8: Objectref's super has a private def. + * + * C3[*](*) = mref + * C2[C3](priv) + * C1[C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData meth = + new MethodData(MethodData.Access.PRIVATE, + MethodData.Context.INSTANCE); + final ClassData withDef = + new ClassData(pck, meth); + final int C3 = builder.methodref; + final int C2 = builder.addClass(withDef); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + }, + /* Case 9: Objectref's super has a static def. 
+ * + * C3[*](*) = mref + * C2[C3](stat) + * C1[C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData meth = + new MethodData(MethodData.Access.PUBLIC, + MethodData.Context.STATIC); + final ClassData withDef = + new ClassData(pck, meth); + final int C3 = builder.methodref; + final int C2 = builder.addClass(withDef); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + }, + /* Case 10: Overlap with objectref's super. + * + * I[*](res) = expected + * C3[](*) = mref + * C2[C3,I]() + * C1[C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final int C3 = builder.methodref; + final int C2 = builder.addClass(emptyClass(pck)); + final int I = builder.expected; + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(C2, I); + builder.objectref = C1; + }, + /* Case 11: Overlap with objectref's super with new interface. + * + * I2[*](res) = expected + * C3[](*) = mref, I1[I2]() + * C2[C3,I1]() + * C1[C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final int C3 = builder.methodref; + final int C2 = builder.addClass(emptyClass(pck)); + final int I2 = builder.expected; + final int I1 = builder.addInterface(emptyClass(pck)); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(C2, I1); + builder.hier.addInherit(I1, I2); + builder.objectref = C1; + }, + /* Case 12: Overlap with objectref's super with new + * interface with private def. + * + * I2[*](res) = expected + * C3[](*) = mref, I1[I2](priv) + * C2[C3,I1]() + * C1[C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final MethodData meth = + new MethodData(MethodData.Access.PRIVATE, + MethodData.Context.INSTANCE); + final ClassData withPrivDef = + new ClassData(pck, meth); + final int C3 = builder.methodref; + final int C2 = builder.addClass(emptyClass(pck)); + final int I2 = builder.expected; + final int I1 = builder.addInterface(withPrivDef); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(C2, I1); + builder.hier.addInherit(I1, I2); + builder.objectref = C1; + }, + /* Case 13: Overlap with objectref's super with new + * interface with static def. 
+ * + * I2[*](res) = expected + * C3[](*) = mref, I1[I2](stat) + * C2[C3,I1]() + * C1[C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final MethodData meth = + new MethodData(MethodData.Access.PUBLIC, + MethodData.Context.STATIC); + final ClassData withStatDef = + new ClassData(pck, meth); + final int C3 = builder.methodref; + final int C2 = builder.addClass(emptyClass(pck)); + final int I2 = builder.expected; + final int I1 = builder.addInterface(withStatDef); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(C2, I1); + builder.hier.addInherit(I1, I2); + builder.objectref = C1; + }, + /* Case 14: Overlap with objectref's super with new + * interface double diamond. + * + * I3[*](res) = expected + * C3[](*) = mref, I2[I3]() + * C2[C3,I2](), I1[I2]() + * C1[C2,I1]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final int C3 = builder.methodref; + final int C2 = builder.addClass(emptyClass(pck)); + final int I3 = builder.expected; + final int I2 = builder.addInterface(emptyClass(pck)); + final int I1 = builder.addInterface(emptyClass(pck)); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(C1, I1); + builder.hier.addInherit(C2, I2); + builder.hier.addInherit(I1, I2); + builder.objectref = C1; + }, + /* Case 15: Overlapping with new interface with private def. + * + * C2[*](*) = mref, I1[](priv) + * C1[C2,I1]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final MethodData meth = + new MethodData(MethodData.Access.PRIVATE, + MethodData.Context.INSTANCE); + final ClassData withPrivDef = + new ClassData(pck, meth); + final int C2 = builder.methodref; + final int I1 = builder.addInterface(withPrivDef); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(C1, I1); + builder.objectref = C1; + }, + /* Case 16: Overlapping with new interface with static def. + * + * C2[*](*) = mref, I1[](stat) + * C1[C2,I1]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final MethodData meth = + new MethodData(MethodData.Access.PUBLIC, + MethodData.Context.STATIC); + final ClassData withStatDef = + new ClassData(pck, meth); + final int C2 = builder.methodref; + final int I1 = builder.addInterface(withStatDef); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(C1, I1); + builder.objectref = C1; + }); + + public static final Template MethodrefSelectionResolvedIsIface = + new Template("MethodrefSelectionResolvedIsIface", + MethodrefSelectionResolvedIsIfaceNoOverride, + /* Case 17: Objectref overrides. 
+ * + * C2[](*) = mref + * C1[C2](res) = oref = expected + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final ClassData withDef = + new ClassData(pck, oldexpected.methoddata); + final int C2 = builder.methodref; + final int C1 = builder.addClass(withDef); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + builder.expected = C1; + }, + /* Case 18: Overlapping with new interface overriding. + * + * I2[*](def) = old expected + * C2[*](*) = mref, I1[I2](res) = expected + * C1[C2,I1]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final ClassData withDef = + new ClassData(pck, oldexpected.methoddata); + final int C2 = builder.methodref; + final int I2 = builder.expected; + final int I1 = builder.addInterface(withDef); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(C1, I1); + builder.hier.addInherit(I1, I2); + builder.objectref = C1; + builder.expected = I1; + }, + /* Case 19: Objectref's super overrides. + * + * C3[](*) = mref + * C2[C3](res) = expected + * C1[C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final ClassData withDef = + new ClassData(pck, oldexpected.methoddata); + final int C3 = builder.methodref; + final int C2 = builder.addClass(withDef); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + builder.expected = C2; + }, + /* Case 20: Overlap with objectref's super with + * new interface overriding. + * + * I2[*](def) = old expected + * C3[](*) = mref, I1[I2](res) = expected + * C2[C3,I1]() + * C1[C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final ClassData withDef = + new ClassData(pck, oldexpected.methoddata); + final int C3 = builder.methodref; + final int C2 = builder.addClass(emptyClass(pck)); + final int I2 = builder.expected; + final int I1 = builder.addInterface(withDef); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(C2, I1); + builder.hier.addInherit(I1, I2); + builder.objectref = C1; + builder.expected = I1; + }, + /* Case 21: Overlap with objectref's super with new + * interface double diamond, overriding. 
+ * + * I3[*](def) = old expected + * C3[](*) = mref, I2[I3](def) + * C2[C3,I2](), I1[I2](res) = expected + * C1[C2,I1]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final ClassData withDef = + new ClassData(pck, oldexpected.methoddata); + final int C3 = builder.methodref; + final int C2 = builder.addClass(emptyClass(pck)); + final int I3 = builder.expected; + final int I2 = builder.addInterface(withDef); + final int I1 = builder.addInterface(withDef); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(C1, I1); + builder.hier.addInherit(C2, I2); + builder.hier.addInherit(I1, I2); + builder.hier.addInherit(I2, I3); + builder.objectref = C1; + builder.expected = I1; + }, + /* Case 22: Objectref's super overrides, skip private. + * + * C3[](*) = mref + * C2[C3](res) = expected + * C1[C2](priv) = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData meth = + new MethodData(MethodData.Access.PRIVATE, + MethodData.Context.INSTANCE); + final ClassData withPrivDef = + new ClassData(pck, meth); + final ClassData withDef = + new ClassData(pck, oldexpected.methoddata); + final int C3 = builder.methodref; + final int C2 = builder.addClass(withDef); + final int C1 = builder.addClass(withPrivDef); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + builder.expected = C2; + }, + /* Case 23: Objectref's super overrides, skip static. + * + * C3[](*) = mref + * C2[C3](res) = expected + * C1[C2](stat) = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData meth = + new MethodData(MethodData.Access.PUBLIC, + MethodData.Context.STATIC); + final ClassData withStatDef = + new ClassData(pck, meth); + final ClassData withDef = + new ClassData(pck, oldexpected.methoddata); + final int C3 = builder.methodref; + final int C2 = builder.addClass(withDef); + final int C1 = builder.addClass(withStatDef); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + builder.expected = C2; + }, + /* Case 24: Overlap with objectref's super with new + * interface double diamond, overriding, skip private. 
+ * + * I3[*](def) = old expected + * C3[](*) = mref, I2[I3](res) = expected + * C2[C3,I2](), I1[I2](priv) + * C1[C2,I1]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final ClassData withDef = + new ClassData(pck, oldexpected.methoddata); + final MethodData meth = + new MethodData(MethodData.Access.PRIVATE, + MethodData.Context.INSTANCE); + final ClassData withPrivDef = + new ClassData(pck, meth); + final int C3 = builder.methodref; + final int C2 = builder.addClass(emptyClass(pck)); + final int I3 = builder.expected; + final int I2 = builder.addInterface(withDef); + final int I1 = builder.addInterface(withPrivDef); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(C1, I1); + builder.hier.addInherit(C2, I2); + builder.hier.addInherit(I1, I2); + builder.hier.addInherit(I2, I3); + builder.objectref = C1; + builder.expected = I2; + }, + /* Case 25: Overlap with objectref's super with new + * interface double diamond, overriding, skip static. + * + * I3[*](def) = old expected + * C3[](*) = mref, I2[I3](res) = expected + * C2[C3,I2](), I1[I2](stat) + * C1[C2,I1]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final ClassData withDef = + new ClassData(pck, oldexpected.methoddata); + final MethodData meth = + new MethodData(MethodData.Access.PUBLIC, + MethodData.Context.STATIC); + final ClassData withStatDef = + new ClassData(pck, meth); + final int C3 = builder.methodref; + final int C2 = builder.addClass(emptyClass(pck)); + final int I3 = builder.expected; + final int I2 = builder.addInterface(withDef); + final int I1 = builder.addInterface(withStatDef); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(C1, I1); + builder.hier.addInherit(C2, I2); + builder.hier.addInherit(I1, I2); + builder.hier.addInherit(I2, I3); + builder.objectref = C1; + builder.expected = I2; + }, + /* Case 26: Skip interface method after class overrides. + * + * C3[](*) = mref + * C2[C3](res) = expected, I[](def) + * C1[C2, I]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final ClassData withDef = + new ClassData(pck, oldexpected.methoddata); + final int C3 = builder.methodref; + final int C2 = builder.addClass(withDef); + final int C1 = builder.addClass(emptyClass(pck)); + final int I = builder.addInterface(withDef); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(C1, I); + builder.objectref = C1; + builder.expected = C2; + }, + /* Case 27: Skip interface method after class overrides. 
+ * + * C3[](*) = mref, I[](def) + * C2[C3,I](res) = expected + * C1[C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final ClassData withDef = + new ClassData(pck, oldexpected.methoddata); + final int C3 = builder.methodref; + final int C2 = builder.addClass(withDef); + final int C1 = builder.addClass(emptyClass(pck)); + final int I = builder.addInterface(withDef); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(C2, I); + builder.objectref = C1; + builder.expected = C2; + }, + /* Case 28: Overlap with objectref's super with new + * interface double diamond, overriding. + * + * I3[*](def) = old expected + * C3[](*) = mref, I2[I3](def) + * C2[C3,I2](res) = expected, I1[I2](def) + * C1[C2,I1]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final ClassData withDef = + new ClassData(pck, oldexpected.methoddata); + final int C3 = builder.methodref; + final int C2 = builder.addClass(withDef); + final int I3 = builder.expected; + final int I2 = builder.addInterface(withDef); + final int I1 = builder.addInterface(withDef); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(C1, I1); + builder.hier.addInherit(C2, I2); + builder.hier.addInherit(I1, I2); + builder.hier.addInherit(I2, I3); + builder.objectref = C1; + builder.expected = C2; + }); + + public static final Template IfaceMethodrefSelectionNoOverride = + new Template("IfaceMethodrefSelectionNoOverride", + /* Case 1: Inherit from super. + * + * I[](*) = mref + * C[I]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final int I = builder.methodref; + final int C = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C, I); + builder.objectref = C; + }, + /* Case 2: Objectref has static def + * + * I[](*) = mref + * C[I](stat) = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final MethodData meth = + new MethodData(MethodData.Access.PUBLIC, + MethodData.Context.STATIC); + final ClassData withStatDef = + new ClassData(pck, meth); + final int I = builder.methodref; + final int C = builder.addClass(withStatDef); + builder.hier.addInherit(C, I); + builder.objectref = C; + }, + /* Case 3: Diamond, methodref at top. 
+ * + * I3[](*) = mref + * I1[I3](), I2[I3]() + * C[I1,I2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final int I3 = builder.methodref; + final int I2 = builder.addInterface(emptyClass(pck)); + final int I1 = builder.addInterface(emptyClass(pck)); + final int C = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C, I1); + builder.hier.addInherit(C, I2); + builder.hier.addInherit(I1, I3); + builder.hier.addInherit(I2, I3); + builder.objectref = C; + }, + /* Case 4: Diamond, methodref at top, skip private def + * + * I3[](*) = mref + * I1[I3](), I2[I3](priv) + * C[I1,I2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final MethodData meth = + new MethodData(MethodData.Access.PRIVATE, + MethodData.Context.INSTANCE); + final ClassData withPrivDef = + new ClassData(pck, meth); + final int I3 = builder.methodref; + final int I2 = builder.addInterface(withPrivDef); + final int I1 = builder.addInterface(emptyClass(pck)); + final int C = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C, I1); + builder.hier.addInherit(C, I2); + builder.hier.addInherit(I1, I3); + builder.hier.addInherit(I2, I3); + builder.objectref = C; + }, + /* Case 5: Diamond, methodref at top, skip static def + * + * I3[](*) = mref + * I1[I3](), I2[I3](stat) + * C[I1,I2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final MethodData meth = + new MethodData(MethodData.Access.PUBLIC, + MethodData.Context.STATIC); + final ClassData withStatDef = + new ClassData(pck, meth); + final int I3 = builder.methodref; + final int I2 = builder.addInterface(withStatDef); + final int I1 = builder.addInterface(emptyClass(pck)); + final int C = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C, I1); + builder.hier.addInherit(C, I2); + builder.hier.addInherit(I1, I3); + builder.hier.addInherit(I2, I3); + builder.objectref = C; + }, + /* Case 6: Diamond, overlap with resolution. + * + * I3[](res) = expected + * I1[I3](), I2[](*) = mref + * C[I1,I2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final int I3 = builder.expected; + final int I2 = builder.methodref; + final int I1 = builder.addInterface(emptyClass(pck)); + final int C = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C, I1); + builder.hier.addInherit(C, I2); + builder.hier.addInherit(I1, I3); + builder.objectref = C; + }, + /* Case 7: Diamond, with superclass, expected at top. + * + * I2[](*) = mref + * C2[I2](), I1[I2]() + * C1[I1,C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final int I2 = builder.methodref; + final int I1 = builder.addInterface(emptyClass(pck)); + final int C2 = builder.addClass(emptyClass(pck)); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C1, I1); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(I1, I2); + builder.hier.addInherit(C2, I2); + builder.objectref = C1; + }, + /* Case 8: Diamond, with superclass, expected at top, + * class has static def. 
+ * + * I2[](*) = mref + * C2[I2](stat), I1[I2]() + * C1[I1,C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final MethodData meth = + new MethodData(MethodData.Access.PUBLIC, + MethodData.Context.STATIC); + final ClassData withStatDef = + new ClassData(pck, meth); + final int I2 = builder.methodref; + final int I1 = builder.addInterface(emptyClass(pck)); + final int C2 = builder.addClass(withStatDef); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C1, I1); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(I1, I2); + builder.hier.addInherit(C2, I2); + builder.objectref = C1; + }, + /* Case 9: Diamond, with superclass, expected at top, + * interface has private def + * + * I2[](*) = mref + * C2[I2](), I1[I2](priv) + * C1[I1,C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final MethodData meth = + new MethodData(MethodData.Access.PRIVATE, + MethodData.Context.INSTANCE); + final ClassData withPrivDef = + new ClassData(pck, meth); + final int I2 = builder.methodref; + final int I1 = builder.addInterface(withPrivDef); + final int C2 = builder.addClass(emptyClass(pck)); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C1, I1); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(I1, I2); + builder.hier.addInherit(C2, I2); + builder.objectref = C1; + }, + /* Case 10: Diamond, with superclass, expected at top, + * interface has static def + * + * I2[](*) = mref + * C2[I2](), I1[I2](stat) + * C1[I1,C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final MethodData meth = + new MethodData(MethodData.Access.PUBLIC, + MethodData.Context.STATIC); + final ClassData withPrivDef = + new ClassData(pck, meth); + final int I2 = builder.methodref; + final int I1 = builder.addInterface(withPrivDef); + final int C2 = builder.addClass(emptyClass(pck)); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C1, I1); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(I1, I2); + builder.hier.addInherit(C2, I2); + builder.objectref = C1; + }, + /* Case 11: Y, with superclass, expected + * at top. 
+ * + * C2[](), I1[](*) = mref + * C1[I1,C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final int I1 = builder.methodref; + final int C2 = builder.addClass(emptyClass(pck)); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C1, I1); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + }, + /* Case 12: Y, with superclass, expected + * at top, class has static def + * + * C2[](stat), I1[](*) = mref + * C1[I1,C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final MethodData meth = + new MethodData(MethodData.Access.PUBLIC, + MethodData.Context.STATIC); + final ClassData withStatDef = + new ClassData(pck, meth); + final int I1 = builder.methodref; + final int C2 = builder.addClass(emptyClass(pck)); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C1, I1); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + }, + /* Case 13: Diamond, with superclass, overlapping, expected + * at top. + * + * I2[](res) = expected + * C2[I2](), I1[](*) = mref + * C1[I1,C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final int I2 = builder.expected; + final int I1 = builder.methodref; + final int C2 = builder.addClass(emptyClass(pck)); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C1, I1); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(C2, I2); + builder.objectref = C1; + }, + /* Case 14: Diamond, with superclass, overlapping, expected + * at top, class has static def + * + * I2[](def) = expected + * C2[I2](stat), I1[](*) = mref + * C1[I1,C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final MethodData meth = + new MethodData(MethodData.Access.PUBLIC, + MethodData.Context.STATIC); + final ClassData withStatDef = + new ClassData(pck, meth); + final int I2 = builder.expected; + final int I1 = builder.methodref; + final int C2 = builder.addClass(withStatDef); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C1, I1); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(C2, I2); + builder.objectref = C1; + }, + /* Case 15: Inherit through superclass. + * + * I[](*) = mref + * C2[I]() + * C1[C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final int I = builder.methodref; + final int C2 = builder.addClass(emptyClass(pck)); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C1, I); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(C2, I); + builder.objectref = C1; + }, + /* Case 16: Superclass has static def. 
+ * + * I[](*) = mref + * C2[I](stat) = expected + * C1[C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final MethodData meth = + new MethodData(MethodData.Access.PUBLIC, + MethodData.Context.STATIC); + final ClassData withStatDef = + new ClassData(pck, meth); + final int I = builder.methodref; + final int C2 = builder.addClass(withStatDef); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C1, I); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(C2, I); + builder.objectref = C1; + }, + /* Case 17: Diamond, inherit through superclass, + * methodref at top. + * + * I3[](*) = mref + * I1[I3](), I2[I3]() + * C2[I1,I2]() + * C1[C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final int I3 = builder.methodref; + final int I2 = builder.addInterface(emptyClass(pck)); + final int I1 = builder.addInterface(emptyClass(pck)); + final int C2 = builder.addClass(emptyClass(pck)); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C2, I1); + builder.hier.addInherit(C2, I2); + builder.hier.addInherit(I1, I3); + builder.hier.addInherit(I2, I3); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + }, + /* Case 18: Diamond, with superclass, inherit through + * superclass, methodref at top. + * + * I2[](*) = mref + * C3[I2](), I1[I2]() + * C2[I1,C3]() + * C1[C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final int I2 = builder.methodref; + final int I1 = builder.addInterface(emptyClass(pck)); + final int C3 = builder.addClass(emptyClass(pck)); + final int C2 = builder.addClass(emptyClass(pck)); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C2, I1); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(I1, I2); + builder.hier.addInherit(C3, I2); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + }, + /* Case 19: Diamond, inherit through superclass, + * expected at top, skip private. + * + * I3[](*) = mref + * I1[I3](), I2[I3](priv) + * C2[I1,I2]() + * C1[C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final MethodData meth = + new MethodData(MethodData.Access.PRIVATE, + MethodData.Context.INSTANCE); + final ClassData withPrivDef = + new ClassData(pck, meth); + final int I3 = builder.methodref; + final int I2 = builder.addInterface(withPrivDef); + final int I1 = builder.addInterface(emptyClass(pck)); + final int C2 = builder.addClass(emptyClass(pck)); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C2, I1); + builder.hier.addInherit(C2, I2); + builder.hier.addInherit(I1, I3); + builder.hier.addInherit(I2, I3); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + }, + /* Case 20: Diamond, inherit through superclass, + * expected at top, skip static. 
+ * + * I3[](*) = mref + * I1[I3](), I2[I3](stat) + * C2[I1,I2]() + * C1[C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final MethodData meth = + new MethodData(MethodData.Access.PUBLIC, + MethodData.Context.STATIC); + final ClassData withStatDef = + new ClassData(pck, meth); + final int I3 = builder.methodref; + final int I2 = builder.addInterface(withStatDef); + final int I1 = builder.addInterface(emptyClass(pck)); + final int C2 = builder.addClass(emptyClass(pck)); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C2, I1); + builder.hier.addInherit(C2, I2); + builder.hier.addInherit(I1, I3); + builder.hier.addInherit(I2, I3); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + }, + /* Case 21: Diamond, inherit through superclass, + * overlapping, expected at top. + * + * I3[](res) = expected + * I1[I3](), I2[*](*) = mref + * C2[I1,I2]() + * C1[C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final int I3 = builder.expected; + final int I2 = builder.methodref; + final int I1 = builder.addInterface(emptyClass(pck)); + final int C2 = builder.addClass(emptyClass(pck)); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C2, I1); + builder.hier.addInherit(C2, I2); + builder.hier.addInherit(I1, I3); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + }, + /* Case 22: Y, with superclass, inherit through + * superclass, expected at top. + * + * C3[](), I1[*](*) = mref + * C2[I1,C3]() + * C1[C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final int I1 = builder.methodref; + final int C3 = builder.addClass(emptyClass(pck)); + final int C2 = builder.addClass(emptyClass(pck)); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C2, I1); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + }, + /* Case 23: Diamond, with superclass, inherit through + * superclass, overlapping, expected at top. + * + * I2[](res) = expected + * C3[I2](), I1[*](*) = mref + * C2[I1,C3]() + * C1[C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final int I2 = builder.expected; + final int I1 = builder.methodref; + final int C3 = builder.addClass(emptyClass(pck)); + final int C2 = builder.addClass(emptyClass(pck)); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C2, I1); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C3, I2); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + }, + /* Case 24: Double diamond, with superclass, inherit through + * superclass, overlapping expected at top. 
+ * + * I3[](res) = expected + * C3[I3](), I2[*](*) = mref + * C2[I2,C3](), I1[I2]() + * C1[C2,I1]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final int I3 = builder.expected; + final int I2 = builder.methodref; + final int I1 = builder.addInterface(emptyClass(pck)); + final int C3 = builder.addClass(emptyClass(pck)); + final int C2 = builder.addClass(emptyClass(pck)); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C2, I2); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C3, I3); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(I1, I2); + builder.hier.addInherit(C1, I1); + builder.objectref = C1; + }, + /* Case 25: Double diamond, with superclass, inherit through + * superclass, skip private. + * + * I3[](def) = old expected + * C3[I3](), I2[*](*) = mref + * C2[I2,C3](), I1[I2](priv) + * C1[C2,I1]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final MethodData meth = + new MethodData(MethodData.Access.PRIVATE, + MethodData.Context.INSTANCE); + final ClassData withPrivDef = + new ClassData(pck, meth); + final int I3 = builder.expected; + final int I2 = builder.methodref; + final int I1 = builder.addInterface(withPrivDef); + final int C3 = builder.addClass(emptyClass(pck)); + final int C2 = builder.addClass(emptyClass(pck)); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C2, I2); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C3, I3); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(I1, I2); + builder.hier.addInherit(C1, I1); + builder.objectref = C1; + }, + /* Case 26: Double diamond, with superclass, inherit through + * superclass, skip static. + * + * I3[](def) = old expected + * C3[I3](), I2[*](*) = mref + * C2[I2,C3](), I1[I2](stat) + * C1[C2,I1]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final MethodData meth = + new MethodData(MethodData.Access.PUBLIC, + MethodData.Context.STATIC); + final ClassData withStatDef = + new ClassData(pck, meth); + final int I3 = builder.expected; + final int I2 = builder.methodref; + final int I1 = builder.addInterface(withStatDef); + final int C3 = builder.addClass(emptyClass(pck)); + final int C2 = builder.addClass(emptyClass(pck)); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C2, I2); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C3, I3); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(I1, I2); + builder.hier.addInherit(C1, I1); + builder.objectref = C1; + }); + + public static final Template IfaceMethodrefSelection = + new Template("IfaceMethodrefSelection", + IfaceMethodrefSelectionNoOverride, + /* Case 27: Objectref overrides. 
+ * + * I[](*) = mref + * C[I](res) = oref = expected + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final ClassData withDef = + new ClassData(pck, oldexpected.methoddata); + final int I = builder.methodref; + final int C = builder.addClass(withDef); + builder.hier.addInherit(C, I); + builder.objectref = C; + builder.expected = C; + }, + /* Case 28: Diamond, methodref at top, overriding. + * + * I3[](*) = mref + * I1[I3](), I2[I3](res) = expected + * C[I1,I2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final ClassData withDef = + new ClassData(pck, oldexpected.methoddata); + final int I3 = builder.methodref; + final int I2 = builder.addInterface(withDef); + final int I1 = builder.addInterface(emptyClass(pck)); + final int C = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C, I1); + builder.hier.addInherit(C, I2); + builder.hier.addInherit(I1, I3); + builder.hier.addInherit(I2, I3); + builder.objectref = C; + builder.expected = I2; + }, + /* Case 29: Diamond, with superclass, expected at top, + * class overriding. + * + * I2[](*) = mref + * C2[I2](res) = expected, I1[I2]() + * C1[I1,C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final ClassData withDef = + new ClassData(pck, oldexpected.methoddata); + final int I2 = builder.methodref; + final int I1 = builder.addInterface(emptyClass(pck)); + final int C2 = builder.addClass(withDef); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C1, I1); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(I1, I2); + builder.hier.addInherit(C2, I2); + builder.objectref = C1; + builder.expected = C2; + }, + /* Case 30: Diamond, with superclass, expected at top, + * interface overriding + * + * I2[](*) = mref + * C2[I2](), I1[I2](res) = expected + * C1[I1,C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final ClassData withDef = + new ClassData(pck, oldexpected.methoddata); + final int I2 = builder.methodref; + final int I1 = builder.addInterface(withDef); + final int C2 = builder.addClass(emptyClass(pck)); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C1, I1); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(I1, I2); + builder.hier.addInherit(C2, I2); + builder.objectref = C1; + builder.expected = I1; + }, + /* Case 31: Y, with superclass, overlapping, expected + * at top, class overrides + * + * C2[](res) = expected, I1[](*) = mref + * C1[I1,C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final ClassData withDef = + new ClassData(pck, oldexpected.methoddata); + final int I1 = builder.methodref; + final int C2 = builder.addClass(withDef); 
+ final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C1, I1); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + builder.expected = C2; + }, + /* Case 32: Diamond, with superclass, overlapping, expected + * at top, class overrides + * + * I2[](def) = old expected + * C2[I2](res) = expected, I1[](*) = mref + * C1[I1,C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final ClassData withDef = + new ClassData(pck, oldexpected.methoddata); + final int I2 = builder.expected; + final int I1 = builder.methodref; + final int C2 = builder.addClass(withDef); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C1, I1); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(C2, I2); + builder.objectref = C1; + builder.expected = C2; + }, + /* Case 33: Superclass overrides. + * + * I[](*) = mref + * C2[I](res) = expected + * C1[C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final ClassData withDef = + new ClassData(pck, oldexpected.methoddata); + final int I = builder.methodref; + final int C2 = builder.addClass(withDef); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C1, I); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(C2, I); + builder.expected = C2; + builder.objectref = C1; + }, + /* Case 34: Diamond, inherit through superclass, + * expected at top, override. + * + * I3[](*) = mref + * I1[I3](), I2[I3](res) = expected + * C2[I1,I2]() + * C1[C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final ClassData withDef = + new ClassData(pck, oldexpected.methoddata); + final int I3 = builder.methodref; + final int I2 = builder.addInterface(withDef); + final int I1 = builder.addInterface(emptyClass(pck)); + final int C2 = builder.addClass(emptyClass(pck)); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C2, I1); + builder.hier.addInherit(C2, I2); + builder.hier.addInherit(I1, I3); + builder.hier.addInherit(I2, I3); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + builder.expected = I2; + }, + /* Case 35: Y, with superclass, inherit through + * superclass, overlapping, expected at top. 
+ * + * C3[](res) = expected, I1[*](*) = mref + * C2[I1,C3]() + * C1[C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final ClassData withDef = + new ClassData(pck, oldexpected.methoddata); + final int I1 = builder.methodref; + final int C3 = builder.addClass(withDef); + final int C2 = builder.addClass(emptyClass(pck)); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C2, I1); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + builder.expected = C3; + }, + /* Case 36: Diamond, with superclass, inherit through + * superclass, overlapping, expected at top. + * + * I2[](*) = oldexpected + * C3[I2](res) = expected, I1[*](*) = mref + * C2[I1,C3]() + * C1[C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final ClassData withDef = + new ClassData(pck, oldexpected.methoddata); + final int I2 = builder.expected; + final int I1 = builder.methodref; + final int C3 = builder.addClass(withDef); + final int C2 = builder.addClass(emptyClass(pck)); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C2, I1); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C3, I2); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + builder.expected = C3; + }, + /* Case 37: Double diamond, with superclass, inherit through + * superclass, overriding. + * + * I3[](def) = old expected + * C3[I3](), I2[*](*) = mref + * C2[I2,C3](), I1[I2](res) = expected + * C1[C2,I1]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final ClassData withDef = + new ClassData(pck, oldexpected.methoddata); + final int I3 = builder.expected; + final int I2 = builder.methodref; + final int I1 = builder.addInterface(withDef); + final int C3 = builder.addClass(emptyClass(pck)); + final int C2 = builder.addClass(emptyClass(pck)); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C2, I2); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C3, I3); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(I1, I2); + builder.hier.addInherit(C1, I1); + builder.objectref = C1; + builder.expected = I1; + }, + /* Case 38: Double diamond, with superclass, inherit through + * superclass, skip private. 
+ * + * I3[](def) = old expected + * C3[I3](), I2[*](*) = mref + * C2[I2,C3](), I1[I2](priv) + * C1[C2,I1]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final ClassData withDef = + new ClassData(pck, oldexpected.methoddata); + final MethodData meth = + new MethodData(MethodData.Access.PRIVATE, + MethodData.Context.INSTANCE); + final ClassData withPrivDef = + new ClassData(pck, meth); + final int I3 = builder.expected; + final int I2 = builder.methodref; + final int I1 = builder.addInterface(withPrivDef); + final int C3 = builder.addClass(emptyClass(pck)); + final int C2 = builder.addClass(emptyClass(pck)); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C2, I2); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C3, I3); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(I1, I2); + builder.hier.addInherit(C1, I1); + builder.objectref = C1; + }, + /* Case 39: Double diamond, with superclass, inherit through + * superclass, skip static. + * + * I3[](def) = old expected + * C3[I3](), I2[*](*) = mref + * C2[I2,C3](), I1[I2](stat) + * C1[C2,I1]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final ClassData withDef = + new ClassData(pck, oldexpected.methoddata); + final MethodData meth = + new MethodData(MethodData.Access.PUBLIC, + MethodData.Context.STATIC); + final ClassData withStatDef = + new ClassData(pck, meth); + final int I3 = builder.expected; + final int I2 = builder.methodref; + final int I1 = builder.addInterface(withStatDef); + final int C3 = builder.addClass(emptyClass(pck)); + final int C2 = builder.addClass(emptyClass(pck)); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C2, I2); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C3, I3); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(I1, I2); + builder.hier.addInherit(C1, I1); + builder.objectref = C1; + }, + /* Case 40: Superclass overrides. + * + * I[](*) = mref + * C3[I](res) = expected + * C2[C3](stat) = expected + * C1[C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final ClassData withDef = + new ClassData(pck, oldexpected.methoddata); + final MethodData meth = + new MethodData(MethodData.Access.PUBLIC, + MethodData.Context.STATIC); + final ClassData withStatDef = + new ClassData(pck, meth); + final int I = builder.methodref; + final int C3 = builder.addClass(withDef); + final int C2 = builder.addClass(withStatDef); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C1, I); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C2, I); + builder.expected = C3; + builder.objectref = C1; + }); + + public static final Template IfaceMethodrefSelectionOverrideNonPublic = + new Template("IfaceMethodrefSelection", + /* Case 1: Objectref overrides. 
+ * + * I[](*) = mref + * C[I](priv) = oref = expected + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData meth = + new MethodData(MethodData.Access.PRIVATE, + MethodData.Context.INSTANCE); + final ClassData withDef = + new ClassData(pck, meth); + final int I = builder.methodref; + final int C = builder.addClass(withDef); + builder.hier.addInherit(C, I); + builder.objectref = C; + builder.expected = C; + }, + /* Case 2: Objectref overrides. + * + * I[](*) = mref + * C[I](prot) = oref = expected + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData meth = + new MethodData(MethodData.Access.PROTECTED, + MethodData.Context.INSTANCE); + final ClassData withDef = + new ClassData(pck, meth); + final int I = builder.methodref; + final int C = builder.addClass(withDef); + builder.hier.addInherit(C, I); + builder.objectref = C; + builder.expected = C; + }, + /* Case 3: Objectref overrides package private. + * + * I[](*) = mref + * C[I](pack) = oref = expected + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.methodref).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData meth = + new MethodData(MethodData.Access.PACKAGE, + MethodData.Context.INSTANCE); + final ClassData withDef = + new ClassData(pck, meth); + final int I = builder.methodref; + final int C = builder.addClass(withDef); + builder.hier.addInherit(C, I); + builder.objectref = C; + builder.expected = C; + }, + /* Case 4: Diamond, with superclass, expected at top, + * class overriding with private. + * + * I2[](*) = mref + * C2[I2](priv) = expected, I1[I2]() + * C1[I1,C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData meth = + new MethodData(MethodData.Access.PRIVATE, + MethodData.Context.INSTANCE); + final ClassData withDef = + new ClassData(pck, meth); + final int I2 = builder.methodref; + final int I1 = builder.addInterface(emptyClass(pck)); + final int C2 = builder.addClass(withDef); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C1, I1); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(I1, I2); + builder.hier.addInherit(C2, I2); + builder.objectref = C1; + builder.expected = C2; + }, + /* Case 5: Diamond, with superclass, expected at top, + * class overriding with package private. 
+ * + * I2[](*) = mref + * C2[I2](pack) = expected, I1[I2]() + * C1[I1,C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData meth = + new MethodData(MethodData.Access.PACKAGE, + MethodData.Context.INSTANCE); + final ClassData withDef = + new ClassData(pck, meth); + final int I2 = builder.methodref; + final int I1 = builder.addInterface(emptyClass(pck)); + final int C2 = builder.addClass(withDef); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C1, I1); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(I1, I2); + builder.hier.addInherit(C2, I2); + builder.objectref = C1; + builder.expected = C2; + }, + /* Case 6: Diamond, with superclass, expected at top, + * class overriding with protected. + * + * I2[](*) = mref + * C2[I2](prot) = expected, I1[I2]() + * C1[I1,C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData meth = + new MethodData(MethodData.Access.PROTECTED, + MethodData.Context.INSTANCE); + final ClassData withDef = + new ClassData(pck, meth); + final int I2 = builder.methodref; + final int I1 = builder.addInterface(emptyClass(pck)); + final int C2 = builder.addClass(withDef); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C1, I1); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(I1, I2); + builder.hier.addInherit(C2, I2); + builder.objectref = C1; + builder.expected = C2; + }, + /* Case 7: Y, with superclass, overlapping, expected + * at top, class overrides + * + * C2[](priv) = expected, I1[](*) = mref + * C1[I1,C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData meth = + new MethodData(MethodData.Access.PRIVATE, + MethodData.Context.INSTANCE); + final ClassData withDef = + new ClassData(pck, meth); + final int I1 = builder.methodref; + final int C2 = builder.addClass(withDef); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C1, I1); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + builder.expected = C2; + }, + /* Case 8: Y, with superclass, overlapping, expected + * at top, class overrides + * + * C2[](prot) = expected, I1[](*) = mref + * C1[I1,C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData meth = + new MethodData(MethodData.Access.PROTECTED, + MethodData.Context.INSTANCE); + final ClassData withDef = + new ClassData(pck, meth); + final int I1 = builder.methodref; + final int C2 = builder.addClass(withDef); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C1, I1); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + builder.expected = C2; + }, + /* Case 9: Y, with superclass, overlapping, expected + * at top, class overrides + * + * C2[](pack) = expected, I1[](*) = mref + * C1[I1,C2]() = oref + */ + (final 
SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData meth = + new MethodData(MethodData.Access.PACKAGE, + MethodData.Context.INSTANCE); + final ClassData withDef = + new ClassData(pck, meth); + final int I1 = builder.methodref; + final int C2 = builder.addClass(withDef); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C1, I1); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + builder.expected = C2; + }, + /* Case 10: Diamond, with superclass, overlapping, expected + * at top, class overrides + * + * I2[](def) = old expected + * C2[I2](priv) = expected, I1[](*) = mref + * C1[I1,C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData meth = + new MethodData(MethodData.Access.PRIVATE, + MethodData.Context.INSTANCE); + final ClassData withDef = + new ClassData(pck, meth); + final int I2 = builder.expected; + final int I1 = builder.methodref; + final int C2 = builder.addClass(withDef); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C1, I1); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(C2, I2); + builder.objectref = C1; + builder.expected = C2; + }, + /* Case 11: Diamond, with superclass, overlapping, expected + * at top, class overrides + * + * I2[](def) = old expected + * C2[I2](pack) = expected, I1[](*) = mref + * C1[I1,C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData meth = + new MethodData(MethodData.Access.PACKAGE, + MethodData.Context.INSTANCE); + final ClassData withDef = + new ClassData(pck, meth); + final int I2 = builder.expected; + final int I1 = builder.methodref; + final int C2 = builder.addClass(withDef); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C1, I1); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(C2, I2); + builder.objectref = C1; + builder.expected = C2; + }, + /* Case 12: Diamond, with superclass, overlapping, expected + * at top, class overrides + * + * I2[](def) = old expected + * C2[I2](prot) = expected, I1[](*) = mref + * C1[I1,C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData meth = + new MethodData(MethodData.Access.PROTECTED, + MethodData.Context.INSTANCE); + final ClassData withDef = + new ClassData(pck, meth); + final int I2 = builder.expected; + final int I1 = builder.methodref; + final int C2 = builder.addClass(withDef); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C1, I1); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(C2, I2); + builder.objectref = C1; + builder.expected = C2; + }, + /* Case 13: Superclass overrides. 
+ * + * I[](*) = mref + * C2[I](priv) = expected + * C1[C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData meth = + new MethodData(MethodData.Access.PRIVATE, + MethodData.Context.INSTANCE); + final ClassData withDef = + new ClassData(pck, meth); + final int I = builder.methodref; + final int C2 = builder.addClass(withDef); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C1, I); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(C2, I); + builder.expected = C2; + builder.objectref = C1; + }, + /* Case 14: Superclass overrides. + * + * I[](*) = mref + * C2[I](prot) = expected + * C1[C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData meth = + new MethodData(MethodData.Access.PROTECTED, + MethodData.Context.INSTANCE); + final ClassData withDef = + new ClassData(pck, meth); + final int I = builder.methodref; + final int C2 = builder.addClass(withDef); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C1, I); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(C2, I); + builder.expected = C2; + builder.objectref = C1; + }, + /* Case 15: Superclass overrides. + * + * I[](*) = mref + * C2[I](pack) = expected + * C1[C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.expected).packageId; + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final MethodData meth = + new MethodData(MethodData.Access.PACKAGE, + MethodData.Context.INSTANCE); + final ClassData withDef = + new ClassData(pck, meth); + final int I = builder.methodref; + final int C2 = builder.addClass(withDef); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C1, I); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(C2, I); + builder.expected = C2; + builder.objectref = C1; + }); + + /*********************** + * Ambiguous selection * + ***********************/ + + public static final Template MethodrefAmbiguousResolvedIsIface = + new Template("MethodrefAmbiguousResolvedIsIface", + /* Inherit from interface. + * + * C2[](*) = mref, I[](any) + * C1[C2,I]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final ClassData.Package pck = oldexpected.packageId; + final MethodData.Context ctx = oldexpected.methoddata.context; + final MethodData mdata = + new MethodData(MethodData.Access.PUBLIC, ctx); + final ClassData withDef = new ClassData(pck, mdata); + final int C2 = builder.methodref; + final int C1 = builder.addClass(emptyClass(pck)); + final int I = builder.addInterface(withDef); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(C1, I); + builder.objectref = C1; + }); + + public static final Template IfaceMethodrefAmbiguousResolvedIsIface = + new Template("IfaceMethodrefAmbiguousResolvedIsIface", + /* Inherit from interface. 
+ * + * I1[](*) = mref, I2[](any) + * C1[I1,I2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final ClassData.Package pck = oldexpected.packageId; + final MethodData.Context ctx = oldexpected.methoddata.context; + final MethodData mdata = + new MethodData(MethodData.Access.PUBLIC, ctx); + final ClassData withDef = new ClassData(pck, mdata); + final int I1 = builder.methodref; + final int C = builder.addClass(emptyClass(pck)); + final int I2 = builder.addInterface(withDef); + builder.hier.addInherit(C, I1); + builder.hier.addInherit(C, I2); + builder.objectref = C; + }); + + public static final Template InvokespecialAmbiguousResolvedIsIface = + new Template("InvokespecialAmbiguousResolvedIsIface", + /* Inherit from interface. + * + * C2[](*) = csite, I[](any) + * C1[C2,I]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData oldexpected = + builder.classdata.get(builder.expected); + final ClassData.Package pck = oldexpected.packageId; + final MethodData.Context ctx = oldexpected.methoddata.context; + final MethodData mdata = + new MethodData(MethodData.Access.PUBLIC, ctx); + final ClassData withDef = new ClassData(pck, mdata); + final int C2 = builder.callsite; + final int C1 = builder.addClass(emptyClass(pck)); + final int I = builder.addInterface(withDef); + builder.hier.addInherit(C1, C2); + builder.hier.addInherit(C1, I); + builder.objectref = C1; + }); + + /****************************** + * invokespecial Templates * + ******************************/ + + // Create this by taking MethodrefSelection and replacing + // methodref with callsite. + public static final Template ObjectrefAssignableToCallsite = + new Template("ObjectrefAssignableToCallsite", + /* Case 1: Objectref equals callsite + * + * C[](*) = csite = oref + */ + (builder) -> { + builder.objectref = builder.callsite; + }, + /* Case 2: Inherit from super. + * + * C2[](*) = csite + * C1[C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.callsite).packageId; + final int C2 = builder.callsite; + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + }); + + public static final Template ObjectrefExactSubclassOfCallsite = + new Template("ObjectrefSubclassOfCallsite", + /* Inherit from super. + * + * C2[](*) = csite + * C1[C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.callsite).packageId; + final int C2 = builder.callsite; + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + }); + + public static final Template ObjectrefEqualsOrExactSubclassOfCallsite = + new Template("ObjectrefEqualsOrExactSubclassOfCallsite", + (final SelectionResolutionTestCase.Builder builder) -> { + builder.objectref = builder.callsite; + }, + /* Inherit from super. 
+ * + * C2[](*) = csite + * C1[C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.callsite).packageId; + final int C2 = builder.callsite; + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + }); + + public static final Template ObjectrefEqualsCallsite = + new Template("TrivialObjectref", + Collections.singleton((builder) -> { + builder.objectref = builder.callsite; + })); + + public static final Template ObjectrefSubclassOfSubclassOfCallsite = + new Template("ObjectrefSubclassOfCallsite", + /* Inherit from super. + * + * C3[](*) = csite + * C2[C3]() + * C1[C2]() = oref + */ + (final SelectionResolutionTestCase.Builder builder) -> { + final ClassData.Package pck = + builder.classdata.get(builder.callsite).packageId; + final int C3 = builder.callsite; + final int C2 = builder.addClass(emptyClass(pck)); + final int C1 = builder.addClass(emptyClass(pck)); + builder.hier.addInherit(C2, C3); + builder.hier.addInherit(C1, C2); + builder.objectref = C1; + }); + + private static class Placeholder extends ClassData { + private final String placeholder; + + + private Placeholder(final String placeholder, + final MethodData methoddata) { + super(ClassData.Package.PLACEHOLDER, methoddata); + this.placeholder = placeholder; + } + + private Placeholder(final String placeholder) { + this(placeholder, null); + } + + public String toString() { + return " = \n\n"; + } + + public static final Placeholder objectref = new Placeholder("objectref"); + public static final Placeholder methodref = new Placeholder("methodref"); + public static final Placeholder callsite = new Placeholder("callsite"); + public static final Placeholder expected = + new Placeholder("expected", + new MethodData(MethodData.Access.PLACEHOLDER, + MethodData.Context.PLACEHOLDER)); + } + + public static void main(String... 
args) { + + System.err.println("*** Resolution Templates ***\n"); + final SelectionResolutionTestCase.Builder withExpectedIface = + new SelectionResolutionTestCase.Builder(); + withExpectedIface.expected = + withExpectedIface.addInterface(Placeholder.expected); + final SelectionResolutionTestCase.Builder withExpectedClass = + new SelectionResolutionTestCase.Builder(); + withExpectedClass.expected = + withExpectedClass.addClass(Placeholder.expected); + + MethodrefNotEqualsExpectedClass.printCases(withExpectedClass); + MethodrefNotEqualsExpectedIface.printCases(withExpectedIface); + IfaceMethodrefNotEqualsExpected.printCases(withExpectedIface); + MethodrefAmbiguous.printCases(withExpectedIface); + IfaceMethodrefAmbiguous.printCases(withExpectedIface); + ReabstractExpectedIface.printCases(withExpectedIface); + ReabstractExpectedClass.printCases(withExpectedClass); + + final SelectionResolutionTestCase.Builder methodrefExpectedIface = + withExpectedIface.copy(); + methodrefExpectedIface.methodref = + methodrefExpectedIface.addClass(Placeholder.methodref); + final SelectionResolutionTestCase.Builder methodrefExpectedClass = + withExpectedClass.copy(); + methodrefExpectedClass.methodref = + methodrefExpectedClass.addClass(Placeholder.methodref); + final SelectionResolutionTestCase.Builder ifaceMethodref = + withExpectedIface.copy(); + ifaceMethodref.methodref = + ifaceMethodref.addInterface(Placeholder.methodref); + + IgnoredAbstract.printCases(methodrefExpectedIface); + MethodrefSelectionResolvedIsClass.printCases(methodrefExpectedClass); + MethodrefSelectionResolvedIsIface.printCases(methodrefExpectedIface); + IfaceMethodrefSelection.printCases(ifaceMethodref); + IfaceMethodrefSelectionOverrideNonPublic.printCases(ifaceMethodref); + + } + +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/runtime/SelectionResolution/classes/selectionresolution/TestBuilder.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/runtime/SelectionResolution/classes/selectionresolution/TestBuilder.java Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +package selectionresolution; + +import jdk.internal.org.objectweb.asm.Opcodes; + +import static jdk.internal.org.objectweb.asm.Opcodes.ACC_PUBLIC; +import static jdk.internal.org.objectweb.asm.Opcodes.ACC_STATIC; + +class TestBuilder extends Builder { + private final ClassConstruct testClass; + private final Method mainMethod; + + public TestBuilder(int classId, SelectionResolutionTestCase testcase) { + super(testcase); + + // Make a public class Test that contains all our test methods + testClass = new Clazz("Test", null, -1, ACC_PUBLIC); + + // Add a main method + mainMethod = testClass.addMethod("main", "([Ljava/lang/String;)V", ACC_PUBLIC + ACC_STATIC); + + } + + public ClassConstruct getMainTestClass() { + mainMethod.done(); + return testClass; + } + + public void addTest(ClassConstruct clazz, ClassBuilder.ExecutionMode execMode) { + Method m = clazz.addMethod("test", "()Ljava/lang/Integer;", ACC_PUBLIC + ACC_STATIC, execMode); + m.defaultInvoke(getInvokeInstruction(testcase.invoke), + getName(testcase.methodref), + getName(testcase.objectref)); + + mainMethod.makeStaticCall(clazz.getName(), "test", "()Ljava/lang/Integer;").done(); + } + + private static int getInvokeInstruction(SelectionResolutionTestCase.InvokeInstruction instr) { + switch (instr) { + case INVOKESTATIC: + return Opcodes.INVOKESTATIC; + case INVOKESPECIAL: + return Opcodes.INVOKESPECIAL; + case INVOKEINTERFACE: + return Opcodes.INVOKEINTERFACE; + case INVOKEVIRTUAL: + return Opcodes.INVOKEVIRTUAL; + default: + throw new AssertionError(instr.name()); + } + } +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/runtime/SharedArchiveFile/BootAppendTests.java --- a/hotspot/test/runtime/SharedArchiveFile/BootAppendTests.java Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/test/runtime/SharedArchiveFile/BootAppendTests.java Wed Jul 05 21:35:27 2017 +0200 @@ -27,14 +27,12 @@ * @library /testlibrary * @modules java.base/jdk.internal.misc * java.management - * jdk.jartool/sun.tools.jar * jdk.jvmstat/sun.jvmstat.monitor * @ignore 8150683 * @compile javax/sound/sampled/MyClass.jasm * @compile org/omg/CORBA/Context.jasm * @compile nonjdk/myPackage/MyClass.java - * @build jdk.test.lib.* LoadClass - * @run main ClassFileInstaller LoadClass + * @build jdk.test.lib.* LoadClass ClassFileInstaller * @run main/othervm BootAppendTests */ @@ -90,11 +88,9 @@ fos.close(); // build jar files - BasicJarBuilder.build(true, "app", APP_CLASS); - appJar = BasicJarBuilder.getTestJar("app.jar"); - BasicJarBuilder.build("bootAppend", + appJar = ClassFileInstaller.writeJar("app.jar", APP_CLASS); + bootAppendJar = ClassFileInstaller.writeJar("bootAppend.jar", BOOT_APPEND_MODULE_CLASS, BOOT_APPEND_DUPLICATE_MODULE_CLASS, BOOT_APPEND_CLASS); - bootAppendJar = BasicJarBuilder.getTestJar("bootAppend.jar"); // dump ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/runtime/SharedArchiveFile/SharedStrings.java --- a/hotspot/test/runtime/SharedArchiveFile/SharedStrings.java Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/test/runtime/SharedArchiveFile/SharedStrings.java Wed Jul 05 21:35:27 2017 +0200 @@ -32,23 +32,20 @@ * @library /testlibrary /test/lib * @modules java.base/jdk.internal.misc * java.management - * jdk.jartool/sun.tools.jar - * @build SharedStringsWb SharedStrings BasicJarBuilder sun.hotspot.WhiteBox - * @run main ClassFileInstaller sun.hotspot.WhiteBox + * @build SharedStringsWb SharedStrings ClassFileInstaller sun.hotspot.WhiteBox + * @run main ClassFileInstaller -jar 
whitebox.jar sun.hotspot.WhiteBox * @run main SharedStrings */ import jdk.test.lib.*; public class SharedStrings { public static void main(String[] args) throws Exception { - BasicJarBuilder.build(true, "whitebox", "sun/hotspot/WhiteBox"); - ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./SharedStrings.jsa", "-XX:+PrintSharedSpaces", // Needed for bootclasspath match, for CDS to work with WhiteBox API - "-Xbootclasspath/a:" + BasicJarBuilder.getTestJar("whitebox.jar"), + "-Xbootclasspath/a:" + ClassFileInstaller.getJarPath("whitebox.jar"), "-Xshare:dump"); new OutputAnalyzer(pb.start()) @@ -62,7 +59,7 @@ // these are required modes for shared strings "-XX:+UseCompressedOops", "-XX:+UseG1GC", // needed for access to white box test API - "-Xbootclasspath/a:" + BasicJarBuilder.getTestJar("whitebox.jar"), + "-Xbootclasspath/a:" + ClassFileInstaller.getJarPath("whitebox.jar"), "-XX:+UnlockDiagnosticVMOptions", "-XX:+WhiteBoxAPI", "-Xshare:on", "-showversion", "SharedStringsWb"); diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/runtime/Throwable/StackTraceLogging.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/runtime/Throwable/StackTraceLogging.java Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test + * @bug 8150778 + * @summary check stacktrace logging + * @library /testlibrary + * @modules java.base/jdk.internal.misc + * java.management + * @build jdk.test.lib.OutputAnalyzer jdk.test.lib.ProcessTools + * @compile TestThrowable.java + * @run driver StackTraceLogging + */ + +import java.io.File; +import java.util.Map; +import jdk.test.lib.OutputAnalyzer; +import jdk.test.lib.ProcessTools; + +public class StackTraceLogging { + static void updateEnvironment(ProcessBuilder pb, String environmentVariable, String value) { + Map env = pb.environment(); + env.put(environmentVariable, value); + } + + static void analyzeOutputOn(ProcessBuilder pb) throws Exception { + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + // These depths match the ones in TestThrowable.java + int[] depths = {10, 34, 100, 1024}; + for (int d : depths) { + output.shouldContain("java.lang.RuntimeException, " + d); + } + output.shouldHaveExitValue(0); + } + + + public static void main(String[] args) throws Exception { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-Xlog:stacktrace=info", + "-XX:MaxJavaStackTraceDepth=1024", + "TestThrowable"); + analyzeOutputOn(pb); + } +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/runtime/Throwable/TestThrowable.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/runtime/Throwable/TestThrowable.java Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test + * @bug 8150778 + * @summary Test exception depths, and code to get stack traces + * @library /testlibrary + * @run main/othervm -XX:MaxJavaStackTraceDepth=1024 TestThrowable + */ + +import java.lang.reflect.Field; +import jdk.test.lib.Asserts; + +public class TestThrowable { + + // Inner class that throws a lot of exceptions + static class Thrower { + static int MaxJavaStackTraceDepth = 1024; // as above + int[] depths = {10, 34, 100, 1024, 2042}; + int count = 0; + + int getDepth(Throwable t) throws Exception { + Field f = Throwable.class.getDeclaredField("depth"); + f.setAccessible(true); // it's private + return f.getInt(t); + } + + void callThrow(int depth) { + if (++count < depth) { + callThrow(depth); + } else { + throw new RuntimeException("depth tested " + depth); + } + } + void testThrow() throws Exception { + for (int d : depths) { + try { + count = getDepth(new Throwable()); + callThrow(d); + } catch(Exception e) { + e.getStackTrace(); + System.out.println(e.getMessage()); + int throwableDepth = getDepth(e); + Asserts.assertTrue(throwableDepth == d || + (d > MaxJavaStackTraceDepth && throwableDepth == MaxJavaStackTraceDepth), + "depth should return the correct value: depth tested=" + + d + " throwableDepth=" + throwableDepth); + } + } + } + } + + public static void main(String... unused) throws Exception { + Thrower t = new Thrower(); + t.testThrow(); + } +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/runtime/Unsafe/PrimitiveHostClass.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/runtime/Unsafe/PrimitiveHostClass.java Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,99 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import java.awt.Component; +import java.lang.reflect.Field; +import static jdk.internal.org.objectweb.asm.Opcodes.*; +import jdk.internal.org.objectweb.asm.*; +import sun.misc.Unsafe; + +/* + * @test PrimitiveHostClass + * @bug 8140665 + * @summary Throws IllegalArgumentException if host class is a primitive class. 
+ * @library /testlibrary + * @modules java.base/jdk.internal.org.objectweb.asm + * java.base/jdk.internal.misc + * @compile -XDignore.symbol.file PrimitiveHostClass.java + * @run main/othervm PrimitiveHostClass + */ + +public class PrimitiveHostClass { + + static final Unsafe U; + static { + try { + Field theUnsafe = Unsafe.class.getDeclaredField("theUnsafe"); + theUnsafe.setAccessible(true); + U = (Unsafe) theUnsafe.get(null); + } catch (Exception e) { + throw new AssertionError(e); + } + } + + public static void testVMAnonymousClass(Class hostClass) { + + // choose a class name in the same package as the host class + String prefix = packageName(hostClass); + if (prefix.length() > 0) + prefix = prefix.replace('.', '/') + "/"; + String className = prefix + "Anon"; + + // create the class + String superName = "java/lang/Object"; + ClassWriter cw = new ClassWriter(ClassWriter.COMPUTE_MAXS + + ClassWriter.COMPUTE_FRAMES); + cw.visit(V1_8, ACC_PUBLIC + ACC_FINAL + ACC_SUPER, + className, null, superName, null); + byte[] classBytes = cw.toByteArray(); + int cpPoolSize = constantPoolSize(classBytes); + Class anonClass = + U.defineAnonymousClass(hostClass, classBytes, new Object[cpPoolSize]); + } + + private static String packageName(Class c) { + if (c.isArray()) { + return packageName(c.getComponentType()); + } else { + String name = c.getName(); + int dot = name.lastIndexOf('.'); + if (dot == -1) return ""; + return name.substring(0, dot); + } + } + + private static int constantPoolSize(byte[] classFile) { + return ((classFile[8] & 0xFF) << 8) | (classFile[9] & 0xFF); + } + + public static void main(String args[]) { + testVMAnonymousClass(PrimitiveHostClass.class); + try { + testVMAnonymousClass(int.class); + throw new RuntimeException( + "Expected IllegalArgumentException not thrown"); + } catch (IllegalArgumentException e) { + // Expected + } + } +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/runtime/logging/ClassInitializationTest.java --- a/hotspot/test/runtime/logging/ClassInitializationTest.java Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/test/runtime/logging/ClassInitializationTest.java Wed Jul 05 21:35:27 2017 +0200 @@ -62,16 +62,6 @@ out.shouldContain("[Initialized").shouldContain("without side effects]"); out.shouldHaveExitValue(0); } - // (3) Ensure that VerboseVerification still triggers appropriate messages. - pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockDiagnosticVMOptions", - "-XX:+VerboseVerification", - "-Xverify:all", - "-Xmx64m", - "BadMap50"); - out = new OutputAnalyzer(pb.start()); - out.shouldContain("End class verification for:"); - out.shouldContain("Verification for BadMap50 failed"); - out.shouldContain("Fail over class verification to old verifier for: BadMap50"); } public static class InnerClass { public static void main(String[] args) throws Exception { diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/runtime/logging/ClassResolutionTest.java --- a/hotspot/test/runtime/logging/ClassResolutionTest.java Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/test/runtime/logging/ClassResolutionTest.java Wed Jul 05 21:35:27 2017 +0200 @@ -58,13 +58,13 @@ public static void main(String... args) throws Exception { // (1) classresolve should turn on. 
- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-Xlog:classresolve=info", + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-Xlog:classresolve=debug", ClassResolutionTestMain.class.getName()); OutputAnalyzer o = new OutputAnalyzer(pb.start()); o.shouldContain("[classresolve] ClassResolutionTest$ClassResolutionTestMain$Thing1Handler ClassResolutionTest$ClassResolutionTestMain$Thing1"); // (2) classresolve should turn off. - pb = ProcessTools.createJavaProcessBuilder("-Xlog", + pb = ProcessTools.createJavaProcessBuilder("-Xlog:classresolve=debug", "-Xlog:classresolve=off", ClassResolutionTestMain.class.getName()); o = new OutputAnalyzer(pb.start()); @@ -77,12 +77,12 @@ o.shouldContain("[classresolve] ClassResolutionTest$ClassResolutionTestMain$Thing1Handler ClassResolutionTest$ClassResolutionTestMain$Thing1"); // (4) TraceClassResolution should turn off. - pb = ProcessTools.createJavaProcessBuilder("-Xlog", + pb = ProcessTools.createJavaProcessBuilder("-Xlog:classresolve=debug", "-XX:-TraceClassResolution", ClassResolutionTestMain.class.getName()); o = new OutputAnalyzer(pb.start()); o.shouldNotContain("[classresolve]"); + }; - }; } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/runtime/logging/ExceptionsTest.java --- a/hotspot/test/runtime/logging/ExceptionsTest.java Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/test/runtime/logging/ExceptionsTest.java Wed Jul 05 21:35:27 2017 +0200 @@ -45,7 +45,7 @@ static void analyzeOutputOn(ProcessBuilder pb) throws Exception { OutputAnalyzer output = new OutputAnalyzer(pb.start()); - output.shouldContain(""); + output.shouldContain(""); output.shouldContain(" thrown in interpreter method "); output.shouldHaveExitValue(0); } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/runtime/logging/LoaderConstraintsTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/runtime/logging/LoaderConstraintsTest.java Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + + +/* + * @test LoaderConstraintsTest + * @bug 8149996 + * @library /testlibrary /runtime/testlibrary + * @library classes + * @build ClassUnloadCommon test.Empty jdk.test.lib.* jdk.test.lib.OutputAnalyzer jdk.test.lib.ProcessTools + * @run driver LoaderConstraintsTest + */ + +import jdk.test.lib.*; +import java.lang.ref.WeakReference; +import java.lang.reflect.Method; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +public class LoaderConstraintsTest { + private static OutputAnalyzer out; + private static ProcessBuilder pb; + private static class ClassUnloadTestMain { + public static void main(String... args) throws Exception { + String className = "test.Empty"; + ClassLoader cl = ClassUnloadCommon.newClassLoader(); + Class c = cl.loadClass(className); + cl = null; c = null; + ClassUnloadCommon.triggerUnloading(); + } + } + + // Use the same command-line heap size setting as ../ClassUnload/UnloadTest.java + static ProcessBuilder exec(String... args) throws Exception { + List argsList = new ArrayList<>(); + Collections.addAll(argsList, args); + Collections.addAll(argsList, "-Xmn8m"); + Collections.addAll(argsList, "-Dtest.classes=" + System.getProperty("test.classes",".")); + Collections.addAll(argsList, ClassUnloadTestMain.class.getName()); + return ProcessTools.createJavaProcessBuilder(argsList.toArray(new String[argsList.size()])); + } + + public static void main(String... args) throws Exception { + + // -XX:+TraceLoaderConstraints + pb = exec("-XX:+TraceLoaderConstraints"); + out = new OutputAnalyzer(pb.start()); + out.getOutput(); + out.shouldContain("[classload,constraints] adding new constraint for name: java/lang/Class, loader[0]: jdk/internal/loader/ClassLoaders$AppClassLoader, loader[1]: "); + + // -Xlog:classload+constraints=info + pb = exec("-Xlog:classload+constraints=info"); + out = new OutputAnalyzer(pb.start()); + out.shouldContain("[classload,constraints] adding new constraint for name: java/lang/Class, loader[0]: jdk/internal/loader/ClassLoaders$AppClassLoader, loader[1]: "); + + // -XX:-TraceLoaderConstraints + pb = exec("-XX:-TraceLoaderConstraints"); + out = new OutputAnalyzer(pb.start()); + out.shouldNotContain("[classload,constraints]"); + + // -Xlog:classload+constraints=off + pb = exec("-Xlog:classload+constraints=off"); + out = new OutputAnalyzer(pb.start()); + out.shouldNotContain("[classload,constraints]"); + + } +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/runtime/logging/MonitorMismatchHelper.jasm --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/runtime/logging/MonitorMismatchHelper.jasm Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,114 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +super public class MonitorMismatchHelper + version 52:0 +{ + +private Field c:I; + +public Method "":"()V" + stack 2 locals 1 +{ + aload_0; + invokespecial Method java/lang/Object."":"()V"; + aload_0; + iconst_0; + putfield Field c:"I"; + return; +} + +public synchronized Method increment:"()V" + stack 3 locals 1 +{ + aload_0; + dup; + getfield Field c:"I"; + iconst_1; + iadd; + putfield Field c:"I"; + return; +} + +public synchronized Method decrement:"()V" + stack 3 locals 1 +{ + aload_0; + dup; + getfield Field c:"I"; + iconst_1; + isub; + putfield Field c:"I"; + return; +} + +public synchronized Method value:"()I" + stack 1 locals 1 +{ + aload_0; + getfield Field c:"I"; + ireturn; +} + +public static varargs Method main:"([Ljava/lang/String;)V" + stack 2 locals 4 +{ + new class MonitorMismatchHelper; + dup; + invokespecial Method "":"()V"; + astore_1; + aload_1; + dup; + astore_2; + monitorenter; + try t0; + aload_1; + invokevirtual Method increment:"()V"; + aload_1; + invokevirtual Method increment:"()V"; + aload_1; + invokevirtual Method decrement:"()V"; + getstatic Field java/lang/System.out:"Ljava/io/PrintStream;"; + aload_1; + invokevirtual Method value:"()I"; + invokevirtual Method java/io/PrintStream.print:"(I)V"; + aload_2; + monitorexit; + endtry t0; + goto L44; + catch t0 #0; + catch t1 #0; + try t1; + stack_frame_type full; + locals_map class "[Ljava/lang/String;", class MonitorMismatchHelper, class java/lang/Object; + stack_map class java/lang/Throwable; + astore_3; + aload_2; + endtry t1; + aload_3; + athrow; + L44: stack_frame_type chop1; + return; +} + +} // end Class MonitorMismatchHelper diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/runtime/logging/MonitorMismatchTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/runtime/logging/MonitorMismatchTest.java Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + + +/* + * @test MonitorMismatchTest + * @bug 8150084 + * @library /testlibrary + * @compile MonitorMismatchHelper.jasm + * @build jdk.test.lib.OutputAnalyzer jdk.test.lib.ProcessTools jdk.test.lib.Platform + * @run driver MonitorMismatchTest + */ + +import jdk.test.lib.OutputAnalyzer; +import jdk.test.lib.ProcessTools; +import jdk.test.lib.Platform; + +public class MonitorMismatchTest { + + public static void main(String... args) throws Exception { + if (!Platform.isEmbedded()){ + // monitormismatch should turn on. + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-Xcomp", + "-XX:+TieredCompilation", + "-Xlog:monitormismatch=info", + "MonitorMismatchHelper"); + OutputAnalyzer o = new OutputAnalyzer(pb.start()); + o.shouldContain("[monitormismatch] Monitor mismatch in method"); + + // monitormismatch should turn off. + pb = ProcessTools.createJavaProcessBuilder("-Xcomp", + "-XX:+TieredCompilation", + "-Xlog:monitormismatch=off", + "MonitorMismatchHelper"); + o = new OutputAnalyzer(pb.start()); + o.shouldNotContain("[monitormismatch]"); + } + }; + +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/runtime/logging/RemovedDevelopFlagsTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/runtime/logging/RemovedDevelopFlagsTest.java Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + + +/* + * @test RemovedDevelopFlagsTest + * @bug 8146632 + * @library /testlibrary + * @build jdk.test.lib.OutputAnalyzer jdk.test.lib.ProcessTools + * @run driver RemovedDevelopFlagsTest + */ +import jdk.test.lib.*; + +public class RemovedDevelopFlagsTest { + public static ProcessBuilder pb; + + public static class RemovedDevelopFlagsTestMain { + public static void main(String... args) { + System.out.print("Hello!"); + } + } + + public static void exec(String flag, String value) throws Exception { + pb = ProcessTools.createJavaProcessBuilder("-XX:+"+flag, RemovedDevelopFlagsTestMain.class.getName()); + OutputAnalyzer o = new OutputAnalyzer(pb.start()); + o.shouldContain(flag+" has been removed. Please use "+value+" instead."); + o.shouldHaveExitValue(1); + } + + public static void main(String... 
args) throws Exception { + if (Platform.isDebugBuild()){ + exec("TraceClassInitialization", "-Xlog:classinit"); + exec("TraceClassLoaderData", "-Xlog:classloaderdata"); + exec("TraceDefaultMethods", "-Xlog:defaultmethods=debug"); + exec("TraceItables", "-Xlog:itables=debug"); + exec("TraceSafepoint", "-Xlog:safepoint=debug"); + exec("TraceStartupTime", "-Xlog:startuptime"); + exec("TraceVMOperation", "-Xlog:vmoperation=debug"); + exec("PrintVtables", "-Xlog:vtables=debug"); + exec("VerboseVerification", "-Xlog:verification"); + } + }; +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/runtime/logging/SafepointCleanupTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/runtime/logging/SafepointCleanupTest.java Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test + * @bug 8149991 + * @summary safepointcleanup=info should have output from the code + * @library /testlibrary + * @modules java.base/jdk.internal.misc + * java.management + * @build jdk.test.lib.OutputAnalyzer jdk.test.lib.ProcessTools + * @run driver SafepointCleanupTest + */ + +import jdk.test.lib.OutputAnalyzer; +import jdk.test.lib.ProcessTools; + +public class SafepointCleanupTest { + static void analyzeOutputOn(ProcessBuilder pb) throws Exception { + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldContain("[safepointcleanup]"); + output.shouldContain("deflating idle monitors"); + output.shouldContain("updating inline caches"); + output.shouldContain("compilation policy safepoint handler"); + output.shouldContain("mark nmethods"); + output.shouldContain("purging class loader data graph"); + output.shouldHaveExitValue(0); + } + + static void analyzeOutputOff(ProcessBuilder pb) throws Exception { + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldNotContain("[safepointcleanup]"); + output.shouldHaveExitValue(0); + } + + public static void main(String[] args) throws Exception { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-Xlog:safepointcleanup=info", + InnerClass.class.getName()); + analyzeOutputOn(pb); + + pb = ProcessTools.createJavaProcessBuilder("-XX:+TraceSafepointCleanupTime", + InnerClass.class.getName()); + analyzeOutputOn(pb); + + pb = ProcessTools.createJavaProcessBuilder("-Xlog:safepointcleanup=off", + InnerClass.class.getName()); + analyzeOutputOff(pb); + + pb = ProcessTools.createJavaProcessBuilder("-XX:-TraceSafepointCleanupTime", + InnerClass.class.getName()); + analyzeOutputOff(pb); + } + + public static class InnerClass { + public static void main(String[] args) throws Exception { + System.out.println("Safepoint Cleanup test"); + } + } +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/runtime/logging/VerificationTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/runtime/logging/VerificationTest.java Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test + * @bug 8150083 + * @summary verification=info output should have output from the code + * @library /testlibrary + * @modules java.base/jdk.internal.misc + * java.management + * @build jdk.test.lib.OutputAnalyzer jdk.test.lib.ProcessTools + * @run driver VerificationTest + */ + +import jdk.test.lib.OutputAnalyzer; +import jdk.test.lib.ProcessTools; + +public class VerificationTest { + static void analyzeOutputOn(ProcessBuilder pb) throws Exception { + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldContain("[verification]"); + output.shouldContain("Verifying class VerificationTest$InternalClass with new format"); + output.shouldContain("Verifying method VerificationTest$InternalClass.()V"); + output.shouldContain("End class verification for: VerificationTest$InternalClass"); + output.shouldHaveExitValue(0); + } + + static void analyzeOutputOff(ProcessBuilder pb) throws Exception { + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldNotContain("[verification]"); + output.shouldHaveExitValue(0); + } + + public static void main(String[] args) throws Exception { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-Xlog:verification=info", + InternalClass.class.getName()); + analyzeOutputOn(pb); + + pb = ProcessTools.createJavaProcessBuilder("-Xlog:verification=off", + InternalClass.class.getName()); + analyzeOutputOff(pb); + } + + public static class InternalClass { + public static void main(String[] args) throws Exception { + System.out.println("VerificationTest"); + } + } +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/serviceability/tmtools/jstack/JstackThreadTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/serviceability/tmtools/jstack/JstackThreadTest.java Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +import java.util.Arrays; +import jdk.test.lib.JDKToolLauncher; +import jdk.test.lib.OutputAnalyzer; +import jdk.test.lib.ProcessTools; + +/* + * @test JstackThreadTest + * @bug 8151442 + * @summary jstack doesn't close quotation marks properly with threads' name greater than 1996 characters + * @library /testlibrary + * @build jdk.test.lib.* + * @ignore 8153319 + * @run main JstackThreadTest + */ +public class JstackThreadTest { + static class NamedThread extends Thread { + NamedThread(String name) { + setName(name); + } + @Override + public void run() { + try { + Thread.sleep(2000); + } catch(Exception e){ + e.printStackTrace(); + } + } + } + + public static void main(String[] args) throws Exception { + StringBuilder sb = new StringBuilder(); + /*create a string more than 1996 character */ + for(int i = 0; i < 1998; i++){ + sb.append("a"); + } + testWithName(sb.toString()); + } + + private static void testWithName(String name) throws Exception { + // Start a thread with a long thread name + NamedThread thread = new NamedThread(name); + thread.start(); + ProcessBuilder processBuilder = new ProcessBuilder(); + JDKToolLauncher launcher = JDKToolLauncher.createUsingTestJDK("jstack"); + launcher.addToolArg("-l"); + launcher.addToolArg(Long.toString(ProcessTools.getProcessId())); + processBuilder.command(launcher.getCommand()); + System.out.println(Arrays.toString(processBuilder.command().toArray()).replace(",", "")); + OutputAnalyzer output = ProcessTools.executeProcess(processBuilder); + System.out.println(output.getOutput()); + output.shouldContain("\""+ name + "\""); + } +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/stress/gc/TestMultiThreadStressRSet.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/stress/gc/TestMultiThreadStressRSet.java Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,305 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import java.io.PrintStream; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Random; +import sun.hotspot.WhiteBox; + +/* + * @test TestMultiThreadStressRSet.java + * @key stress + * @requires vm.gc=="G1" | vm.gc=="null" + * @requires os.maxMemory > 2G + * + * @summary Stress G1 Remembered Set using multiple threads + * @library /test/lib /testlibrary + * @build sun.hotspot.WhiteBox + * @run main ClassFileInstaller sun.hotspot.WhiteBox + * sun.hotspot.WhiteBox$WhiteBoxPermission + * @run main/othervm -Xbootclasspath/a:. 
-XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI + * -XX:+UseG1GC -XX:G1SummarizeRSetStatsPeriod=1 -Xlog:gc + * -Xmx500m -XX:G1HeapRegionSize=1m -XX:MaxGCPauseMillis=1000 TestMultiThreadStressRSet 10 4 + * + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI + * -XX:+UseG1GC -XX:G1SummarizeRSetStatsPeriod=100 -Xlog:gc + * -Xmx1G -XX:G1HeapRegionSize=8m -XX:MaxGCPauseMillis=1000 TestMultiThreadStressRSet 60 16 + * + * @run main/othervm/timeout=700 -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI + * -XX:+UseG1GC -XX:G1SummarizeRSetStatsPeriod=100 -Xlog:gc + * -Xmx500m -XX:G1HeapRegionSize=1m -XX:MaxGCPauseMillis=1000 TestMultiThreadStressRSet 600 32 + */ +public class TestMultiThreadStressRSet { + + private static final Random RND = new Random(2015 * 2016); + private static final WhiteBox WB = WhiteBox.getWhiteBox(); + private static final int REF_SIZE = WB.getHeapOopSize(); + private static final int REGION_SIZE = WB.g1RegionSize(); + + // How many regions to use for the storage + private static final int STORAGE_REGIONS = 20; + + // Size a single obj in the storage + private static final int OBJ_SIZE = 1024; + + // How many regions of young/old gen to use in the BUFFER + private static final int BUFFER_YOUNG_REGIONS = 60; + private static final int BUFFER_OLD_REGIONS = 40; + + // Total number of objects in the storage. + private final int N; + + // The storage of byte[] + private final List STORAGE; + + // Where references to the Storage will be stored + private final List BUFFER; + + // The length of a buffer element. + // RSet deals with "cards" (areas of 512 bytes), not with single refs + // So, to affect the RSet the BUFFER refs should be allocated in different + // memory cards. + private final int BUF_ARR_LEN = 100 * (512 / REF_SIZE); + + // Total number of objects in the young/old buffers + private final int YOUNG; + private final int OLD; + + // To cause Remembered Sets change their coarse level the test uses a window + // within STORAGE. All the BUFFER elements refer to only STORAGE objects + // from the current window. The window is defined by a range. + // The first element has got the index: 'windowStart', + // the last one: 'windowStart + windowSize - 1' + // The window is shifting periodically. + private int windowStart; + private final int windowSize; + + // Counter of created worker threads + private int counter = 0; + + private volatile String errorMessage = null; + private volatile boolean isEnough = false; + + public static void main(String args[]) { + if (args.length != 2) { + throw new IllegalArgumentException("TEST BUG: wrong arg count " + args.length); + } + long time = Long.parseLong(args[0]); + int threads = Integer.parseInt(args[1]); + new TestMultiThreadStressRSet().test(time * 1000, threads); + } + + /** + * Initiates test parameters, fills out the STORAGE and BUFFER. 
+ */ + public TestMultiThreadStressRSet() { + + N = (REGION_SIZE - 1) * STORAGE_REGIONS / OBJ_SIZE + 1; + STORAGE = new ArrayList<>(N); + int bytes = OBJ_SIZE - 20; + for (int i = 0; i < N - 1; i++) { + STORAGE.add(new byte[bytes]); + } + STORAGE.add(new byte[REGION_SIZE / 2 + 100]); // humongous + windowStart = 0; + windowSize = REGION_SIZE / OBJ_SIZE; + + BUFFER = new ArrayList<>(); + int sizeOfBufferObject = 20 + REF_SIZE * BUF_ARR_LEN; + OLD = REGION_SIZE * BUFFER_OLD_REGIONS / sizeOfBufferObject; + YOUNG = REGION_SIZE * BUFFER_YOUNG_REGIONS / sizeOfBufferObject; + for (int i = 0; i < OLD + YOUNG; i++) { + BUFFER.add(new Object[BUF_ARR_LEN]); + } + } + + /** + * Does the testing. Steps: + *
<ul> + * <li>starts the Shifter thread + * <li>during the given time starts new Worker threads, keeping the number + * of live threads under the limit. + * <li>stops the Shifter thread + * </ul>
+ * + * @param timeInMillis how long to stress + * @param maxThreads the maximum number of Worker thread working together. + */ + public void test(long timeInMillis, int maxThreads) { + if (timeInMillis <= 0 || maxThreads <= 0) { + throw new IllegalArgumentException("TEST BUG: be positive!"); + } + System.out.println("%% Time to work: " + timeInMillis / 1000 + "s"); + System.out.println("%% Number of threads: " + maxThreads); + long finish = System.currentTimeMillis() + timeInMillis; + Shifter shift = new Shifter(this, 1000, (int) (windowSize * 0.9)); + shift.start(); + for (int i = 0; i < maxThreads; i++) { + new Worker(this, 100).start(); + } + try { + while (System.currentTimeMillis() < finish && errorMessage == null) { + Thread.sleep(100); + } + } catch (Throwable t) { + printAllStackTraces(System.err); + t.printStackTrace(System.err); + this.errorMessage = t.getMessage(); + } finally { + isEnough = true; + } + System.out.println("%% Total work cycles: " + counter); + if (errorMessage != null) { + throw new RuntimeException(errorMessage); + } + } + + /** + * Returns an element from from the BUFFER (an object array) to keep + * references to the storage. + * + * @return an Object[] from buffer. + */ + private Object[] getFromBuffer() { + int index = counter % (OLD + YOUNG); + synchronized (BUFFER) { + if (index < OLD) { + if (counter % 100 == (counter / 100) % 100) { + // need to generate garbage in the old gen to provoke mixed GC + return replaceInBuffer(index); + } else { + return BUFFER.get(index); + } + } else { + return replaceInBuffer(index); + } + } + } + + private Object[] replaceInBuffer(int index) { + Object[] objs = new Object[BUF_ARR_LEN]; + BUFFER.set(index, objs); + return objs; + } + + /** + * Returns a random object from the current window within the storage. + * A storage element with index from windowStart to windowStart+windowSize. + * + * @return a random element from the current window within the storage. + */ + private Object getRandomObject() { + int index = (windowStart + RND.nextInt(windowSize)) % N; + return STORAGE.get(index); + } + + private static void printAllStackTraces(PrintStream ps) { + Map traces = Thread.getAllStackTraces(); + for (Thread t : traces.keySet()) { + ps.println(t.toString() + " " + t.getState()); + for (StackTraceElement traceElement : traces.get(t)) { + ps.println("\tat " + traceElement); + } + } + } + + /** + * Thread to create a number of references from BUFFER to STORAGE. + */ + private static class Worker extends Thread { + + final TestMultiThreadStressRSet boss; + final int refs; // number of refs to OldGen + + /** + * @param boss the tests + * @param refsToOldGen how many references to the OldGen to create + */ + Worker(TestMultiThreadStressRSet boss, int refsToOldGen) { + this.boss = boss; + this.refs = refsToOldGen; + } + + @Override + public void run() { + try { + while (!boss.isEnough) { + Object[] objs = boss.getFromBuffer(); + int step = objs.length / refs; + for (int i = 0; i < refs; i += step) { + objs[i] = boss.getRandomObject(); + } + boss.counter++; + } + } catch (Throwable t) { + t.printStackTrace(System.out); + boss.errorMessage = t.getMessage(); + } + } + } + + /** + * Periodically shifts the current STORAGE window, removing references + * in BUFFER that refer to objects outside the window. 
+ */ + private static class Shifter extends Thread { + + final TestMultiThreadStressRSet boss; + final int sleepTime; + final int shift; + + Shifter(TestMultiThreadStressRSet boss, int sleepTime, int shift) { + this.boss = boss; + this.sleepTime = sleepTime; + this.shift = shift; + } + + @Override + public void run() { + try { + while (!boss.isEnough) { + Thread.sleep(sleepTime); + boss.windowStart += shift; + for (int i = 0; i < boss.OLD; i++) { + Object[] objs = boss.BUFFER.get(i); + for (int j = 0; j < objs.length; j++) { + objs[j] = null; + } + } + if (!WB.g1InConcurrentMark()) { + System.out.println("%% start CMC"); + WB.g1StartConcMarkCycle(); + } else { + System.out.println("%% CMC is already in progress"); + } + } + } catch (Throwable t) { + t.printStackTrace(System.out); + boss.errorMessage = t.getMessage(); + } + } + } +} + diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/stress/gc/TestStressIHOPMultiThread.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/stress/gc/TestStressIHOPMultiThread.java Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,216 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + + /* + * @test TestStressIHOPMultiThread + * @bug 8148397 + * @key stress + * @summary Stress test for IHOP + * @requires vm.gc=="G1" | vm.gc=="null" + * @run main/othervm/timeout=200 -Xmx128m -XX:G1HeapWastePercent=0 -XX:G1MixedGCCountTarget=1 + * -XX:+UseG1GC -XX:G1HeapRegionSize=1m -XX:+G1UseAdaptiveIHOP + * -Xlog:gc+ihop=debug,gc+ihop+ergo=debug,gc+ergo=debug:TestStressIHOPMultiThread1.log + * -Dtimeout=2 -DheapUsageMinBound=30 -DheapUsageMaxBound=80 + * -Dthreads=2 TestStressIHOPMultiThread + * @run main/othervm/timeout=200 -Xmx256m -XX:G1HeapWastePercent=0 -XX:G1MixedGCCountTarget=1 + * -XX:+UseG1GC -XX:G1HeapRegionSize=2m -XX:+G1UseAdaptiveIHOP + * -Xlog:gc+ihop=debug,gc+ihop+ergo=debug,gc+ergo=debug:TestStressIHOPMultiThread2.log + * -Dtimeout=2 -DheapUsageMinBound=60 -DheapUsageMaxBound=90 + * -Dthreads=3 TestStressIHOPMultiThread + * @run main/othervm/timeout=200 -Xmx256m -XX:G1HeapWastePercent=0 -XX:G1MixedGCCountTarget=1 + * -XX:+UseG1GC -XX:G1HeapRegionSize=4m -XX:-G1UseAdaptiveIHOP + * -Xlog:gc+ihop=debug,gc+ihop+ergo=debug,gc+ergo=debug:TestStressIHOPMultiThread3.log + * -Dtimeout=2 -DheapUsageMinBound=40 -DheapUsageMaxBound=90 + * -Dthreads=5 TestStressIHOPMultiThread + * @run main/othervm/timeout=200 -Xmx128m -XX:G1HeapWastePercent=0 -XX:G1MixedGCCountTarget=1 + * -XX:+UseG1GC -XX:G1HeapRegionSize=8m -XX:+G1UseAdaptiveIHOP + * -Xlog:gc+ihop=debug,gc+ihop+ergo=debug,gc+ergo=debug:TestStressIHOPMultiThread4.log + * -Dtimeout=2 -DheapUsageMinBound=20 -DheapUsageMaxBound=90 + * -Dthreads=10 TestStressIHOPMultiThread + * @run main/othervm/timeout=200 -Xmx512m -XX:G1HeapWastePercent=0 -XX:G1MixedGCCountTarget=1 + * -XX:+UseG1GC -XX:G1HeapRegionSize=16m -XX:+G1UseAdaptiveIHOP + * -Xlog:gc+ihop=debug,gc+ihop+ergo=debug,gc+ergo=debug:TestStressIHOPMultiThread5.log + * -Dtimeout=2 -DheapUsageMinBound=20 -DheapUsageMaxBound=90 + * -Dthreads=17 TestStressIHOPMultiThread + */ + +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.List; + +/** + * Stress test for Adaptive IHOP. Starts a number of threads that fill and free + * specified amount of memory. Tests work with enabled IHOP logging. + * + */ +public class TestStressIHOPMultiThread { + + public final static List GARBAGE = new LinkedList<>(); + + private final long HEAP_SIZE; + // Amount of memory to be allocated before iterations start + private final long HEAP_PREALLOC_SIZE; + // Amount of memory to be allocated and freed during iterations + private final long HEAP_ALLOC_SIZE; + private final int CHUNK_SIZE = 100000; + + private final int TIMEOUT; + private final int THREADS; + private final int HEAP_LOW_BOUND; + private final int HEAP_HIGH_BOUND; + + private volatile boolean running = true; + private final List threads; + + public static void main(String[] args) throws InterruptedException { + new TestStressIHOPMultiThread().start(); + + } + + TestStressIHOPMultiThread() { + + TIMEOUT = Integer.getInteger("timeout") * 60; + THREADS = Integer.getInteger("threads"); + HEAP_LOW_BOUND = Integer.getInteger("heapUsageMinBound"); + HEAP_HIGH_BOUND = Integer.getInteger("heapUsageMaxBound"); + HEAP_SIZE = Runtime.getRuntime().maxMemory(); + + HEAP_PREALLOC_SIZE = HEAP_SIZE * HEAP_LOW_BOUND / 100; + HEAP_ALLOC_SIZE = HEAP_SIZE * (HEAP_HIGH_BOUND - HEAP_LOW_BOUND) / 100; + + threads = new ArrayList<>(THREADS); + } + + public void start() throws InterruptedException { + fill(); + createThreads(); + waitForStress(); + stressDone(); + waitForFinish(); + } + + /** + * Fills HEAP_PREALLOC_SIZE bytes of garbage. 
+ */ + private void fill() { + long allocated = 0; + while (allocated < HEAP_PREALLOC_SIZE) { + GARBAGE.add(new byte[CHUNK_SIZE]); + allocated += CHUNK_SIZE; + } + } + + /** + * Creates a number of threads which will fill and free amount of memory. + */ + private void createThreads() { + for (int i = 0; i < THREADS; ++i) { + System.out.println("Create thread " + i); + AllocationThread thread =new TestStressIHOPMultiThread.AllocationThread(i, HEAP_ALLOC_SIZE / THREADS); + // Put reference to thread garbage into common garbage for avoiding possible optimization. + GARBAGE.add(thread.getList()); + threads.add(thread); + } + threads.forEach(t -> t.start()); + } + + /** + * Wait each thread for finishing + */ + private void waitForFinish() { + threads.forEach(thread -> { + thread.silentJoin(); + }); + } + + private boolean isRunning() { + return running; + } + + private void stressDone() { + running = false; + } + + private void waitForStress() throws InterruptedException { + Thread.sleep(TIMEOUT * 1000); + } + + private class AllocationThread extends Thread { + + private final List garbage; + + private final long amountOfGarbage; + private final int threadId; + + public AllocationThread(int id, long amount) { + super("Thread " + id); + threadId = id; + amountOfGarbage = amount; + garbage = new LinkedList<>(); + } + + /** + * Returns list of garbage. + * @return List with thread garbage. + */ + public List getList(){ + return garbage; + } + + @Override + public void run() { + System.out.println("Start the thread " + threadId); + while (TestStressIHOPMultiThread.this.isRunning()) { + allocate(amountOfGarbage); + free(); + } + } + + private void silentJoin() { + System.out.println("Join the thread " + threadId); + try { + join(); + } catch (InterruptedException ie) { + throw new RuntimeException(ie); + } + } + + /** + * Allocates thread local garbage + */ + private void allocate(long amount) { + long allocated = 0; + while (allocated < amount && TestStressIHOPMultiThread.this.isRunning()) { + garbage.add(new byte[CHUNK_SIZE]); + allocated += CHUNK_SIZE; + } + } + + /** + * Frees thread local garbage + */ + private void free() { + garbage.clear(); + } + } +} diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/testlibrary/ClassFileInstaller.java --- a/hotspot/test/testlibrary/ClassFileInstaller.java Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/test/testlibrary/ClassFileInstaller.java Wed Jul 05 21:35:27 2017 +0200 @@ -21,6 +21,10 @@ * questions. */ +import java.io.ByteArrayInputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; import java.io.FileNotFoundException; import java.io.InputStream; import java.io.ByteArrayInputStream; @@ -28,58 +32,226 @@ import java.nio.file.Path; import java.nio.file.Paths; import java.nio.file.StandardCopyOption; +import java.util.zip.ZipEntry; +import java.util.zip.ZipOutputStream; /** - * Dump a class file for a class on the class path in the current directory + * Dump a class file for a class on the class path in the current directory, or + * in the specified JAR file. This class is usually used when you build a class + * from a test library, but want to use this class in a sub-process. 
+ * + * For example, to build the following library class: + * test/lib/sun/hotspot/WhiteBox.java + * + * You would use the following tags: + * + * @library /test/lib + * @build sun.hotspot.WhiteBox + * + * JTREG would build the class file under + * ${JTWork}/classes/test/lib/sun/hotspot/WhiteBox.class + * + * With you run your main test class using "@run main MyMainClass", JTREG would setup the + * -classpath to include "${JTWork}/classes/test/lib/", so MyMainClass would be able to + * load the WhiteBox class. + * + * However, if you run a sub process, and do not wish to use the exact same -classpath, + * You can use ClassFileInstaller to ensure that WhiteBox is available in the current + * directory of your test: + * + * @run main ClassFileInstaller sun.hotspot.WhiteBox + * + * Or, you can use the -jar option to store the class in the specified JAR file. If a relative + * path name is given, the JAR file would be relative to the current directory of + * + * @run main ClassFileInstaller -jar myjar.jar sun.hotspot.WhiteBox */ public class ClassFileInstaller { /** + * You can enable debug tracing of ClassFileInstaller by running JTREG with + * jtreg -DClassFileInstaller.debug=true ... + */ + public static boolean DEBUG = Boolean.getBoolean("ClassFileInstaller.debug"); + + /** * @param args The names of the classes to dump * @throws Exception */ public static void main(String... args) throws Exception { - for (String arg : args) { - writeClassToDisk(arg); + if (args.length > 1 && args[0].equals("-jar")) { + if (args.length < 2) { + throw new RuntimeException("Usage: ClassFileInstaller \n" + + "where possible options include:\n" + + " -jar Write to the JAR file "); + } + writeJar(args[1], null, args, 2, args.length); + } else { + if (DEBUG) { + System.out.println("ClassFileInstaller: Writing to " + System.getProperty("user.dir")); + } + for (String arg : args) { + writeClassToDisk(arg); + } + } + } + + public static class Manifest { + private InputStream in; + + private Manifest(InputStream in) { + this.in = in; + } + + static Manifest fromSourceFile(String fileName) throws Exception { + String pathName = System.getProperty("test.src") + File.separator + fileName; + return new Manifest(new FileInputStream(pathName)); + } + + // Example: + // String manifest = "Premain-Class: RedefineClassHelper\n" + + // "Can-Redefine-Classes: true\n"; + // ClassFileInstaller.writeJar("redefineagent.jar", + // ClassFileInstaller.Manifest.fromString(manifest), + // "RedefineClassHelper"); + static Manifest fromString(String manifest) throws Exception { + return new Manifest(new ByteArrayInputStream(manifest.getBytes())); + } + + public InputStream getInputStream() { + return in; } } + private static void writeJar(String jarFile, Manifest manifest, String classes[], int from, int to) throws Exception { + if (DEBUG) { + System.out.println("ClassFileInstaller: Writing to " + getJarPath(jarFile)); + } + + (new File(jarFile)).delete(); + FileOutputStream fos = new FileOutputStream(jarFile); + ZipOutputStream zos = new ZipOutputStream(fos); + + // The manifest must be the first or second entry. See comments in JarInputStream + // constructor and JDK-5046178. 
+ if (manifest != null) { + writeToDisk(zos, "META-INF/MANIFEST.MF", manifest.getInputStream()); + } + + for (int i=from; i 0) { pathName = prependPath + "/" + pathName; } - writeToDisk(pathName, is); + writeToDisk(zos, pathName, is); } public static void writeClassToDisk(String className, byte[] bytecode) throws Exception { - writeClassToDisk(className, bytecode, ""); + writeClassToDisk(null, className, bytecode); + } + private static void writeClassToDisk(ZipOutputStream zos, String className, byte[] bytecode) throws Exception { + writeClassToDisk(zos, className, bytecode, ""); } public static void writeClassToDisk(String className, byte[] bytecode, String prependPath) throws Exception { + writeClassToDisk(null, className, bytecode, prependPath); + } + private static void writeClassToDisk(ZipOutputStream zos, String className, byte[] bytecode, String prependPath) throws Exception { // Convert dotted class name to a path to a class file String pathName = className.replace('.', '/').concat(".class"); if (prependPath.length() > 0) { pathName = prependPath + "/" + pathName; } - writeToDisk(pathName, new ByteArrayInputStream(bytecode)); + writeToDisk(zos, pathName, new ByteArrayInputStream(bytecode)); } - - private static void writeToDisk(String pathName, InputStream is) throws Exception { - // Create the class file's package directory - Path p = Paths.get(pathName); - if (pathName.contains("/")) { - Files.createDirectories(p.getParent()); + private static void writeToDisk(ZipOutputStream zos, String pathName, InputStream is) throws Exception { + if (DEBUG) { + System.out.println("ClassFileInstaller: Writing " + pathName); } - // Create the class file - Files.copy(is, p, StandardCopyOption.REPLACE_EXISTING); + if (zos != null) { + ZipEntry ze = new ZipEntry(pathName); + zos.putNextEntry(ze); + byte[] buf = new byte[1024]; + int len; + while ((len = is.read(buf))>0){ + zos.write(buf, 0, len); + } + } else { + // Create the class file's package directory + Path p = Paths.get(pathName); + if (pathName.contains("/")) { + Files.createDirectories(p.getParent()); + } + // Create the class file + Files.copy(is, p, StandardCopyOption.REPLACE_EXISTING); + } + is.close(); } } diff -r 3414aeff4a80 -r ee1b8619eddb hotspot/test/testlibrary/jittester/src/jdk/test/lib/jittester/visitors/JavaCodeVisitor.java --- a/hotspot/test/testlibrary/jittester/src/jdk/test/lib/jittester/visitors/JavaCodeVisitor.java Mon Apr 18 16:18:56 2016 +0100 +++ b/hotspot/test/testlibrary/jittester/src/jdk/test/lib/jittester/visitors/JavaCodeVisitor.java Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -164,21 +164,15 @@ code.append(node.getChildren().stream() .map(p -> p.accept(this)) .collect(Collectors.joining("][", "[", "]"))); - code.append(";\n") - .append(PrintingUtils.align(node.getParent().getLevel())) + code.append(";\n"); + if (!TypeList.isBuiltIn(arrayType)) { + code.append(PrintingUtils.align(node.getParent().getLevel())) .append("java.util.Arrays.fill(") .append(name) - .append(", "); - if (TypeList.find("boolean") == arrayType) { - code.append("false"); - } else if (TypeList.isBuiltIn(arrayType)) { - code.append("0"); - } else { - code.append("new ") + .append(", new ") .append(type) - .append("()"); + .append("());\n"); } - code.append(");\n"); return code.toString(); } diff -r 3414aeff4a80 -r ee1b8619eddb make/CompileJavaModules.gmk --- a/make/CompileJavaModules.gmk Mon Apr 18 16:18:56 2016 +0100 +++ b/make/CompileJavaModules.gmk Wed Jul 05 21:35:27 2017 +0200 @@ -469,32 +469,7 @@ ################################################################################ # Setup the compilation for the module # -# Order src dirs in order of override with the most important first. Generated -# source before static source and platform specific source before shared. -# -GENERATED_SRC_DIRS += \ - $(SUPPORT_OUTPUTDIR)/gensrc \ - # - -TOP_SRC_DIRS += \ - $(HOTSPOT_TOPDIR)/src \ - $(CORBA_TOPDIR)/src \ - $(JDK_TOPDIR)/src \ - $(LANGTOOLS_TOPDIR)/src \ - $(JAXP_TOPDIR)/src \ - $(JAXWS_TOPDIR)/src \ - $(NASHORN_TOPDIR)/src \ - # - -SRC_SUBDIRS += $(OPENJDK_TARGET_OS)/classes -ifneq ($(OPENJDK_TARGET_OS), $(OPENJDK_TARGET_OS_TYPE)) - SRC_SUBDIRS += $(OPENJDK_TARGET_OS_TYPE)/classes -endif -SRC_SUBDIRS += share/classes - -MODULE_SRC_DIRS := $(strip \ - $(addsuffix /$(MODULE), $(GENERATED_SRC_DIRS) $(IMPORT_MODULES_SRC)) \ - $(foreach sub, $(SRC_SUBDIRS), $(addsuffix /$(MODULE)/$(sub), $(TOP_SRC_DIRS)))) +MODULE_SRC_DIRS := $(call FindModuleSrcDirs, $(MODULE)) # The JDK_USER_DEFINED_FILTER is a poor man's incremental build: by specifying # JDK_FILTER at the make command line, only a subset of the JDK java files will @@ -502,27 +477,20 @@ # space separated list. JDK_USER_DEFINED_FILTER := $(strip $(subst $(COMMA),$(SPACE), $(JDK_FILTER))) -# Rewrite the MODULE_SRC_DIRS with a wildcard for the module so that all module -# source dirs are available on the path. -MODULESOURCEPATH := $(subst $(SPACE),$(PATH_SEP),$(subst $(MODULE),*,$(MODULE_SRC_DIRS))) +# Get the complete module source path. +MODULESOURCEPATH := $(call GetModuleSrcPath) -# Add imported modules to the moduleclasspath -MODULECLASSPATH := $(subst $(SPACE),$(PATH_SEP), $(IMPORT_MODULES_CLASSES)) +# Add imported modules to the modulepath +MODULEPATH := $(call PathList, $(IMPORT_MODULES_CLASSES)) ifeq ($(MODULE), jdk.vm.ci) ## WORKAROUND jdk.vm.ci source structure issue JVMCI_MODULESOURCEPATH := $(MODULESOURCEPATH) \ $(subst /$(MODULE)/,/*/, $(filter-out %processor/src, \ $(wildcard $(HOTSPOT_TOPDIR)/src/jdk.vm.ci/share/classes/*/src))) - MODULESOURCEPATH := $(subst $(SPACE),$(PATH_SEP), $(JVMCI_MODULESOURCEPATH)) + MODULESOURCEPATH := $(call PathList, $(JVMCI_MODULESOURCEPATH)) endif -# Make sure the generated source base dirs exist. Not all modules have generated -# source in all of these directories and because of timing, all of them might not -# exist at the time this makefile gets called. Javac will complain if there are -# missing directories in the moduleclasspath. 
-$(call MakeDir, $(GENERATED_SRC_DIRS)) - $(eval $(call SetupJavaCompilation, $(MODULE), \ SETUP := $(if $($(MODULE)_SETUP), $($(MODULE)_SETUP), GENERATE_JDKBYTECODE), \ MODULE := $(MODULE), \ @@ -532,8 +500,8 @@ HEADERS := $(SUPPORT_OUTPUTDIR)/headers, \ ADD_JAVAC_FLAGS := \ $($(MODULE)_ADD_JAVAC_FLAGS) \ - -modulesourcepath "$(MODULESOURCEPATH)" \ - $(if $(MODULECLASSPATH), -modulepath "$(MODULECLASSPATH)") \ + -modulesourcepath $(MODULESOURCEPATH) \ + -modulepath $(MODULEPATH) \ -system none, \ )) @@ -574,8 +542,9 @@ ifneq ($(wildcard $(IMPORT_MODULES_CLASSES)/$(MODULE)), ) $(JDK_OUTPUTDIR)/modules/$(MODULE)/_imported.marker: \ $(call CacheFind, $(IMPORT_MODULES_CLASSES)/$(MODULE)) - $(RM) -r $(@D) - $(MKDIR) -p $(@D) + $(call MakeDir, $(@D)) + # Do not delete marker and build meta data files + $(RM) -r $(filter-out $(@D)/_%, $(wildcard $(@D)/*)) $(CP) -R $(IMPORT_MODULES_CLASSES)/$(MODULE)/* $(@D)/ $(TOUCH) $@ diff -r 3414aeff4a80 -r ee1b8619eddb make/GensrcModuleInfo.gmk --- a/make/GensrcModuleInfo.gmk Mon Apr 18 16:18:56 2016 +0100 +++ b/make/GensrcModuleInfo.gmk Wed Jul 05 21:35:27 2017 +0200 @@ -49,7 +49,6 @@ include $(SPEC) include MakeBase.gmk include Modules.gmk -#include TextFileProcessing.gmk ################################################################################ # Define this here since jdk/make/Tools.gmk cannot be included from the top @@ -64,25 +63,8 @@ # Name of data file. Keep module-info.java.ext until javafx has changed. MOD_FILENAME := module-info.java.extra module-info.java.ext -# List all the possible sub directories inside a module source directory where -# data might be stored. -CLASSES_SUBDIRS += $(OPENJDK_TARGET_OS)/classes -ifneq ($(OPENJDK_TARGET_OS), $(OPENJDK_TARGET_OS_TYPE)) - CLASSES_SUBDIRS += $(OPENJDK_TARGET_OS_TYPE)/classes -endif -CLASSES_SUBDIRS += share/classes - -# TODO: When the deploy build is better integrated, this will get added globally -# but for now need to add it here. -ifeq ($(BUILD_DEPLOY), true) - ALL_TOP_SRC_DIRS += $(DEPLOY_TOPDIR)/src -endif - # Construct all possible src directories for the module. -MODULE_CLASSES_DIRS := $(strip \ - $(foreach sub, $(CLASSES_SUBDIRS), \ - $(addsuffix /$(MODULE)/$(sub), $(ALL_TOP_SRC_DIRS))) \ - $(addsuffix /$(MODULE), $(IMPORT_MODULES_SRC))) +MODULE_CLASSES_DIRS := $(call FindModuleSrcDirs, $(MODULE)) # Find all the .extra files in the src dirs. MOD_FILES := $(wildcard $(foreach f, $(MOD_FILENAME), $(addsuffix /$(f), \ @@ -125,20 +107,6 @@ TARGETS += $(SUPPORT_OUTPUTDIR)/gensrc/$(MODULE)/module-info.java endif -# This doesn't work because javac only accepts one single exports line per -# exported package. 
- # Restore the modifications to separate lines with spaces -# MODIFICATIONS := $(subst /,$(SPACE),$(MODIFICATIONS)) - -# ifneq ($(MODIFICATIONS), ) -# $(eval $(call SetupTextFileProcessing, PROCESS_MODULE_INFO, \ -# SOURCE_FILES := $(firstword $(call FindAllModuleInfos, $(MODULE))), \ -# OUTPUT_FILE := $(SUPPORT_OUTPUTDIR)/gensrc/$(MODULE)/module-info.java, \ -# REPLACEMENTS := } => $(MODIFICATIONS) }, \ -# )) - -# TARGETS += $(PROCESS_MODULE_INFO) -# endif endif # If no modifications are found for this module, remove any module-info.java diff -r 3414aeff4a80 -r ee1b8619eddb make/Javadoc.gmk --- a/make/Javadoc.gmk Mon Apr 18 16:18:56 2016 +0100 +++ b/make/Javadoc.gmk Wed Jul 05 21:35:27 2017 +0200 @@ -235,6 +235,11 @@ JRE_API_DOCSDIR = $(DOCSDIR)/jre/api PLATFORM_DOCSDIR = $(DOCSDIR)/platform +JAVADOC_ARCHIVE_NAME := jdk-$(VERSION_STRING)-docs.zip +JAVADOC_ARCHIVE_ASSEMBLY_DIR := $(DOCSTMPDIR)/zip-docs +JAVADOC_ARCHIVE_DIR := $(OUTPUT_ROOT)/bundles +JAVADOC_ARCHIVE := $(JAVADOC_ARCHIVE_DIR)/$(JAVADOC_ARCHIVE_NAME) + # The core api index file is the target for the core api javadocs rule # and needs to be defined early so that all other javadoc rules may # depend on it. @@ -378,6 +383,13 @@ all: docs docs: coredocs otherdocs +# +# Optional target which bundles all generated javadocs into a zip archive. +# The dependency on docs is handled in Main.gmk. +# + +zip-docs: $(JAVADOC_ARCHIVE) + ############################################################# # # coredocs @@ -1671,6 +1683,28 @@ otherdocs: $(ALL_OTHER_TARGETS) +# +# Add the core docs as prerequisite to the archive to trigger a rebuild +# if the core docs were rebuilt. Ideally any doc rebuild should trigger +# this, but the way prerequisites are currently setup in this file, that +# is hard to achieve. 
+# + +$(JAVADOC_ARCHIVE): $(COREAPI_INDEX_FILE) + $(call LogInfo, Compressing javadoc to single $(JAVADOC_ARCHIVE_NAME)) + $(MKDIR) -p $(JAVADOC_ARCHIVE_DIR) + $(RM) -r $(JAVADOC_ARCHIVE_ASSEMBLY_DIR) + $(MKDIR) -p $(JAVADOC_ARCHIVE_ASSEMBLY_DIR) + all_roots=`$(FIND) $(DOCSDIR) | $(GREP) index.html | grep -v old/doclet`; \ + pushd $(JAVADOC_ARCHIVE_ASSEMBLY_DIR); \ + for index_file in $${all_roots} ; do \ + target_dir=`dirname $${index_file}`; \ + name=`$(ECHO) $${target_dir} | $(SED) "s;/spec;;" | $(SED) "s;.*/;;"`; \ + $(LN) -s $${target_dir} $${name}; \ + done; \ + $(ZIP) -q -r $(JAVADOC_ARCHIVE) * ; \ + popd ; + ############################################################# .PHONY: all docs coredocs otherdocs \ - $(ALL_OTHER_TARGETS) + $(ALL_OTHER_TARGETS) zip-docs diff -r 3414aeff4a80 -r ee1b8619eddb make/Main.gmk --- a/make/Main.gmk Mon Apr 18 16:18:56 2016 +0100 +++ b/make/Main.gmk Wed Jul 05 21:35:27 2017 +0200 @@ -333,6 +333,9 @@ docs-jvmtidoc: +($(CD) $(SRC_ROOT)/make && $(MAKE) $(MAKE_ARGS) -f Javadoc.gmk jvmtidocs) +zip-docs: docs-javadoc docs-jvmtidoc + +($(CD) $(SRC_ROOT)/make && $(MAKE) $(MAKE_ARGS) -f Javadoc.gmk zip-docs) + ALL_TARGETS += docs-javadoc docs-jvmtidoc ################################################################################ @@ -385,9 +388,27 @@ build-test-lib: +($(CD) $(TOPDIR)/make/test && $(MAKE) $(MAKE_ARGS) -f BuildTestLib.gmk) +ifeq ($(BUILD_FAILURE_HANDLER), true) + # Builds the failure handler jtreg extension + build-test-failure-handler: + +($(CD) $(TOPDIR)/make/test && $(MAKE) $(MAKE_ARGS) \ + -f BuildFailureHandler.gmk build) + + # Runs the tests for the failure handler jtreg extension + test-failure-handler: + +($(CD) $(TOPDIR)/make/test && $(MAKE) $(MAKE_ARGS) \ + -f BuildFailureHandler.gmk test) + + # Copies the failure handler jtreg extension into the test image + test-image-failure-handler: + +($(CD) $(TOPDIR)/make/test && $(MAKE) $(MAKE_ARGS) \ + -f BuildFailureHandler.gmk images) +endif + ALL_TARGETS += prepare-test-image build-test-hotspot-jtreg-native \ test-image-hotspot-jtreg-native build-test-jdk-jtreg-native \ - test-image-jdk-jtreg-native build-test-lib + test-image-jdk-jtreg-native build-test-lib build-test-failure-handler \ + test-failure-handler test-image-failure-handler ################################################################################ # Run tests @@ -582,6 +603,12 @@ build-test-lib: java + build-test-failure-handler: interim-langtools + + test-failure-handler: build-test-failure-handler + + test-image-failure-handler: build-test-failure-handler + build-test-hotspot-jtreg-native: buildtools-jdk build-test-jdk-jtreg-native: buildtools-jdk @@ -667,11 +694,11 @@ endif # This target builds the documentation image -docs-image: docs-javadoc docs-jvmtidoc +docs-image: zip-docs # This target builds the test image test-image: prepare-test-image test-image-hotspot-jtreg-native \ - test-image-jdk-jtreg-native + test-image-jdk-jtreg-native test-image-failure-handler # all-images is the top-most target, it builds all our deliverables ("images"). 
all-images: product-images test-image docs-image @@ -691,7 +718,7 @@ docs: docs-image all: all-images -ALL_TARGETS += default jdk images docs all +ALL_TARGETS += default jdk images docs all zip-docs ################################################################################ ################################################################################ diff -r 3414aeff4a80 -r ee1b8619eddb make/MainSupport.gmk --- a/make/MainSupport.gmk Mon Apr 18 16:18:56 2016 +0100 +++ b/make/MainSupport.gmk Wed Jul 05 21:35:27 2017 +0200 @@ -104,6 +104,7 @@ @$(PRINTF) "\n" $(LOG_DEBUG) $(RM) -r $(SUPPORT_OUTPUTDIR)/docs $(RM) -r $(IMAGES_OUTPUTDIR)/docs + $(RM) $(OUTPUT_ROOT)/bundles/jdk-*-docs.zip @$(PRINTF) " done\n" endef diff -r 3414aeff4a80 -r ee1b8619eddb make/common/MakeBase.gmk --- a/make/common/MakeBase.gmk Mon Apr 18 16:18:56 2016 +0100 +++ b/make/common/MakeBase.gmk Wed Jul 05 21:35:27 2017 +0200 @@ -723,12 +723,13 @@ endif ################################################################################ -# Return a string suitable for use after a -classpath option. It will correct and safe to use -# on all platforms. Arguments are given as space separate classpath entries. +# Return a string suitable for use after a -classpath or -modulepath option. It +# will be correct and safe to use on all platforms. Arguments are given as space +# separate classpath entries. Safe for multiple nested calls. # param 1 : A space separated list of classpath entries # The surrounding strip is needed to keep additional whitespace out PathList = \ - "$(subst $(SPACE),$(PATH_SEP),$(strip $1))" + "$(subst $(SPACE),$(PATH_SEP),$(strip $(subst $(DQUOTE),,$1)))" ################################################################################ diff -r 3414aeff4a80 -r ee1b8619eddb make/common/Modules.gmk --- a/make/common/Modules.gmk Mon Apr 18 16:18:56 2016 +0100 +++ b/make/common/Modules.gmk Wed Jul 05 21:35:27 2017 +0200 @@ -138,26 +138,35 @@ ################################################################################ # Module list macros -# Use append so that the custom extension may add to this variable +# Use append so that the custom extension may add to these variables -ALL_TOP_SRC_DIRS += \ +GENERATED_SRC_DIRS += \ + $(SUPPORT_OUTPUTDIR)/gensrc \ + # + +TOP_SRC_DIRS += \ + $(CORBA_TOPDIR)/src \ $(HOTSPOT_TOPDIR)/src \ $(JDK_TOPDIR)/src \ $(LANGTOOLS_TOPDIR)/src \ - $(CORBA_TOPDIR)/src \ $(JAXP_TOPDIR)/src \ $(JAXWS_TOPDIR)/src \ $(NASHORN_TOPDIR)/src \ # +SRC_SUBDIRS += $(OPENJDK_TARGET_OS)/classes +ifneq ($(OPENJDK_TARGET_OS), $(OPENJDK_TARGET_OS_TYPE)) + SRC_SUBDIRS += $(OPENJDK_TARGET_OS_TYPE)/classes +endif +SRC_SUBDIRS += share/classes + # Find all module-info.java files for the current build target platform and # configuration. # Param 1 - Module to find for, set to * for finding all FindAllModuleInfos = \ $(wildcard \ - $(patsubst %,%/$(strip $1)/$(OPENJDK_TARGET_OS)/classes/module-info.java, $(ALL_TOP_SRC_DIRS)) \ - $(patsubst %,%/$(strip $1)/$(OPENJDK_TARGET_OS_TYPE)/classes/module-info.java, $(ALL_TOP_SRC_DIRS)) \ - $(patsubst %,%/$(strip $1)/share/classes/module-info.java, $(ALL_TOP_SRC_DIRS)) \ + $(foreach sub, $(SRC_SUBDIRS), \ + $(patsubst %,%/$(strip $1)/$(sub)/module-info.java, $(TOP_SRC_DIRS))) \ $(patsubst %,%/$(strip $1)/module-info.java, $(IMPORT_MODULES_SRC))) # Extract the module names from the paths of module-info.java files. 
The @@ -178,6 +187,19 @@ FindImportedModules = \ $(if $(IMPORT_MODULES_CLASSES), $(notdir $(wildcard $(IMPORT_MODULES_CLASSES)/*))) +# Find all source dirs for a particular module +# $1 - Module to find source dirs for +FindModuleSrcDirs = \ + $(strip $(wildcard \ + $(addsuffix /$(strip $1), $(GENERATED_SRC_DIRS) $(IMPORT_MODULES_SRC)) \ + $(foreach sub, $(SRC_SUBDIRS), $(addsuffix /$(strip $1)/$(sub), $(TOP_SRC_DIRS))))) + +# Construct the complete module source path +GetModuleSrcPath = \ + $(call PathList, \ + $(addsuffix /*, $(GENERATED_SRC_DIRS) $(IMPORT_MODULES_SRC)) \ + $(foreach sub, $(SRC_SUBDIRS), $(addsuffix /*/$(sub), $(TOP_SRC_DIRS)))) + ################################################################################ # Extract module dependencies from module-info.java files. diff -r 3414aeff4a80 -r ee1b8619eddb make/jprt.properties --- a/make/jprt.properties Mon Apr 18 16:18:56 2016 +0100 +++ b/make/jprt.properties Wed Jul 05 21:35:27 2017 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 2006, 2015, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2006, 2016, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -311,13 +311,13 @@ # Platforms built for hotspot push jobs my.build.targets.hotspot= \ - solaris_sparcv9_5.11-{product|fastdebug}, \ + solaris_sparcv9_5.11-{product|fastdebug}, \ solaris_x64_5.11-{product|fastdebug}, \ linux_i586_3.8-{product|fastdebug}, \ - linux_x64_3.8-{product|fastdebug}, \ + linux_x64_3.8-{product|fastdebug}, \ macosx_x64_10.9-{product|fastdebug}, \ windows_i586_6.3-{product|fastdebug}, \ - windows_x64_6.3-{product|fastdebug}, \ + windows_x64_6.3-{product|fastdebug}, \ solaris_x64_5.11-{fastdebugOpen}, \ linux_x64_3.8-{productOpen}, \ ${my.additional.build.targets.hotspot} @@ -346,18 +346,15 @@ solaris_x64_5.11-{product|fastdebug}-c2-GCBasher_G1 my.test.targets.hotspot.linux.i586= \ - linux_i586_3.8-{product|fastdebug}-{c1|c2}-jvm98, \ + linux_i586_3.8-{product|fastdebug}-c2-jvm98, \ linux_i586_3.8-{product|fastdebug}-c2-jvm98_nontiered, \ - linux_i586_3.8-{product|fastdebug}-{c1|c2}-scimark, \ - linux_i586_3.8-product-c1-runThese8_Xcomp_lang, \ - linux_i586_3.8-product-c1-runThese8_Xcomp_vm, \ - linux_i586_3.8-fastdebug-c1-runThese8_Xshare, \ + linux_i586_3.8-{product|fastdebug}-c2-scimark, \ linux_i586_3.8-fastdebug-c2-runThese8_Xcomp_lang, \ linux_i586_3.8-fastdebug-c2-runThese8_Xcomp_vm, \ - linux_i586_3.8-{product|fastdebug}-{c1|c2}-GCBasher_SerialGC, \ - linux_i586_3.8-{product|fastdebug}-{c1|c2}-GCBasher_ParallelGC, \ - linux_i586_3.8-{product|fastdebug}-{c1|c2}-GCBasher_CMS, \ - linux_i586_3.8-{product|fastdebug}-{c1|c2}-GCBasher_G1 + linux_i586_3.8-{product|fastdebug}-c2-GCBasher_SerialGC, \ + linux_i586_3.8-{product|fastdebug}-c2-GCBasher_ParallelGC, \ + linux_i586_3.8-{product|fastdebug}-c2-GCBasher_CMS, \ + linux_i586_3.8-{product|fastdebug}-c2-GCBasher_G1 my.test.targets.hotspot.linux.x64= \ linux_x64_3.8-{product|fastdebug}-c2-jvm98, \ @@ -378,17 +375,16 @@ macosx_x64_10.9-{product|fastdebug}-c2-GCBasher_G1 my.test.targets.hotspot.windows.i586= \ - windows_i586_6.3-{product|fastdebug}-{c1|c2}-jvm98, \ + windows_i586_6.3-{product|fastdebug}-c2-jvm98, \ windows_i586_6.3-{product|fastdebug}-c2-jvm98_nontiered, \ - windows_i586_6.3-{product|fastdebug}-{c1|c2}-scimark, \ - windows_i586_6.3-product-{c1|c2}-runThese8, \ - windows_i586_6.3-product-{c1|c2}-runThese8_Xcomp_lang, \ - 
windows_i586_6.3-product-{c1|c2}-runThese8_Xcomp_vm, \ - windows_i586_6.3-fastdebug-c1-runThese8_Xshare, \ - windows_i586_6.3-{product|fastdebug}-{c1|c2}-GCBasher_SerialGC, \ - windows_i586_6.3-{product|fastdebug}-{c1|c2}-GCBasher_ParallelGC, \ - windows_i586_6.3-{product|fastdebug}-{c1|c2}-GCBasher_CMS, \ - windows_i586_6.3-{product|fastdebug}-{c1|c2}-GCBasher_G1 + windows_i586_6.3-{product|fastdebug}-c2-scimark, \ + windows_i586_6.3-product-c2-runThese8, \ + windows_i586_6.3-product-c2-runThese8_Xcomp_lang, \ + windows_i586_6.3-product-c2-runThese8_Xcomp_vm, \ + windows_i586_6.3-{product|fastdebug}-c2-GCBasher_SerialGC, \ + windows_i586_6.3-{product|fastdebug}-c2-GCBasher_ParallelGC, \ + windows_i586_6.3-{product|fastdebug}-c2-GCBasher_CMS, \ + windows_i586_6.3-{product|fastdebug}-c2-GCBasher_G1 my.test.targets.hotspot.windows.x64= \ windows_x64_6.3-{product|fastdebug}-c2-jvm98, \ @@ -443,22 +439,21 @@ linux_x64_3.8-fastdebug-c2-GROUP, \ macosx_x64_10.9-fastdebug-c2-GROUP, \ windows_i586_6.3-fastdebug-c2-GROUP, \ - windows_x64_6.3-fastdebug-c2-GROUP, \ - linux_i586_3.8-fastdebug-c1-GROUP, \ - windows_i586_6.3-fastdebug-c1-GROUP + windows_x64_6.3-fastdebug-c2-GROUP # Hotspot jtreg tests -my.make.rule.test.targets.hotspot.reg= \ - ${my.make.rule.test.targets.hotspot.reg.group:GROUP=hotspot_compiler_1}, \ - ${my.make.rule.test.targets.hotspot.reg.group:GROUP=hotspot_compiler_2}, \ - ${my.make.rule.test.targets.hotspot.reg.group:GROUP=hotspot_compiler_3}, \ - ${my.make.rule.test.targets.hotspot.reg.group:GROUP=hotspot_compiler_closed}, \ - ${my.make.rule.test.targets.hotspot.reg.group:GROUP=hotspot_gc}, \ - ${my.make.rule.test.targets.hotspot.reg.group:GROUP=hotspot_gc_closed}, \ - ${my.make.rule.test.targets.hotspot.reg.group:GROUP=hotspot_gc_gcold}, \ - ${my.make.rule.test.targets.hotspot.reg.group:GROUP=hotspot_runtime}, \ - ${my.make.rule.test.targets.hotspot.reg.group:GROUP=hotspot_serviceability}, \ - ${my.make.rule.test.targets.hotspot.reg.group:GROUP=jdk_svc_sanity}, \ +my.make.rule.test.targets.hotspot.reg= \ + ${my.make.rule.test.targets.hotspot.reg.group:GROUP=hotspot_fast_compiler_1}, \ + ${my.make.rule.test.targets.hotspot.reg.group:GROUP=hotspot_fast_compiler_2}, \ + ${my.make.rule.test.targets.hotspot.reg.group:GROUP=hotspot_fast_compiler_3}, \ + ${my.make.rule.test.targets.hotspot.reg.group:GROUP=hotspot_fast_compiler_closed}, \ + ${my.make.rule.test.targets.hotspot.reg.group:GROUP=hotspot_fast_gc_1}, \ + ${my.make.rule.test.targets.hotspot.reg.group:GROUP=hotspot_fast_gc_2}, \ + ${my.make.rule.test.targets.hotspot.reg.group:GROUP=hotspot_fast_gc_closed}, \ + ${my.make.rule.test.targets.hotspot.reg.group:GROUP=hotspot_fast_gc_gcold}, \ + ${my.make.rule.test.targets.hotspot.reg.group:GROUP=hotspot_fast_runtime}, \ + ${my.make.rule.test.targets.hotspot.reg.group:GROUP=hotspot_fast_serviceability}, \ + ${my.make.rule.test.targets.hotspot.reg.group:GROUP=jdk_svc_sanity}, \ ${my.additional.make.rule.test.targets.hotspot.reg} # Other Makefile based Hotspot tests diff -r 3414aeff4a80 -r ee1b8619eddb make/test/BuildFailureHandler.gmk --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/make/test/BuildFailureHandler.gmk Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,128 @@ +# +# Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. Oracle designates this +# particular file as subject to the "Classpath" exception as provided +# by Oracle in the LICENSE file that accompanied this code. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +# or visit www.oracle.com if you need additional information or have any +# questions. +# + +default: build + +include $(SPEC) +include MakeBase.gmk +include JavaCompilation.gmk +include SetupJavaCompilers.gmk +include NativeCompilation.gmk + +TARGETS := + +################################################################################ + +FH_BASEDIR := $(SRC_ROOT)/test/failure_handler +FH_SUPPORT := $(SUPPORT_OUTPUTDIR)/test/failure_handler +FH_JAR := $(FH_SUPPORT)/jtregFailureHandler.jar + +JTREG_JAR := $(JT_HOME)/lib/jtreg.jar +ifeq ($(wildcard $(JTREG_JAR)), ) + $(error Cannot build failure handler without jtreg) +endif +# tools.jar is only needed if it exists in the boot jdk +TOOLS_JAR := $(wildcard $(BOOT_JDK)/lib/tools.jar) + +FH_CLASSPATH := $(call PathList, $(JTREG_JAR) $(TOOLS_JAR)) + +$(eval $(call SetupJavaCompilation, BUILD_FAILURE_HANDLER, \ + SETUP := GENERATE_OLDBYTECODE, \ + SRC := $(FH_BASEDIR)/src/share/classes $(FH_BASEDIR)/src/share/conf, \ + BIN := $(FH_SUPPORT)/classes, \ + COPY := .properties, \ + CLASSPATH := $(JTREG_JAR) $(TOOLS_JAR), \ + JAR := $(FH_JAR), \ +)) + +TARGETS += $(BUILD_FAILURE_HANDLER) + +################################################################################ + +ifeq ($(OPENJDK_TARGET_OS), windows) + + $(eval $(call SetupNativeCompilation, BUILD_LIBTIMEOUT_HANDLER, \ + LIBRARY := timeoutHandler, \ + SRC := $(FH_BASEDIR)/src/windows/native/libtimeoutHandler, \ + OBJECT_DIR := $(FH_SUPPORT)/libtimeoutHandler, \ + OUTPUT_DIR := $(FH_SUPPORT), \ + CFLAGS := $(CFLAGS_JDKLIB), \ + LDFLAGS := $(LDFLAGS_JDKLIB), \ + OPTIMIZATION := LOW, \ + )) + + TARGETS += $(BUILD_LIBTIMEOUT_HANDLER) + +endif + +################################################################################ +# Targets for building test-image. 
+################################################################################ + +# Copy to hotspot jtreg test image +$(eval $(call SetupCopyFiles, COPY_FH, \ + SRC := $(FH_SUPPORT), \ + DEST := $(TEST_IMAGE_DIR)/failure_handler, \ + FILES := $(FH_JAR) $(BUILD_LIBTIMEOUT_HANDLER), \ +)) + +IMAGES_TARGETS += $(COPY_FH) + +################################################################################ +# Test the failure handler itself +################################################################################ +# +# Use JTREG_TEST_OPTS for test VM options +# Use JTREG_TESTS for jtreg tests parameter +# +RUN_DIR := $(FH_SUPPORT)/test +# Add the dir of the dll to the path on windows +ifeq ($(OPENJDK_TARGET_OS), windows) + export PATH := $(PATH);$(FH_SUPPORT) +endif + +test: + $(RM) -r $(RUN_DIR) + $(MKDIR) -p $(RUN_DIR) + $(CD) $(FH_BASEDIR)/test && JT_JAVA=$(BOOT_JDK) $(JTREGEXE) \ + -jdk:$(BOOT_JDK) \ + $(JTREG_TEST_OPTS) \ + -timeout:0.1 -va -retain:all \ + -noreport \ + -agentvm \ + -thd:$(FH_JAR) \ + -th:jdk.test.failurehandler.jtreg.GatherProcessInfoTimeoutHandler \ + -od:$(FH_JAR) \ + -o:jdk.test.failurehandler.jtreg.GatherDiagnosticInfoObserver \ + -w:$(RUN_DIR)/JTwork -r:$(RUN_DIR)/JTreport \ + $(if $(JTREG_TESTS), $(JTREG_TESTS), .) \ + || true + +################################################################################ + +build: $(TARGETS) +images: $(IMAGES_TARGETS) + +.PHONY: all images test diff -r 3414aeff4a80 -r ee1b8619eddb test/failure_handler/Makefile --- a/test/failure_handler/Makefile Mon Apr 18 16:18:56 2016 +0100 +++ b/test/failure_handler/Makefile Wed Jul 05 21:35:27 2017 +0200 @@ -29,7 +29,7 @@ CLASSES_DIR := ${BUILD_DIR}/classes IMAGE_DIR := ${BUILD_DIR}/image RUN_DIR := $(shell pwd)/run - +CLASSPATH := ${JTREG_HOME}/lib/jtreg.jar:${JAVA_HOME}/lib/tools.jar SRC_DIR := src/share/classes/ SOURCES := ${SRC_DIR}/jdk/test/failurehandler/*.java \ ${SRC_DIR}/jdk/test/failurehandler/action/*.java \ @@ -47,9 +47,12 @@ ifeq ("${OS_NAME}", "Cygwin") BUILD_DIR := $(shell cygpath -m "${BUILD_DIR}") CLASSES_DIR := $(shell cygpath -m "${CLASSES_DIR}") -IMAGE_DIR := $(shell cygpath -m "${IMAGE_DIR}") RUN_DIR := $(shell cygpath -m "${RUN_DIR}") +IMAGE_DIR := $(shell cygpath -m "${IMAGE_DIR}") +RUN_DIR := $(shell cygpath -m "${RUN_DIR}") SRC_DIR := $(shell cygpath -m "${SRC_DIR}") +JAVA_HOME := $(shell cygpath -m "${JAVA_HOME}") JTREG_HOME := $(shell cygpath -m "${JTREG_HOME}") +CLASSPATH := $(shell cygpath -pm "${CLASSPATH}") CC := "cl.exe" endif @@ -57,33 +60,33 @@ native: require_env ifeq ("${OS_NAME}", "Cygwin") - "${CC}" src/windows/native/jdk/test/failurehandler/jtreg/*.c \ - -I"$(shell cygpath -w ${JAVA_HOME}/include)" \ - -I"$(shell cygpath -w ${JAVA_HOME}/include/win32)" \ - /link /MACHINE:X64 /DLL /OUT:timeoutHandler.dll + "${CC}" src/windows/native/jdk/test/failurehandler/jtreg/*.c \ + -I"$(shell cygpath -w "${JAVA_HOME}/include")" \ + -I"$(shell cygpath -w "${JAVA_HOME}/include/win32")" \ + /link /DLL /OUT:timeoutHandler.dll endif check_defined = $(foreach 1,$1,$(__check_defined)) __check_defined = $(if $(value $1),, $(error $1 is not set)) classes: require_env - mkdir -p ${IMAGE_DIR}/bin ${IMAGE_DIR}/lib ${CLASSES_DIR} - "${JAVA_HOME}"/bin/javac -target ${JAVA_RELEASE} -source ${JAVA_RELEASE} \ - -sourcepath $(shell pwd) \ - -classpath ${JTREG_HOME}/lib/jtreg.jar:${JAVA_HOME}/lib/tools.jar \ - -d ${CLASSES_DIR} \ + mkdir -p ${IMAGE_DIR}/bin ${IMAGE_DIR}/lib ${CLASSES_DIR} + "${JAVA_HOME}"/bin/javac -target ${JAVA_RELEASE} -source ${JAVA_RELEASE} \ + 
-sourcepath "$(shell pwd)" \ + -cp "${CLASSPATH}" \ + -d ${CLASSES_DIR} \ ${SOURCES} - "${JAVA_HOME}"/bin/jar cf ${TARGET_JAR} -C ${CLASSES_DIR} . - "${JAVA_HOME}"/bin/jar uf ${TARGET_JAR} -C ${CONF_DIR} . + "${JAVA_HOME}"/bin/jar cf "${TARGET_JAR}" -C "${CLASSES_DIR}" . + "${JAVA_HOME}"/bin/jar uf "${TARGET_JAR}" -C "${CONF_DIR}" . # # Use JTREG_TEST_OPTS for test VM options # Use JTREG_TESTS for jtreg tests parameter # test: require_env build - rm -rf ${RUN_DIR} - mkdir -p ${RUN_DIR} - "${JTREG_HOME}"/bin/jtreg \ + rm -rf "${RUN_DIR}" + mkdir -p "${RUN_DIR}" + "${JTREG_HOME}"/bin/jtreg \ -jdk:"${JAVA_HOME}" \ ${JTREG_TEST_OPTS} \ -timeout:0.1 -va -retain:all \ @@ -93,7 +96,8 @@ -th:jdk.test.failurehandler.jtreg.GatherProcessInfoTimeoutHandler \ -od:"${TARGET_JAR}" \ -o:jdk.test.failurehandler.jtreg.GatherDiagnosticInfoObserver \ - -w:${RUN_DIR}/JTwork -r:${RUN_DIR}/JTreport \ + -w:"${RUN_DIR}/JTwork" \ + -r:"${RUN_DIR}/JTreport" \ $(if ${JTREG_TESTS}, ${JTREG_TESTS}, test) \ && false || true @@ -101,11 +105,11 @@ debug: test require_env: - $(call check_defined, JAVA_HOME) - $(call check_defined, JTREG_HOME) + $(call check_defined, JAVA_HOME) + $(call check_defined, JTREG_HOME) clean: - rm -rf "${BUILD_DIR}" "${RUN_DIR}" + rm -rf "${BUILD_DIR}" "${RUN_DIR}" build: classes native diff -r 3414aeff4a80 -r ee1b8619eddb test/failure_handler/README --- a/test/failure_handler/README Mon Apr 18 16:18:56 2016 +0100 +++ b/test/failure_handler/README Wed Jul 05 21:35:27 2017 +0200 @@ -36,11 +36,9 @@ BUILDING -To build a library, one should simply run make with 'JTREG_HOME' and -'JAVA_HOME' environment variables set. 'JAVA_HOME' should contain path to JDK, -'JTREG_HOME' -- path to jtreg. - -'image/lib/jtregFailureHandler.jar' is created on successful build. +The library is built using the top level build-test-failure-handler target and +is automatically included in the test image and picked up by hotspot and jdk +test makefiles. CONFIGURATION diff -r 3414aeff4a80 -r ee1b8619eddb test/failure_handler/src/share/classes/jdk/test/failurehandler/jtreg/GatherProcessInfoTimeoutHandler.java --- a/test/failure_handler/src/share/classes/jdk/test/failurehandler/jtreg/GatherProcessInfoTimeoutHandler.java Mon Apr 18 16:18:56 2016 +0100 +++ b/test/failure_handler/src/share/classes/jdk/test/failurehandler/jtreg/GatherProcessInfoTimeoutHandler.java Wed Jul 05 21:35:27 2017 +0200 @@ -39,12 +39,16 @@ * process and its children. 
*/ public class GatherProcessInfoTimeoutHandler extends TimeoutHandler { + private static final boolean HAS_NATIVE_LIBRARY; static { + boolean value = true; try { System.loadLibrary("timeoutHandler"); } catch (UnsatisfiedLinkError ignore) { // not all os need timeoutHandler native-library + value = false; } + HAS_NATIVE_LIBRARY = value; } private static final String LOG_FILENAME = "processes.log"; private static final String OUTPUT_FILENAME = "processes.html"; @@ -105,7 +109,7 @@ if (result == 0L) { /* jtreg didn't find pid, most probably we are on JDK < 9 there is no Process::getPid */ - if ("windows".equals(OS.current().family)) { + if (HAS_NATIVE_LIBRARY && "windows".equals(OS.current().family)) { try { Field field = process.getClass().getDeclaredField("handle"); boolean old = field.isAccessible(); diff -r 3414aeff4a80 -r ee1b8619eddb test/failure_handler/src/windows/native/jdk/test/failurehandler/jtreg/GatherProcessInfoTimeoutHandler.c --- a/test/failure_handler/src/windows/native/jdk/test/failurehandler/jtreg/GatherProcessInfoTimeoutHandler.c Mon Apr 18 16:18:56 2016 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,37 +0,0 @@ -/* - * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -JNIEXPORT jlong JNICALL Java_jdk_test_failurehandler_jtreg_GatherProcessInfoTimeoutHandler_getWin32Pid - (JNIEnv* env, jobject o, jlong handle) { - return GetProcessId(handle); -} -#ifdef __cplusplus -} -#endif diff -r 3414aeff4a80 -r ee1b8619eddb test/failure_handler/src/windows/native/libtimeoutHandler/GatherProcessInfoTimeoutHandler.c --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/failure_handler/src/windows/native/libtimeoutHandler/GatherProcessInfoTimeoutHandler.c Wed Jul 05 21:35:27 2017 +0200 @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +JNIEXPORT jlong JNICALL Java_jdk_test_failurehandler_jtreg_GatherProcessInfoTimeoutHandler_getWin32Pid + (JNIEnv* env, jobject o, jlong handle) { + return GetProcessId((HANDLE) handle); +} +#ifdef __cplusplus +} +#endif diff -r 3414aeff4a80 -r ee1b8619eddb test/failure_handler/test/sanity/Suicide.java --- a/test/failure_handler/test/sanity/Suicide.java Mon Apr 18 16:18:56 2016 +0100 +++ b/test/failure_handler/test/sanity/Suicide.java Wed Jul 05 21:35:27 2017 +0200 @@ -28,7 +28,7 @@ /* * @test * @summary Suicide test - * @run main/othervm Crash + * @run main/othervm Suicide */ public class Suicide { public static void main(String[] args) { diff -r 3414aeff4a80 -r ee1b8619eddb test/lib/sun/hotspot/WhiteBox.java --- a/test/lib/sun/hotspot/WhiteBox.java Mon Apr 18 16:18:56 2016 +0100 +++ b/test/lib/sun/hotspot/WhiteBox.java Wed Jul 05 21:35:27 2017 +0200 @@ -344,7 +344,13 @@ } public native Object[] getCodeBlob(long addr); - public native void clearInlineCaches(); + private native void clearInlineCaches0(boolean preserve_static_stubs); + public void clearInlineCaches() { + clearInlineCaches0(false); + } + public void clearInlineCaches(boolean preserve_static_stubs) { + clearInlineCaches0(preserve_static_stubs); + } // Intered strings public native boolean isInStringTable(String str); diff -r 3414aeff4a80 -r ee1b8619eddb test/make/TestMakeBase.gmk --- a/test/make/TestMakeBase.gmk Mon Apr 18 16:18:56 2016 +0100 +++ b/test/make/TestMakeBase.gmk Wed Jul 05 21:35:27 2017 +0200 @@ -254,4 +254,14 @@ but was $(call sequence, 5, 15)) endif +################################################################################ +# Test that PathList is safe when called multiple nested times. + +PATHLIST_INPUT := foo bar baz + +$(eval $(call assert-equals, \ + $(call PathList, $(call PathList, $(PATHLIST_INPUT))), \ + $(call PathList, $(PATHLIST_INPUT)), \ + PathList call not safe for calling twice)) + all: $(TEST_TARGETS)
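The final TestMakeBase.gmk hunk above asserts that PathList is idempotent when nested, which holds because the patched MakeBase.gmk definition strips any existing double quotes before joining. A minimal standalone sketch of that behaviour follows; it is illustrative only and not part of the changeset, and it uses simplified stand-ins for the SPACE, PATH_SEP and DQUOTE macros that MakeBase.gmk defines (here ':' stands in for the platform path separator).

# pathlist-demo.mk -- illustrative sketch, not part of the changeset.
# EMPTY/SPACE/PATH_SEP/DQUOTE are simplified stand-ins for the MakeBase.gmk macros.
EMPTY :=
SPACE := $(EMPTY) $(EMPTY)
PATH_SEP := :
DQUOTE := "
# Same expression as the patched PathList above: quotes from any previous
# PathList call are removed before the entries are joined, so nesting is safe.
PathList = "$(subst $(SPACE),$(PATH_SEP),$(strip $(subst $(DQUOTE),,$1)))"

# Both $(info) lines print the same quoted list, "foo:bar:baz", mirroring the
# assert-equals check added to TestMakeBase.gmk.
$(info $(call PathList, foo bar baz))
$(info $(call PathList, $(call PathList, foo bar baz)))

all: ;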